/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import static akka.actor.ActorRef.noSender;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;
import static java.util.Objects.requireNonNullElse;

import akka.actor.ActorRef;
import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.collect.Iterables;
import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FutureCallback;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Queue;
import java.util.SortedSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.UnaryOperator;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
import org.opendaylight.controller.cluster.datastore.node.utils.transformer.ReusableNormalizedNodePruner;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
import org.opendaylight.controller.cluster.datastore.persisted.SkipTransactionsPayload;
import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
import org.opendaylight.controller.cluster.raft.messages.Payload;
import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.common.Empty;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeTip;
import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.FiniteDuration;

/**
 * Internal shard state, similar to a DOMStore, but optimized for use in the actor system, e.g. it does not expose
 * public interfaces and assumes it is only ever called from a single thread.
 *
 * <p>This class is not part of the API contract and is subject to change at any time. It is NOT thread-safe.
 */
// non-final for mocking
public class ShardDataTree extends ShardDataTreeTransactionParent {
    private static final class CommitEntry {
        final SimpleShardDataTreeCohort cohort;
        long lastAccess;

        CommitEntry(final SimpleShardDataTreeCohort cohort, final long now) {
            this.cohort = requireNonNull(cohort);
            lastAccess = now;
        }

        @Override
        public String toString() {
            return "CommitEntry [tx=" + cohort.transactionId() + ", state=" + cohort.getState() + "]";
        }
    }

    private static final Timeout COMMIT_STEP_TIMEOUT = new Timeout(FiniteDuration.create(5, TimeUnit.SECONDS));
    private static final Logger LOG = LoggerFactory.getLogger(ShardDataTree.class);

    /**
     * Process this many transactions in a single batched run. If we exceed this limit, we need to schedule later
     * execution to finish up the batch. This is necessary in case of a long list of transactions which progress
     * immediately through their preCommit phase -- if that happens, their completion eats up stack frames and could
     * result in StackOverflowError.
     */
    private static final int MAX_TRANSACTION_BATCH = 100;
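
    // For illustration: if 150 transactions complete their canCommit validation immediately in one run, only the
    // first 100 are processed inline; processNextPendingTransaction() then defers to
    // shard.scheduleNextPendingTransaction(), letting the stack unwind before the remaining 50 are handled.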

    private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
    private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
    private final Deque<CommitEntry> pendingTransactions = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingCommits = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingFinishCommits = new ArrayDeque<>();

    /**
     * Callbacks that need to be invoked once a payload is replicated.
     */
    private final Map<Payload, Runnable> replicationCallbacks = new HashMap<>();

    private final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher;
    private final Collection<ShardDataTreeMetadata<?>> metadata;
    private final DataTree dataTree;
    private final String logContext;
    private final Shard shard;
    private Runnable runOnPendingTransactionsComplete;

    /**
     * Optimistic {@link DataTreeCandidate} preparation. Since our DataTree implementation is a
     * {@link DataTree}, each {@link DataTreeCandidate} is also a {@link DataTreeTip}, i.e. another
     * candidate can be prepared on top of it. They still need to be committed in sequence. Here we track the current
     * tip of the data tree, which is the last DataTreeCandidate we have in flight, or the DataTree itself.
     */
    private DataTreeTip tip;
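
    // Example of the tip chain: with nothing in flight, tip == dataTree. After two transactions are prepared,
    // tip points at the second candidate, which was validated and prepared on top of the first one rather than on
    // the committed tree. Once the last in-flight candidate is committed, finishCommit() resets tip back to
    // dataTree.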

    private EffectiveModelContext schemaContext;
    private DataSchemaContextTree dataSchemaContext;

    private int currentTransactionBatch;

    ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final DataTree dataTree,
            final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
            final String logContext,
            final ShardDataTreeMetadata<?>... metadata) {
        this.dataTree = requireNonNull(dataTree);
        updateSchemaContext(schemaContext);

        this.shard = requireNonNull(shard);
        this.treeChangeListenerPublisher = requireNonNull(treeChangeListenerPublisher);
        this.logContext = requireNonNull(logContext);
        this.metadata = ImmutableList.copyOf(metadata);
    }

    ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType,
            final YangInstanceIdentifier root,
            final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
            final String logContext,
            final ShardDataTreeMetadata<?>... metadata) {
        this(shard, schemaContext, createDataTree(treeType, root), treeChangeListenerPublisher, logContext, metadata);
    }

    private static DataTree createDataTree(final TreeType treeType, final YangInstanceIdentifier root) {
        final DataTreeConfiguration baseConfig = DataTreeConfiguration.getDefault(treeType);
        return new InMemoryDataTreeFactory().create(new DataTreeConfiguration.Builder(baseConfig.getTreeType())
            .setMandatoryNodesValidation(baseConfig.isMandatoryNodesValidationEnabled())
            .setUniqueIndexes(baseConfig.isUniqueIndexEnabled())
            .setRootPath(root)
            .build());
    }

    @VisibleForTesting
    public ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType) {
        this(shard, schemaContext, treeType, YangInstanceIdentifier.of(),
            new DefaultShardDataTreeChangeListenerPublisher(""), "");
    }

    final String logContext() {
        return logContext;
    }

    final long readTime() {
        return shard.ticker().read();
    }

    final DataTree getDataTree() {
        return dataTree;
    }

    final EffectiveModelContext getSchemaContext() {
        return schemaContext;
    }

    final void updateSchemaContext(final @NonNull EffectiveModelContext newSchemaContext) {
        dataTree.setEffectiveModelContext(newSchemaContext);
        schemaContext = newSchemaContext;
        dataSchemaContext = DataSchemaContextTree.from(newSchemaContext);
    }

    final void resetTransactionBatch() {
        currentTransactionBatch = 0;
    }

    /**
     * Take a snapshot of current state for later recovery.
     *
     * @return A state snapshot
     */
    @NonNull ShardDataTreeSnapshot takeStateSnapshot() {
        final NormalizedNode rootNode = takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow();
        final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metaBuilder =
            ImmutableMap.builder();

        for (ShardDataTreeMetadata<?> m : metadata) {
            final ShardDataTreeSnapshotMetadata<?> meta = m.toSnapshot();
            if (meta != null) {
                metaBuilder.put(meta.getType(), meta);
            }
        }

        return new MetadataShardDataTreeSnapshot(rootNode, metaBuilder.build());
    }
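
    // The snapshot built above is what eventually comes back through applyRecoverySnapshot()/applySnapshot():
    // the root node is re-applied wholesale and each ShardDataTreeSnapshotMetadata is handed back to its
    // corresponding ShardDataTreeMetadata instance.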

    private boolean anyPendingTransactions() {
        return !pendingTransactions.isEmpty() || !pendingCommits.isEmpty() || !pendingFinishCommits.isEmpty();
    }

    private void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot,
            final UnaryOperator<DataTreeModification> wrapper) throws DataValidationFailedException {
        final Stopwatch elapsed = Stopwatch.createStarted();

        if (anyPendingTransactions()) {
            LOG.warn("{}: applying state snapshot with pending transactions", logContext);
        }

        final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> snapshotMeta;
        if (snapshot instanceof MetadataShardDataTreeSnapshot metaSnapshot) {
            snapshotMeta = metaSnapshot.getMetadata();
        } else {
            snapshotMeta = ImmutableMap.of();
        }

        for (var m : metadata) {
            final var s = snapshotMeta.get(m.getSupportedType());
            if (s != null) {
                m.applySnapshot(s);
            } else {
                m.reset();
            }
        }

        final DataTreeModification unwrapped = newModification();
        final DataTreeModification mod = wrapper.apply(unwrapped);
        // delete everything first
        mod.delete(YangInstanceIdentifier.of());

        snapshot.getRootNode().ifPresent(rootNode -> {
            // Add everything from the remote node back
            mod.write(YangInstanceIdentifier.of(), rootNode);
        });

        mod.ready();

        dataTree.validate(unwrapped);
        DataTreeCandidateTip candidate = dataTree.prepare(unwrapped);
        dataTree.commit(candidate);
        notifyListeners(candidate);

        LOG.debug("{}: state snapshot applied in {}", logContext, elapsed);
    }

    /**
     * Apply a snapshot coming from the leader. This method assumes the leader and follower SchemaContexts match and
     * does not perform any pruning.
     *
     * @param snapshot Snapshot that needs to be applied
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    final void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
        // TODO: we should be taking ShardSnapshotState here and performing forward-compatibility translation
        applySnapshot(snapshot, UnaryOperator.identity());
    }

    /**
     * Apply a snapshot coming from recovery. This method does not assume the SchemaContexts match and performs data
     * pruning in an attempt to adjust the state to our current SchemaContext.
     *
     * @param snapshot Snapshot that needs to be applied
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    final void applyRecoverySnapshot(final @NonNull ShardSnapshotState snapshot) throws DataValidationFailedException {
        // TODO: we should be able to reuse the pruner, provided we are not reentrant
        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
            dataSchemaContext);
        if (snapshot.needsMigration()) {
            final ReusableNormalizedNodePruner uintPruner = pruner.withUintAdaption();
            applySnapshot(snapshot.getSnapshot(),
                delegate -> new PruningDataTreeModification.Proactive(delegate, dataTree, uintPruner));
        } else {
            applySnapshot(snapshot.getSnapshot(),
                delegate -> new PruningDataTreeModification.Reactive(delegate, dataTree, pruner));
        }
    }
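
    // On the two modes above: Proactive pruning runs every write through the pruner up front, which is what the
    // uint-adaptation migration path needs, while Reactive pruning appears designed to step in only when a write
    // is rejected against the current schema.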

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void applyRecoveryCandidate(final CommitTransactionPayload payload) throws IOException {
        final var entry = payload.acquireCandidate();
        final var unwrapped = newModification();
        final var pruningMod = createPruningModification(unwrapped,
            NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.streamVersion()) > 0);

        DataTreeCandidates.applyToModification(pruningMod, entry.candidate());
        pruningMod.ready();

        LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped);

        try {
            dataTree.validate(unwrapped);
            dataTree.commit(dataTree.prepare(unwrapped));
        } catch (Exception e) {
            final var file = new File(System.getProperty("karaf.data", "."),
                "failed-recovery-payload-" + logContext + ".out");
            DataTreeModificationOutput.toFile(file, unwrapped);
            throw new IllegalStateException(
                "%s: Failed to apply recovery payload. Modification data was written to file %s".formatted(
                    logContext, file), e);
        }

        allMetadataCommittedTransaction(entry.transactionId());
    }

    private PruningDataTreeModification createPruningModification(final DataTreeModification unwrapped,
            final boolean uintAdapting) {
        // TODO: we should be able to reuse the pruner, provided we are not reentrant
        final var pruner = ReusableNormalizedNodePruner.forDataSchemaContext(dataSchemaContext);
        return uintAdapting ? new PruningDataTreeModification.Proactive(unwrapped, dataTree, pruner.withUintAdaption())
            : new PruningDataTreeModification.Reactive(unwrapped, dataTree, pruner);
    }

    /**
     * Apply a payload coming from recovery. This method does not assume the SchemaContexts match and performs data
     * pruning in an attempt to adjust the state to our current SchemaContext.
     *
     * @param payload Payload
     * @throws IOException when the snapshot fails to deserialize
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    final void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
        if (payload instanceof CommitTransactionPayload commit) {
            applyRecoveryCandidate(commit);
        } else if (payload instanceof AbortTransactionPayload abort) {
            allMetadataAbortedTransaction(abort.getIdentifier());
        } else if (payload instanceof PurgeTransactionPayload purge) {
            allMetadataPurgedTransaction(purge.getIdentifier());
        } else if (payload instanceof CreateLocalHistoryPayload create) {
            allMetadataCreatedLocalHistory(create.getIdentifier());
        } else if (payload instanceof CloseLocalHistoryPayload close) {
            allMetadataClosedLocalHistory(close.getIdentifier());
        } else if (payload instanceof PurgeLocalHistoryPayload purge) {
            allMetadataPurgedLocalHistory(purge.getIdentifier());
        } else if (payload instanceof SkipTransactionsPayload skip) {
            allMetadataSkipTransactions(skip);
        } else {
            LOG.debug("{}: ignoring unhandled payload {}", logContext, payload);
        }
    }

    private void applyReplicatedCandidate(final CommitTransactionPayload payload)
            throws DataValidationFailedException, IOException {
        final var payloadCandidate = payload.acquireCandidate();
        final var transactionId = payloadCandidate.transactionId();
        LOG.debug("{}: Applying foreign transaction {}", logContext, transactionId);

        final var mod = newModification();
        // TODO: check version here, which will enable us to perform forward-compatibility transformations
        DataTreeCandidates.applyToModification(mod, payloadCandidate.candidate());
        mod.ready();

        LOG.trace("{}: Applying foreign modification {}", logContext, mod);
        dataTree.validate(mod);
        final var candidate = dataTree.prepare(mod);
        dataTree.commit(candidate);

        allMetadataCommittedTransaction(transactionId);
        notifyListeners(candidate);
    }

    /**
     * Apply a payload coming from the leader, which could actually be us. This method assumes the leader and follower
     * SchemaContexts match and does not perform any pruning.
     *
     * @param identifier Payload identifier as returned from RaftActor
     * @param payload Payload
     * @throws IOException when the snapshot fails to deserialize
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    final void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
            DataValidationFailedException {
        /*
         * This is a bit more involved than it needs to be due to the fact we do not want to be touching the payload
         * if we are the leader and it has originated with us.
         *
         * The identifier will only ever be non-null when we were the leader which achieved consensus. Unfortunately,
         * though, this may not be the case anymore, as we are being called some time afterwards and we may not be
         * acting in that capacity anymore.
         *
         * In any case, we know that this is an entry coming from replication, hence we can be sure we will not observe
         * pre-Boron state -- which limits the number of options here.
         */
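        // For example: on the leader that persisted the payload, identifier is the TransactionIdentifier and
        // payloadReplicationComplete() below resumes the cohort at the head of pendingFinishCommits; on a
        // follower, identifier is null and the candidate is applied directly via applyReplicatedCandidate().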
        if (payload instanceof CommitTransactionPayload commit) {
            if (identifier == null) {
                applyReplicatedCandidate(commit);
            } else {
                verify(identifier instanceof TransactionIdentifier);
                // if we did not track this transaction before, it means that it came from another leader and we are in
                // the process of committing it while in PreLeader state. That means that it hasn't yet been committed
                // to the local DataTree and would be lost if it was only applied via payloadReplicationComplete().
                if (!payloadReplicationComplete((TransactionIdentifier) identifier)) {
                    applyReplicatedCandidate(commit);
                }
            }

            // make sure acquireCandidate() is the last call touching the payload data as we want it to be GC-ed.
            checkRootOverwrite(commit.acquireCandidate().candidate());
        } else if (payload instanceof AbortTransactionPayload abort) {
            if (identifier != null) {
                payloadReplicationComplete(abort);
            }
            allMetadataAbortedTransaction(abort.getIdentifier());
        } else if (payload instanceof PurgeTransactionPayload purge) {
            if (identifier != null) {
                payloadReplicationComplete(purge);
            }
            allMetadataPurgedTransaction(purge.getIdentifier());
        } else if (payload instanceof CloseLocalHistoryPayload close) {
            if (identifier != null) {
                payloadReplicationComplete(close);
            }
            allMetadataClosedLocalHistory(close.getIdentifier());
        } else if (payload instanceof CreateLocalHistoryPayload create) {
            if (identifier != null) {
                payloadReplicationComplete(create);
            }
            allMetadataCreatedLocalHistory(create.getIdentifier());
        } else if (payload instanceof PurgeLocalHistoryPayload purge) {
            if (identifier != null) {
                payloadReplicationComplete(purge);
            }
            allMetadataPurgedLocalHistory(purge.getIdentifier());
        } else if (payload instanceof SkipTransactionsPayload skip) {
            if (identifier != null) {
                payloadReplicationComplete(skip);
            }
            allMetadataSkipTransactions(skip);
        } else {
            LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
        }
    }

    private void checkRootOverwrite(final DataTreeCandidate candidate) {
        final DatastoreContext datastoreContext = shard.getDatastoreContext();
        if (!datastoreContext.isSnapshotOnRootOverwrite()) {
            return;
        }

        if (!datastoreContext.isPersistent()) {
            // FIXME: why don't we want a snapshot in non-persistent state?
            return;
        }

        // top-level container, i.e. "/"
        if (candidate.getRootPath().isEmpty() && candidate.getRootNode().modificationType() == ModificationType.WRITE) {
            LOG.debug("{}: shard root overwritten, enqueuing snapshot", logContext);
            shard.self().tell(new InitiateCaptureSnapshot(), noSender());
        }
    }
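
    // A root overwrite as detected above typically stems from a whole-datastore restore, i.e. a transaction that
    // writes "/" outright; snapshotting immediately presumably keeps the journal from carrying the entire
    // pre-restore history forward.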

    private void replicatePayload(final Identifier id, final Payload payload, final @Nullable Runnable callback) {
        if (callback != null) {
            replicationCallbacks.put(payload, callback);
        }
        shard.persistPayload(id, payload, true);
    }

    private void payloadReplicationComplete(final AbstractIdentifiablePayload<?> payload) {
        final Runnable callback = replicationCallbacks.remove(payload);
        if (callback != null) {
            LOG.debug("{}: replication of {} completed, invoking {}", logContext, payload.getIdentifier(), callback);
            callback.run();
        } else {
            LOG.debug("{}: replication of {} has no callback", logContext, payload.getIdentifier());
        }
    }

    private boolean payloadReplicationComplete(final TransactionIdentifier txId) {
        final var current = pendingFinishCommits.peek();
        if (current == null) {
            LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
            allMetadataCommittedTransaction(txId);
            return false;
        }

        final var cohortTxId = current.cohort.transactionId();
        if (!cohortTxId.equals(txId)) {
            LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
                cohortTxId, txId);
            allMetadataCommittedTransaction(txId);
            return false;
        }

        finishCommit(current.cohort);
        return true;
    }

    private void allMetadataAbortedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionAborted(txId);
        }
    }

    private void allMetadataCommittedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionCommitted(txId);
        }
    }

    private void allMetadataPurgedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionPurged(txId);
        }
    }

    private void allMetadataCreatedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryCreated(historyId);
        }
    }

    private void allMetadataClosedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryClosed(historyId);
        }
    }

    private void allMetadataPurgedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryPurged(historyId);
        }
    }

    private void allMetadataSkipTransactions(final SkipTransactionsPayload payload) {
        final var historyId = payload.getIdentifier();
        final var txIds = payload.getTransactionIds();
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionsSkipped(historyId, txIds);
        }
    }

    /**
     * Create a transaction chain for specified history. Unlike
     * {@link #ensureTransactionChain(LocalHistoryIdentifier, Runnable)}, this method is used for re-establishing
     * state when we are taking over the state machine after a failover.
     *
     * @param historyId Local history identifier
     * @param closed True if the chain should be created in closed state (i.e. pending purge)
     * @return Transaction chain handle
     */
    final ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
            final boolean closed) {
        final ShardDataTreeTransactionChain ret = new ShardDataTreeTransactionChain(historyId, this);
        final ShardDataTreeTransactionChain existing = transactionChains.putIfAbsent(historyId, ret);
        checkState(existing == null, "Attempted to recreate chain %s, but %s already exists", historyId, existing);
        return ret;
    }

    final ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
            final @Nullable Runnable callback) {
        ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
        if (chain == null) {
            chain = new ShardDataTreeTransactionChain(historyId, this);
            transactionChains.put(historyId, chain);
            replicatePayload(historyId, CreateLocalHistoryPayload.create(
                historyId, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
        } else if (callback != null) {
            callback.run();
        }

        return chain;
    }

    final @NonNull ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
        shard.getShardMBean().incrementReadOnlyTransactionCount();

        final var historyId = txId.getHistoryId();
        return historyId.getHistoryId() == 0 ? newStandaloneReadOnlyTransaction(txId)
            : ensureTransactionChain(historyId, null).newReadOnlyTransaction(txId);
    }

    final @NonNull ReadOnlyShardDataTreeTransaction newStandaloneReadOnlyTransaction(final TransactionIdentifier txId) {
        return new ReadOnlyShardDataTreeTransaction(this, txId, takeSnapshot());
    }

    final @NonNull ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
        shard.getShardMBean().incrementReadWriteTransactionCount();

        final var historyId = txId.getHistoryId();
        return historyId.getHistoryId() == 0 ? newStandaloneReadWriteTransaction(txId)
            : ensureTransactionChain(historyId, null).newReadWriteTransaction(txId);
    }

    final @NonNull ReadWriteShardDataTreeTransaction newStandaloneReadWriteTransaction(
            final TransactionIdentifier txId) {
        return new ReadWriteShardDataTreeTransaction(this, txId, newModification());
    }

    final void notifyListeners(final DataTreeCandidate candidate) {
        treeChangeListenerPublisher.publishChanges(candidate);
    }

    /**
     * Immediately purge all state relevant to leader. This includes all transaction chains and any scheduled
     * replication callbacks.
     */
    final void purgeLeaderState() {
        for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
            chain.close();
        }

        transactionChains.clear();
        replicationCallbacks.clear();
    }

    /**
     * Close a single transaction chain.
     *
     * @param id History identifier
     * @param callback Callback to invoke upon completion, may be null
     */
    final void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        if (commonCloseTransactionChain(id, callback)) {
            replicatePayload(id, CloseLocalHistoryPayload.create(id,
                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
        }
    }

    /**
     * Close a single transaction chain which is received through ask-based protocol. It does not keep a commit record.
     *
     * @param id History identifier
     */
    final void closeTransactionChain(final LocalHistoryIdentifier id) {
        commonCloseTransactionChain(id, null);
    }

    private boolean commonCloseTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.get(id);
        if (chain == null) {
            LOG.debug("{}: Closing non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return false;
        }

        chain.close();
        return true;
    }

    /**
     * Purge a single transaction chain.
     *
     * @param id History identifier
     * @param callback Callback to invoke upon completion, may be null
     */
    final void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.remove(id);
        if (chain == null) {
            LOG.debug("{}: Purging non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return;
        }

        replicatePayload(id, PurgeLocalHistoryPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    final void skipTransactions(final LocalHistoryIdentifier id, final ImmutableUnsignedLongSet transactionIds,
            final Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.get(id);
        if (chain == null) {
            LOG.debug("{}: Skipping on non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return;
        }

        replicatePayload(id, SkipTransactionsPayload.create(id, transactionIds,
            shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    final Optional<DataTreeCandidate> readCurrentData() {
        return readNode(YangInstanceIdentifier.of())
            .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(), state));
    }

    final void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
            final Optional<DataTreeCandidate> initialState,
            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
        treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
    }

    final int getQueueSize() {
        return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
    }

    @Override
    final void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
        final TransactionIdentifier id = transaction.getIdentifier();
        LOG.debug("{}: aborting transaction {}", logContext, id);
        replicatePayload(id, AbortTransactionPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    @Override
    final void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
        // No-op for free-standing transactions
    }

    @Override
    final ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
            final Optional<SortedSet<String>> participatingShardNames) {
        final DataTreeModification snapshot = transaction.getSnapshot();
        final TransactionIdentifier id = transaction.getIdentifier();
        LOG.debug("{}: readying transaction {}", logContext, id);
        snapshot.ready();
        LOG.debug("{}: transaction {} ready", logContext, id);

        return createReadyCohort(transaction.getIdentifier(), snapshot, participatingShardNames);
    }

    final void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
        LOG.debug("{}: purging transaction {}", logContext, id);
        replicatePayload(id, PurgeTransactionPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    public final Optional<NormalizedNode> readNode(final YangInstanceIdentifier path) {
        return takeSnapshot().readNode(path);
    }

    final DataTreeSnapshot takeSnapshot() {
        return dataTree.takeSnapshot();
    }

    final DataTreeModification newModification() {
        return takeSnapshot().newModification();
    }

    final Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
        Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());

        for (CommitEntry entry : pendingFinishCommits) {
            ret.add(entry.cohort);
        }

        for (CommitEntry entry : pendingCommits) {
            ret.add(entry.cohort);
        }

        for (CommitEntry entry : pendingTransactions) {
            ret.add(entry.cohort);
        }

        pendingFinishCommits.clear();
        pendingCommits.clear();
        pendingTransactions.clear();

        return ret;
    }
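
    // Note that the drain order above mirrors the commit pipeline: cohorts closest to completion come first, so
    // callers tearing down the queues see them in the order they would have completed.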

    /**
     * Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
     */
    final void resumeNextPendingTransaction() {
        LOG.debug("{}: attempting to resume transaction processing", logContext);
        processNextPending();
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void processNextPendingTransaction() {
        ++currentTransactionBatch;
        if (currentTransactionBatch > MAX_TRANSACTION_BATCH) {
            LOG.debug("{}: Already processed {}, scheduling continuation", logContext, currentTransactionBatch);
            shard.scheduleNextPendingTransaction();
            return;
        }

        processNextPending(pendingTransactions, State.CAN_COMMIT_PENDING, entry -> {
            final SimpleShardDataTreeCohort cohort = entry.cohort;
            final DataTreeModification modification = cohort.getDataTreeModification();

            LOG.debug("{}: Validating transaction {}", logContext, cohort.transactionId());
            Exception cause;
            try {
                tip.validate(modification);
                LOG.debug("{}: Transaction {} validated", logContext, cohort.transactionId());
                cohort.successfulCanCommit();
                entry.lastAccess = readTime();
                return;
            } catch (ConflictingModificationAppliedException e) {
                LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.transactionId(),
                    e.getPath());
                cause = new OptimisticLockFailedException("Optimistic lock failed for path " + e.getPath(), e);
            } catch (DataValidationFailedException e) {
                LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.transactionId(),
                    e.getPath(), e);

                // For debugging purposes, allow dumping of the modification. Coupled with the above
                // precondition log, it should allow us to understand what went on.
                LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.transactionId(), modification);
                LOG.trace("{}: Current tree: {}", logContext, dataTree);
                cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
            } catch (Exception e) {
                LOG.warn("{}: Unexpected failure in validation phase", logContext, e);
                cause = e;
            }

            // Failure path: propagate the failure, remove the transaction from the queue and loop to the next one
            pendingTransactions.poll().cohort.failedCanCommit(cause);
        });
    }

    private void processNextPending() {
        processNextPendingCommit();
        processNextPendingTransaction();
    }

    private void processNextPending(final Queue<CommitEntry> queue, final State allowedState,
            final Consumer<CommitEntry> processor) {
        while (!queue.isEmpty()) {
            final CommitEntry entry = queue.peek();
            final SimpleShardDataTreeCohort cohort = entry.cohort;

            if (cohort.isFailed()) {
                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.transactionId());
                queue.remove();
                continue;
            }

            if (cohort.getState() == allowedState) {
                processor.accept(entry);
            }

            break;
        }

        maybeRunOperationOnPendingTransactionsComplete();
    }

    private void processNextPendingCommit() {
        processNextPending(pendingCommits, State.COMMIT_PENDING,
            entry -> startCommit(entry.cohort, entry.cohort.getCandidate()));
    }

    private boolean peekNextPendingCommit() {
        final CommitEntry first = pendingCommits.peek();
        return first != null && first.cohort.getState() == State.COMMIT_PENDING;
    }

    // non-final for mocking
    void startCanCommit(final SimpleShardDataTreeCohort cohort) {
        final CommitEntry head = pendingTransactions.peek();
        if (head == null) {
            LOG.warn("{}: No transactions enqueued while attempting to start canCommit on {}", logContext, cohort);
            return;
        }
        if (!cohort.equals(head.cohort)) {
            // The tx isn't at the head of the queue so we can't start canCommit at this point. Here we check if this
            // tx should be moved ahead of other tx's in the READY state in the pendingTransactions queue. If this tx
            // has other participating shards, it could deadlock with other tx's accessing the same shards
            // depending on the order the tx's are readied on each shard
            // (see https://jira.opendaylight.org/browse/CONTROLLER-1836). Therefore, if the preceding participating
            // shard names for a preceding pending tx, call it A, in the queue match those of this tx, then this tx
            // is allowed to be moved ahead of tx A in the queue so it is processed first to avoid potential deadlock
            // if tx A is behind this tx in the pendingTransactions queue for a preceding shard. In other words, since
            // canCommit for this tx was requested before tx A, honor that request. If this tx is moved to the head of
            // the queue as a result, then proceed with canCommit.
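            //
            // Worked example: shards "cars" and "people" both participate in tx1 and tx2, but tx1 is readied first
            // on "cars" while tx2 is readied first on "people". Without the reordering below, each shard would sit
            // waiting on the other's head transaction. Because both transactions see the same preceding shard
            // names here, the one whose canCommit arrived first is moved to the head of the queue instead.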

            Collection<String> precedingShardNames = extractPrecedingShardNames(cohort.getParticipatingShardNames());
            if (precedingShardNames.isEmpty()) {
                LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.transactionId());
                return;
            }

            LOG.debug("{}: Evaluating tx {} for canCommit - preceding participating shard names {}",
                logContext, cohort.transactionId(), precedingShardNames);
            final Iterator<CommitEntry> iter = pendingTransactions.iterator();
            int index = -1;
            int moveToIndex = -1;
            while (iter.hasNext()) {
                final CommitEntry entry = iter.next();
                ++index;

                if (cohort.equals(entry.cohort)) {
                    if (moveToIndex < 0) {
                        LOG.debug("{}: Not moving tx {} - cannot proceed with canCommit",
                            logContext, cohort.transactionId());
                        return;
                    }

                    LOG.debug("{}: Moving {} to index {} in the pendingTransactions queue",
                        logContext, cohort.transactionId(), moveToIndex);
                    iter.remove();
                    insertEntry(pendingTransactions, entry, moveToIndex);

                    if (!cohort.equals(pendingTransactions.peek().cohort)) {
                        LOG.debug("{}: Tx {} is not at the head of the queue - cannot proceed with canCommit",
                            logContext, cohort.transactionId());
                        return;
                    }

                    LOG.debug("{}: Tx {} is now at the head of the queue - proceeding with canCommit",
                        logContext, cohort.transactionId());
                    break;
                }

                if (entry.cohort.getState() != State.READY) {
                    LOG.debug("{}: Skipping pending transaction {} in state {}",
                        logContext, entry.cohort.transactionId(), entry.cohort.getState());
                    continue;
                }

                final Collection<String> pendingPrecedingShardNames = extractPrecedingShardNames(
                    entry.cohort.getParticipatingShardNames());

                if (precedingShardNames.equals(pendingPrecedingShardNames)) {
                    if (moveToIndex < 0) {
                        LOG.debug("{}: Preceding shard names {} for pending tx {} match - saving moveToIndex {}",
                            logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), index);
                        moveToIndex = index;
                    } else {
                        LOG.debug(
                            "{}: Preceding shard names {} for pending tx {} match but moveToIndex already set to {}",
                            logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), moveToIndex);
                    }
                } else {
                    LOG.debug("{}: Preceding shard names {} for pending tx {} differ - skipping",
                        logContext, pendingPrecedingShardNames, entry.cohort.transactionId());
                }
            }
        }

        processNextPendingTransaction();
    }

    private static void insertEntry(final Deque<CommitEntry> queue, final CommitEntry entry, final int atIndex) {
        if (atIndex == 0) {
            queue.addFirst(entry);
            return;
        }

        LOG.trace("Inserting into Deque at index {}", atIndex);

        Deque<CommitEntry> tempStack = new ArrayDeque<>(atIndex);
        for (int i = 0; i < atIndex; i++) {
            tempStack.push(queue.poll());
        }

        queue.addFirst(entry);

        tempStack.forEach(queue::addFirst);
    }
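
    // For example, with a queue of [A, B, C] (head first), insertEntry(queue, X, 1) pops A onto the temporary
    // stack, prepends X to form [X, B, C], and then pushes A back, yielding [A, X, B, C].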

    private Collection<String> extractPrecedingShardNames(final Optional<SortedSet<String>> participatingShardNames) {
        return participatingShardNames.map((Function<SortedSet<String>, Collection<String>>)
            set -> set.headSet(shard.getShardName())).orElse(Collections.<String>emptyList());
    }
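
    // headSet() above yields the participating shards sorted strictly before this one: if this shard is "people"
    // and the participating set is [cars, people, topology], the preceding shard names are [cars].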

    private void failPreCommit(final Throwable cause) {
        shard.getShardMBean().incrementFailedTransactionsCount();
        pendingTransactions.poll().cohort.failedPreCommit(cause);
        processNextPendingTransaction();
    }

    // non-final for mocking
    @SuppressWarnings("checkstyle:IllegalCatch")
    void startPreCommit(final SimpleShardDataTreeCohort cohort) {
        final CommitEntry entry = pendingTransactions.peek();
        checkState(entry != null, "Attempted to pre-commit %s when no transactions pending", cohort);

        final SimpleShardDataTreeCohort current = entry.cohort;
        verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);

        final TransactionIdentifier currentId = current.transactionId();
        LOG.debug("{}: Preparing transaction {}", logContext, currentId);

        final DataTreeCandidateTip candidate;
        try {
            candidate = tip.prepare(cohort.getDataTreeModification());
            LOG.debug("{}: Transaction {} candidate ready", logContext, currentId);
        } catch (DataValidationFailedException | RuntimeException e) {
            failPreCommit(e);
            return;
        }

        cohort.userPreCommit(candidate, new FutureCallback<>() {
            @Override
            public void onSuccess(final Empty result) {
                // Set the tip of the data tree.
                tip = verifyNotNull(candidate);

                entry.lastAccess = readTime();

                pendingTransactions.remove();
                pendingCommits.add(entry);

                LOG.debug("{}: Transaction {} prepared", logContext, currentId);

                cohort.successfulPreCommit(candidate);

                processNextPendingTransaction();
            }

            @Override
            public void onFailure(final Throwable failure) {
                failPreCommit(failure);
            }
        });
    }

    private void failCommit(final Exception cause) {
        shard.getShardMBean().incrementFailedTransactionsCount();
        pendingFinishCommits.poll().cohort.failedCommit(cause);
        processNextPending();
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void finishCommit(final SimpleShardDataTreeCohort cohort) {
        final TransactionIdentifier txId = cohort.transactionId();
        final DataTreeCandidate candidate = cohort.getCandidate();

        LOG.debug("{}: Resuming commit of transaction {}", logContext, txId);

        if (tip == candidate) {
            // All pending candidates have been committed, reset the tip to the data tree.
            tip = dataTree;
        }

        try {
            dataTree.commit(candidate);
        } catch (Exception e) {
            LOG.error("{}: Failed to commit transaction {}", logContext, txId, e);
            failCommit(e);
            return;
        }

        allMetadataCommittedTransaction(txId);
        shard.getShardMBean().incrementCommittedTransactionCount();
        shard.getShardMBean().setLastCommittedTransactionTime(System.currentTimeMillis());

        // FIXME: propagate journal index
        pendingFinishCommits.poll().cohort.successfulCommit(UnsignedLong.ZERO, () -> {
            LOG.trace("{}: Transaction {} committed, proceeding to notify", logContext, txId);
            notifyListeners(candidate);

            processNextPending();
        });
    }

    // non-final for mocking
    void startCommit(final SimpleShardDataTreeCohort cohort, final DataTreeCandidate candidate) {
        final CommitEntry entry = pendingCommits.peek();
        checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);

        final SimpleShardDataTreeCohort current = entry.cohort;
        if (!cohort.equals(current)) {
            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.transactionId());
            return;
        }

        LOG.debug("{}: Starting commit for transaction {}", logContext, current.transactionId());

        final TransactionIdentifier txId = cohort.transactionId();
        final Payload payload;
        try {
            payload = CommitTransactionPayload.create(txId, candidate, PayloadVersion.current(),
                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity());
        } catch (IOException e) {
            LOG.error("{}: Failed to encode transaction {} candidate {}", logContext, txId, candidate, e);
            pendingCommits.poll().cohort.failedCommit(e);
            processNextPending();
            return;
        }

        // We process next transactions pending canCommit before we call persistPayload to possibly progress
        // subsequent transactions to the COMMIT_PENDING state so the payloads can be batched for replication. This is
        // done for single-shard transactions that immediately transition from canCommit to preCommit to commit. Note
        // that if the next pending transaction is progressed to COMMIT_PENDING and this method (startCommit) is
        // called, the next transaction will not attempt to replicate b/c the current transaction is still at the head
        // of the pendingCommits queue.
        processNextPendingTransaction();

        // After processing next pending transactions, we can now remove the current transaction from pendingCommits.
        // Note this must be done before the call to peekNextPendingCommit below so we check the next transaction
        // in order to properly determine the batchHint flag for the call to persistPayload.
        pendingCommits.remove();
        pendingFinishCommits.add(entry);

        // See if the next transaction is pending commit (ie in the COMMIT_PENDING state) so it can be batched with
        // this transaction for replication.
        boolean replicationBatchHint = peekNextPendingCommit();

        // Once completed, we will continue via payloadReplicationComplete
        shard.persistPayload(txId, payload, replicationBatchHint);

        entry.lastAccess = shard.ticker().read();

        LOG.debug("{}: Transaction {} submitted to persistence", logContext, txId);

        // Process the next transaction pending commit, if any. If there is one it will be batched with this
        // transaction for replication.
        processNextPendingCommit();
    }

    final Collection<ActorRef> getCohortActors() {
        return cohortRegistry.getCohortActors();
    }

    final void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
        cohortRegistry.process(sender, message);
    }

    @Override
    final ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Exception failure) {
        final SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId, failure);
        pendingTransactions.add(new CommitEntry(cohort, readTime()));
        return cohort;
    }

    @Override
    final ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Optional<SortedSet<String>> participatingShardNames) {
        SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
            cohortRegistry.createCohort(schemaContext, txId, shard::executeInSelf,
                COMMIT_STEP_TIMEOUT), participatingShardNames);
        pendingTransactions.add(new CommitEntry(cohort, readTime()));
        return cohort;
    }

    // Exposed for ShardCommitCoordinator so it does not have to deal with local histories (it does not care), this
    // mimics newReadWriteTransaction()
    final ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Optional<SortedSet<String>> participatingShardNames) {
        final var historyId = txId.getHistoryId();
        if (historyId.getHistoryId() == 0) {
            return createReadyCohort(txId, mod, participatingShardNames);
        }
        return ensureTransactionChain(historyId, null).createReadyCohort(txId, mod, participatingShardNames);
    }

    @SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
    final void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
            final Function<SimpleShardDataTreeCohort, OptionalLong> accessTimeUpdater) {
        final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
        final long now = readTime();

        final Queue<CommitEntry> currentQueue = !pendingFinishCommits.isEmpty() ? pendingFinishCommits :
            !pendingCommits.isEmpty() ? pendingCommits : pendingTransactions;
        final CommitEntry currentTx = currentQueue.peek();
        if (currentTx == null) {
            // Empty queue, no-op
            return;
        }

        long delta = now - currentTx.lastAccess;
        if (delta < timeout) {
            // Not expired yet, bail
            return;
        }

        final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort);
        if (updateOpt.isPresent()) {
            final long newAccess = updateOpt.orElseThrow();
            final long newDelta = now - newAccess;
            if (newDelta < delta) {
                LOG.debug("{}: Updated current transaction {} access time", logContext,
                    currentTx.cohort.transactionId());
                currentTx.lastAccess = newAccess;
                delta = newDelta;
            }

            if (delta < timeout) {
                // Not expired yet, bail
                return;
            }
        }

        final long deltaMillis = TimeUnit.NANOSECONDS.toMillis(delta);
        final State state = currentTx.cohort.getState();

        LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
            currentTx.cohort.transactionId(), deltaMillis, state);
        boolean processNext = true;
        final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after "
            + deltaMillis + "ms");

        switch (state) {
            case CAN_COMMIT_PENDING:
                currentQueue.remove().cohort.failedCanCommit(cohortFailure);
                break;
            case CAN_COMMIT_COMPLETE:
                // The suppression of the FindBugs "DB_DUPLICATE_SWITCH_CLAUSES" warning pertains to this clause
                // whose code is duplicated with PRE_COMMIT_COMPLETE. The clauses aren't combined in case the code
                // in PRE_COMMIT_COMPLETE is changed.
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case PRE_COMMIT_PENDING:
                currentQueue.remove().cohort.failedPreCommit(cohortFailure);
                break;
            case PRE_COMMIT_COMPLETE:
                // FIXME: this is a legacy behavior problem. Three-phase commit protocol specifies that after we
                // are ready we should commit the transaction, not abort it. Our current software stack does
                // not allow us to do that consistently, because we persist at the time of commit, hence
                // we can end up in a state where we have pre-committed a transaction, then a leader failover
                // occurred ... the new leader does not see the pre-committed transaction and does not have
                // a running timer. To fix this we really need two persistence events.
                //
                // The first one, done at pre-commit time will hold the transaction payload. When consensus
                // is reached, we exit the pre-commit phase and start the pre-commit timer. Followers do not
                // apply the state in this event.
                //
                // The second one, done at commit (or abort) time holds only the transaction identifier and
                // signals to followers that the state should (or should not) be applied.
                //
                // In order to make the pre-commit timer work across failovers, though, we need
                // a per-shard cluster-wide monotonic time, so a follower becoming the leader can accurately
                // restart the timer.
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case COMMIT_PENDING:
                LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
                    currentTx.cohort.transactionId());
                currentTx.lastAccess = now;
                processNext = false;
                break;
            case READY:
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case ABORTED:
            case COMMITTED:
            case FAILED:
            default:
                currentQueue.remove();
        }

        if (processNext) {
            processNextPending();
        }
    }

    // non-final for mocking
    boolean startAbort(final SimpleShardDataTreeCohort cohort) {
        final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
            pendingTransactions).iterator();
        if (!it.hasNext()) {
            LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.transactionId());
            return true;
        }

        // First entry is special, as it may already be committing
        final CommitEntry first = it.next();
        if (cohort.equals(first.cohort)) {
            if (cohort.getState() != State.COMMIT_PENDING) {
                LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.transactionId(),
                    cohort.getState());

                it.remove();
                if (cohort.getCandidate() != null) {
                    rebaseTransactions(it, dataTree);
                }

                processNextPending();
                return true;
            }

            LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.transactionId());
            return false;
        }

        DataTreeTip newTip = requireNonNullElse(first.cohort.getCandidate(), dataTree);
        while (it.hasNext()) {
            final CommitEntry e = it.next();
            if (cohort.equals(e.cohort)) {
                LOG.debug("{}: aborting queued transaction {}", logContext, cohort.transactionId());

                it.remove();
                if (cohort.getCandidate() != null) {
                    rebaseTransactions(it, newTip);
                }

                return true;
            }

            newTip = requireNonNullElse(e.cohort.getCandidate(), newTip);
        }

        LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.transactionId());
        return true;
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void rebaseTransactions(final Iterator<CommitEntry> iter, final @NonNull DataTreeTip newTip) {
        tip = requireNonNull(newTip);
        while (iter.hasNext()) {
            final SimpleShardDataTreeCohort cohort = iter.next().cohort;
            if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
                LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.transactionId());

                try {
                    tip.validate(cohort.getDataTreeModification());
                } catch (DataValidationFailedException | RuntimeException e) {
                    LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.transactionId(), e);
                    cohort.reportFailure(e);
                }
            } else if (cohort.getState() == State.PRE_COMMIT_COMPLETE) {
                LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.transactionId());

                try {
                    tip.validate(cohort.getDataTreeModification());
                    DataTreeCandidateTip candidate = tip.prepare(cohort.getDataTreeModification());

                    cohort.setNewCandidate(candidate);
                    tip = candidate;
                } catch (RuntimeException | DataValidationFailedException e) {
                    LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.transactionId(), e);
                    cohort.reportFailure(e);
                }
            }
        }
    }

    final void setRunOnPendingTransactionsComplete(final Runnable operation) {
        runOnPendingTransactionsComplete = operation;
        maybeRunOperationOnPendingTransactionsComplete();
    }

    private void maybeRunOperationOnPendingTransactionsComplete() {
        if (runOnPendingTransactionsComplete != null && !anyPendingTransactions()) {
            LOG.debug("{}: Pending transactions complete - running operation {}", logContext,
                runOnPendingTransactionsComplete);

            runOnPendingTransactionsComplete.run();
            runOnPendingTransactionsComplete = null;
        }
    }

    final ShardStats getStats() {
        return shard.getShardMBean();
    }

    final Iterator<SimpleShardDataTreeCohort> cohortIterator() {
        return Iterables.transform(Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions),
            e -> e.cohort).iterator();
    }

    final void removeTransactionChain(final LocalHistoryIdentifier id) {
        if (transactionChains.remove(id) != null) {
            LOG.debug("{}: Removed transaction chain {}", logContext, id);
        }
    }
}