/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;

import akka.actor.ActorRef;
import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.collect.Iterables;
import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FutureCallback;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Queue;
import java.util.SortedSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.UnaryOperator;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.node.utils.transformer.ReusableNormalizedNodePruner;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.FiniteDuration;

/**
 * Internal shard state, similar to a DOMStore, but optimized for use in the actor system, e.g. it does not expose
 * public interfaces and assumes it is only ever called from a single thread.
 *
 * <p>
 * This class is not part of the API contract and is subject to change at any time. It is NOT thread-safe.
 */
public class ShardDataTree extends ShardDataTreeTransactionParent {
    private static final class CommitEntry {
        final SimpleShardDataTreeCohort cohort;
        long lastAccess;

        CommitEntry(final SimpleShardDataTreeCohort cohort, final long now) {
            this.cohort = requireNonNull(cohort);
            lastAccess = now;
        }

        @Override
        public String toString() {
            return "CommitEntry [tx=" + cohort.getIdentifier() + ", state=" + cohort.getState() + "]";
        }
    }

    private static final Timeout COMMIT_STEP_TIMEOUT = new Timeout(FiniteDuration.create(5, TimeUnit.SECONDS));
    private static final Logger LOG = LoggerFactory.getLogger(ShardDataTree.class);

    /**
     * Process this many transactions in a single batched run. If we exceed this limit, we need to schedule later
     * execution to finish up the batch. This is necessary in case of a long list of transactions which progress
     * immediately through their preCommit phase -- if that happens, their completion eats up stack frames and could
     * result in StackOverflowError.
     */
    private static final int MAX_TRANSACTION_BATCH = 100;

    private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
    private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
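    // Note on the commit pipeline: cohorts start out in pendingTransactions (canCommit/preCommit), move to
    // pendingCommits once their candidate has been prepared, and sit in pendingFinishCommits while the
    // corresponding CommitTransactionPayload is being replicated. Each queue is drained strictly head-first.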
    private final Deque<CommitEntry> pendingTransactions = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingCommits = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingFinishCommits = new ArrayDeque<>();

    /**
     * Callbacks that need to be invoked once a payload is replicated.
     */
    private final Map<Payload, Runnable> replicationCallbacks = new HashMap<>();

    private final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher;
    private final Collection<ShardDataTreeMetadata<?>> metadata;
    private final DataTree dataTree;
    private final String logContext;
    private final Shard shard;
    private Runnable runOnPendingTransactionsComplete;

    /**
     * Optimistic {@link DataTreeCandidate} preparation. Since our DataTree implementation is a
     * {@link DataTree}, each {@link DataTreeCandidate} is also a {@link DataTreeTip}, e.g. another
     * candidate can be prepared on top of it. They still need to be committed in sequence. Here we track the current
     * tip of the data tree, which is the last DataTreeCandidate we have in flight, or the DataTree itself.
     */
    private DataTreeTip tip;

    private SchemaContext schemaContext;
    private DataSchemaContextTree dataSchemaContext;

    private int currentTransactionBatch;

    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final DataTree dataTree,
            final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
            final String logContext,
            final ShardDataTreeMetadata<?>... metadata) {
        this.dataTree = requireNonNull(dataTree);
        updateSchemaContext(schemaContext);

        this.shard = requireNonNull(shard);
        this.treeChangeListenerPublisher = requireNonNull(treeChangeListenerPublisher);
        this.logContext = requireNonNull(logContext);
        this.metadata = ImmutableList.copyOf(metadata);
        tip = dataTree;
    }

    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType,
            final YangInstanceIdentifier root,
            final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
            final String logContext,
            final ShardDataTreeMetadata<?>... metadata) {
        this(shard, schemaContext, createDataTree(treeType, root), treeChangeListenerPublisher, logContext, metadata);
    }

    private static DataTree createDataTree(final TreeType treeType, final YangInstanceIdentifier root) {
        final DataTreeConfiguration baseConfig = DataTreeConfiguration.getDefault(treeType);
        return new InMemoryDataTreeFactory().create(new DataTreeConfiguration.Builder(baseConfig.getTreeType())
                .setMandatoryNodesValidation(baseConfig.isMandatoryNodesValidationEnabled())
                .setUniqueIndexes(baseConfig.isUniqueIndexEnabled())
                .setRootPath(root)
                .build());
    }

    @VisibleForTesting
    public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType) {
        this(shard, schemaContext, treeType, YangInstanceIdentifier.empty(),
                new DefaultShardDataTreeChangeListenerPublisher(""), "");
    }

    final String logContext() {
        return logContext;
    }

    final long readTime() {
        return shard.ticker().read();
    }

    public DataTree getDataTree() {
        return dataTree;
    }

    SchemaContext getSchemaContext() {
        return schemaContext;
    }

    void updateSchemaContext(final SchemaContext newSchemaContext) {
        dataTree.setSchemaContext(newSchemaContext);
        this.schemaContext = requireNonNull(newSchemaContext);
        this.dataSchemaContext = DataSchemaContextTree.from(newSchemaContext);
    }

    void resetTransactionBatch() {
        currentTransactionBatch = 0;
    }

    /**
     * Take a snapshot of current state for later recovery.
     *
     * @return A state snapshot
     */
    @NonNull ShardDataTreeSnapshot takeStateSnapshot() {
        final NormalizedNode<?, ?> rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()).get();
        final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metaBuilder =
                ImmutableMap.builder();

        for (ShardDataTreeMetadata<?> m : metadata) {
            final ShardDataTreeSnapshotMetadata<?> meta = m.toSnapshot();
            if (meta != null) {
                metaBuilder.put(meta.getType(), meta);
            }
        }

        return new MetadataShardDataTreeSnapshot(rootNode, metaBuilder.build());
    }

    private boolean anyPendingTransactions() {
        return !pendingTransactions.isEmpty() || !pendingCommits.isEmpty() || !pendingFinishCommits.isEmpty();
    }

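    // Common snapshot application path. The wrapper parameter lets callers interpose a pruning modification
    // (recovery, where the SchemaContexts may differ) or pass the modification through unchanged (leader snapshots).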
    private void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot,
            final UnaryOperator<DataTreeModification> wrapper) throws DataValidationFailedException {
        final Stopwatch elapsed = Stopwatch.createStarted();

        if (anyPendingTransactions()) {
            LOG.warn("{}: applying state snapshot with pending transactions", logContext);
        }

        final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> snapshotMeta;
        if (snapshot instanceof MetadataShardDataTreeSnapshot) {
            snapshotMeta = ((MetadataShardDataTreeSnapshot) snapshot).getMetadata();
        } else {
            snapshotMeta = ImmutableMap.of();
        }

        for (ShardDataTreeMetadata<?> m : metadata) {
            final ShardDataTreeSnapshotMetadata<?> s = snapshotMeta.get(m.getSupportedType());
            if (s != null) {
                m.applySnapshot(s);
            } else {
                m.reset();
            }
        }

        final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification();
        final DataTreeModification mod = wrapper.apply(unwrapped);
        // delete everything first
        mod.delete(YangInstanceIdentifier.empty());

        final Optional<NormalizedNode<?, ?>> maybeNode = snapshot.getRootNode();
        if (maybeNode.isPresent()) {
            // Add everything from the remote node back
            mod.write(YangInstanceIdentifier.empty(), maybeNode.get());
        }

        mod.ready();

        dataTree.validate(unwrapped);
        DataTreeCandidateTip candidate = dataTree.prepare(unwrapped);
        dataTree.commit(candidate);
        notifyListeners(candidate);

        LOG.debug("{}: state snapshot applied in {}", logContext, elapsed);
    }

    /**
     * Apply a snapshot coming from the leader. This method assumes the leader and follower SchemaContexts match and
     * does not perform any pruning.
     *
     * @param snapshot Snapshot that needs to be applied
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
        // TODO: we should be taking ShardSnapshotState here and performing forward-compatibility translation
        applySnapshot(snapshot, UnaryOperator.identity());
    }

    /**
     * Apply a snapshot coming from recovery. This method does not assume the SchemaContexts match and performs data
     * pruning in an attempt to adjust the state to our current SchemaContext.
     *
     * @param snapshot Snapshot that needs to be applied
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applyRecoverySnapshot(final @NonNull ShardSnapshotState snapshot) throws DataValidationFailedException {
        // TODO: we should be able to reuse the pruner, provided we are not reentrant
        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
            dataSchemaContext);
        if (snapshot.needsMigration()) {
            final ReusableNormalizedNodePruner uintPruner = pruner.withUintAdaption();
            applySnapshot(snapshot.getSnapshot(),
                delegate -> new PruningDataTreeModification.Proactive(delegate, dataTree, uintPruner));
        } else {
            applySnapshot(snapshot.getSnapshot(),
                delegate -> new PruningDataTreeModification.Reactive(delegate, dataTree, pruner));
        }
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void applyRecoveryCandidate(final CommitTransactionPayload payload) throws IOException {
        final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> entry = payload.getCandidate();
        final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification();
        final PruningDataTreeModification mod = createPruningModification(unwrapped,
            NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.getValue().getVersion()) > 0);

        DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
        mod.ready();

        LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped);

        try {
            dataTree.validate(unwrapped);
            dataTree.commit(dataTree.prepare(unwrapped));
        } catch (Exception e) {
            File file = new File(System.getProperty("karaf.data", "."),
                    "failed-recovery-payload-" + logContext + ".out");
            DataTreeModificationOutput.toFile(file, unwrapped);
            throw new IllegalStateException(String.format(
                "%s: Failed to apply recovery payload. Modification data was written to file %s",
                logContext, file), e);
        }

        allMetadataCommittedTransaction(entry.getKey());
    }

    private PruningDataTreeModification createPruningModification(final DataTreeModification unwrapped,
            final boolean uintAdapting) {
        // TODO: we should be able to reuse the pruner, provided we are not reentrant
        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
            dataSchemaContext);
        return uintAdapting ? new PruningDataTreeModification.Proactive(unwrapped, dataTree, pruner.withUintAdaption())
                : new PruningDataTreeModification.Reactive(unwrapped, dataTree, pruner);
    }

    /**
     * Apply a payload coming from recovery. This method does not assume the SchemaContexts match and performs data
     * pruning in an attempt to adjust the state to our current SchemaContext.
     *
     * @param payload Payload
     * @throws IOException when the snapshot fails to deserialize
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
        if (payload instanceof CommitTransactionPayload) {
            applyRecoveryCandidate((CommitTransactionPayload) payload);
        } else if (payload instanceof AbortTransactionPayload) {
            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeTransactionPayload) {
            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof CreateLocalHistoryPayload) {
            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof CloseLocalHistoryPayload) {
            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeLocalHistoryPayload) {
            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
        } else {
            LOG.debug("{}: ignoring unhandled payload {}", logContext, payload);
        }
    }

    private void applyReplicatedCandidate(final CommitTransactionPayload payload)
            throws DataValidationFailedException, IOException {
        final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> entry = payload.getCandidate();
        final TransactionIdentifier identifier = entry.getKey();
        LOG.debug("{}: Applying foreign transaction {}", logContext, identifier);

        final DataTreeModification mod = dataTree.takeSnapshot().newModification();
        // TODO: check version here, which will enable us to perform forward-compatibility transformations
        DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
        mod.ready();

        LOG.trace("{}: Applying foreign modification {}", logContext, mod);
        dataTree.validate(mod);
        final DataTreeCandidate candidate = dataTree.prepare(mod);
        dataTree.commit(candidate);

        allMetadataCommittedTransaction(identifier);
        notifyListeners(candidate);
    }

    /**
     * Apply a payload coming from the leader, which could actually be us. This method assumes the leader and follower
     * SchemaContexts match and does not perform any pruning.
     *
     * @param identifier Payload identifier as returned from RaftActor
     * @param payload Payload
     * @throws IOException when the snapshot fails to deserialize
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
            DataValidationFailedException {
        /*
         * This is a bit more involved than it needs to be due to the fact we do not want to be touching the payload
         * if we are the leader and it has originated with us.
         *
         * The identifier will only ever be non-null when we were the leader which achieved consensus. Unfortunately,
         * though, this may not be the case anymore, as we are being called some time afterwards and we may not be
         * acting in that capacity anymore.
         *
         * In any case, we know that this is an entry coming from replication, hence we can be sure we will not observe
         * pre-Boron state -- which limits the number of options here.
         */
        if (payload instanceof CommitTransactionPayload) {
            if (identifier == null) {
                applyReplicatedCandidate((CommitTransactionPayload) payload);
            } else {
                verify(identifier instanceof TransactionIdentifier);
                payloadReplicationComplete((TransactionIdentifier) identifier);
            }
        } else if (payload instanceof AbortTransactionPayload) {
            if (identifier != null) {
                payloadReplicationComplete((AbortTransactionPayload) payload);
            }
            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeTransactionPayload) {
            if (identifier != null) {
                payloadReplicationComplete((PurgeTransactionPayload) payload);
            }
            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof CloseLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((CloseLocalHistoryPayload) payload);
            }
            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof CreateLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((CreateLocalHistoryPayload) payload);
            }
            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((PurgeLocalHistoryPayload) payload);
            }
            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
        } else {
            LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
        }
    }

    private void replicatePayload(final Identifier id, final Payload payload, final @Nullable Runnable callback) {
        if (callback != null) {
            replicationCallbacks.put(payload, callback);
        }
        shard.persistPayload(id, payload, true);
    }

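    // Note: replicatePayload() and the payloadReplicationComplete() overloads below form a pair: the callback
    // registered here is looked up and invoked once applyReplicatedPayload() observes consensus on the
    // corresponding payload.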
    private void payloadReplicationComplete(final AbstractIdentifiablePayload<?> payload) {
        final Runnable callback = replicationCallbacks.remove(payload);
        if (callback != null) {
            LOG.debug("{}: replication of {} completed, invoking {}", logContext, payload.getIdentifier(), callback);
            callback.run();
        } else {
            LOG.debug("{}: replication of {} has no callback", logContext, payload.getIdentifier());
        }
    }

    private void payloadReplicationComplete(final TransactionIdentifier txId) {
        final CommitEntry current = pendingFinishCommits.peek();
        if (current == null) {
            LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
            allMetadataCommittedTransaction(txId);
            return;
        }

        if (!current.cohort.getIdentifier().equals(txId)) {
            LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
                current.cohort.getIdentifier(), txId);
            allMetadataCommittedTransaction(txId);
            return;
        }

        finishCommit(current.cohort);
    }

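    // The allMetadata*() helpers below fan a transaction or local-history lifecycle event out to every registered
    // ShardDataTreeMetadata instance.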
    private void allMetadataAbortedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionAborted(txId);
        }
    }

    private void allMetadataCommittedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionCommitted(txId);
        }
    }

    private void allMetadataPurgedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionPurged(txId);
        }
    }

    private void allMetadataCreatedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryCreated(historyId);
        }
    }

    private void allMetadataClosedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryClosed(historyId);
        }
    }

    private void allMetadataPurgedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryPurged(historyId);
        }
    }

    /**
     * Create a transaction chain for specified history. Unlike
     * {@link #ensureTransactionChain(LocalHistoryIdentifier, Runnable)}, this method is used for re-establishing
     * state when we are taking over leadership.
     *
     * @param historyId Local history identifier
     * @param closed True if the chain should be created in closed state (i.e. pending purge)
     * @return Transaction chain handle
     */
    ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
            final boolean closed) {
        final ShardDataTreeTransactionChain ret = new ShardDataTreeTransactionChain(historyId, this);
        final ShardDataTreeTransactionChain existing = transactionChains.putIfAbsent(historyId, ret);
        checkState(existing == null, "Attempted to recreate chain %s, but %s already exists", historyId, existing);
        return ret;
    }

    ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
            final @Nullable Runnable callback) {
        ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
        if (chain == null) {
            chain = new ShardDataTreeTransactionChain(historyId, this);
            transactionChains.put(historyId, chain);
            replicatePayload(historyId, CreateLocalHistoryPayload.create(
                historyId, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
        } else if (callback != null) {
            callback.run();
        }

        return chain;
    }

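    // A history id of 0 denotes a free-standing transaction, i.e. one not attached to a local history (transaction
    // chain); such transactions operate directly on the current data tree snapshot.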
    ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
        shard.getShardMBean().incrementReadOnlyTransactionCount();

        if (txId.getHistoryId().getHistoryId() == 0) {
            return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
        }

        return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
    }

    ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
        shard.getShardMBean().incrementReadWriteTransactionCount();

        if (txId.getHistoryId().getHistoryId() == 0) {
            return new ReadWriteShardDataTreeTransaction(ShardDataTree.this, txId, dataTree.takeSnapshot()
                .newModification());
        }

        return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId);
    }

    @VisibleForTesting
    public void notifyListeners(final DataTreeCandidate candidate) {
        treeChangeListenerPublisher.publishChanges(candidate);
    }

    /**
     * Immediately purge all state relevant to leader. This includes all transaction chains and any scheduled
     * replication callbacks.
     */
    void purgeLeaderState() {
        for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
            chain.close();
        }

        transactionChains.clear();
        replicationCallbacks.clear();
    }

    /**
     * Close a single transaction chain.
     *
     * @param id History identifier
     * @param callback Callback to invoke upon completion, may be null
     */
    void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        if (commonCloseTransactionChain(id, callback)) {
            replicatePayload(id, CloseLocalHistoryPayload.create(id,
                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
        }
    }

    /**
     * Close a single transaction chain which is received through ask-based protocol. It does not keep a commit record.
     *
     * @param id History identifier
     */
    void closeTransactionChain(final LocalHistoryIdentifier id) {
        commonCloseTransactionChain(id, null);
    }

    private boolean commonCloseTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.get(id);
        if (chain == null) {
            LOG.debug("{}: Closing non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return false;
        }

        chain.close();
        return true;
    }

    /**
     * Purge a single transaction chain.
     *
     * @param id History identifier
     * @param callback Callback to invoke upon completion, may be null
     */
    void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.remove(id);
        if (chain == null) {
            LOG.debug("{}: Purging non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return;
        }

        replicatePayload(id, PurgeLocalHistoryPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    Optional<DataTreeCandidate> readCurrentData() {
        return dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty())
            .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(), state));
    }

    public void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
            final Optional<DataTreeCandidate> initialState,
            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
        treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
    }

    int getQueueSize() {
        return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
    }

    @Override
    void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
        final TransactionIdentifier id = transaction.getIdentifier();
        LOG.debug("{}: aborting transaction {}", logContext, id);
        replicatePayload(id, AbortTransactionPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    @Override
    void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
        // No-op for free-standing transactions
    }

    @Override
    ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
            final Optional<SortedSet<String>> participatingShardNames) {
        final DataTreeModification snapshot = transaction.getSnapshot();
        final TransactionIdentifier id = transaction.getIdentifier();
        LOG.debug("{}: readying transaction {}", logContext, id);
        snapshot.ready();
        LOG.debug("{}: transaction {} ready", logContext, id);

        return createReadyCohort(transaction.getIdentifier(), snapshot, participatingShardNames);
    }

    void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
        LOG.debug("{}: purging transaction {}", logContext, id);
        replicatePayload(id, PurgeTransactionPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    public Optional<NormalizedNode<?, ?>> readNode(final YangInstanceIdentifier path) {
        return dataTree.takeSnapshot().readNode(path);
    }

    DataTreeSnapshot takeSnapshot() {
        return dataTree.takeSnapshot();
    }

    @VisibleForTesting
    public DataTreeModification newModification() {
        return dataTree.takeSnapshot().newModification();
    }

    public Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
        Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());

        for (CommitEntry entry: pendingFinishCommits) {
            ret.add(entry.cohort);
        }

        for (CommitEntry entry: pendingCommits) {
            ret.add(entry.cohort);
        }

        for (CommitEntry entry: pendingTransactions) {
            ret.add(entry.cohort);
        }

        pendingFinishCommits.clear();
        pendingCommits.clear();
        pendingTransactions.clear();
        tip = dataTree;
        return ret;
    }

    /**
     * Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
     */
    void resumeNextPendingTransaction() {
        LOG.debug("{}: attempting to resume transaction processing", logContext);
        processNextPending();
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void processNextPendingTransaction() {
        ++currentTransactionBatch;
        if (currentTransactionBatch > MAX_TRANSACTION_BATCH) {
            LOG.debug("{}: Already processed {}, scheduling continuation", logContext, currentTransactionBatch);
            shard.scheduleNextPendingTransaction();
            return;
        }

        processNextPending(pendingTransactions, State.CAN_COMMIT_PENDING, entry -> {
            final SimpleShardDataTreeCohort cohort = entry.cohort;
            final DataTreeModification modification = cohort.getDataTreeModification();

            LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier());
            Exception cause;
            try {
                tip.validate(modification);
                LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier());
                cohort.successfulCanCommit();
                entry.lastAccess = readTime();
                return;
            } catch (ConflictingModificationAppliedException e) {
                LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(),
                    e.getPath());
                cause = new OptimisticLockFailedException("Optimistic lock failed for path " + e.getPath(), e);
            } catch (DataValidationFailedException e) {
                LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.getIdentifier(),
                    e.getPath(), e);

                // For debugging purposes, allow dumping of the modification. Coupled with the above
                // precondition log, it should allow us to understand what went on.
                LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.getIdentifier(), modification);
                LOG.trace("{}: Current tree: {}", logContext, dataTree);
                cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
            } catch (Exception e) {
                LOG.warn("{}: Unexpected failure in validation phase", logContext, e);
                cause = e;
            }

            // Failure path: propagate the failure, remove the transaction from the queue and loop to the next one
            pendingTransactions.poll().cohort.failedCanCommit(cause);
        });
    }

    private void processNextPending() {
        processNextPendingCommit();
        processNextPendingTransaction();
    }

    private void processNextPending(final Queue<CommitEntry> queue, final State allowedState,
            final Consumer<CommitEntry> processor) {
        while (!queue.isEmpty()) {
            final CommitEntry entry = queue.peek();
            final SimpleShardDataTreeCohort cohort = entry.cohort;

            if (cohort.isFailed()) {
                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.getIdentifier());
                queue.remove();
                continue;
            }

            if (cohort.getState() == allowedState) {
                processor.accept(entry);
            }

            break;
        }

        maybeRunOperationOnPendingTransactionsComplete();
    }

    private void processNextPendingCommit() {
        processNextPending(pendingCommits, State.COMMIT_PENDING,
            entry -> startCommit(entry.cohort, entry.cohort.getCandidate()));
    }

    private boolean peekNextPendingCommit() {
        final CommitEntry first = pendingCommits.peek();
        return first != null && first.cohort.getState() == State.COMMIT_PENDING;
    }

    void startCanCommit(final SimpleShardDataTreeCohort cohort) {
        final CommitEntry head = pendingTransactions.peek();
        if (head == null) {
            LOG.warn("{}: No transactions enqueued while attempting to start canCommit on {}", logContext, cohort);
            return;
        }
        if (!cohort.equals(head.cohort)) {
            // The tx isn't at the head of the queue so we can't start canCommit at this point. Here we check if this
            // tx should be moved ahead of other tx's in the READY state in the pendingTransactions queue. If this tx
            // has other participating shards, it could deadlock with other tx's accessing the same shards
            // depending on the order the tx's are readied on each shard
            // (see https://jira.opendaylight.org/browse/CONTROLLER-1836). Therefore, if the preceding participating
            // shard names for a preceding pending tx, call it A, in the queue matches that of this tx, then this tx
            // is allowed to be moved ahead of tx A in the queue so it is processed first to avoid potential deadlock
            // if tx A is behind this tx in the pendingTransactions queue for a preceding shard. In other words, since
            // canCommmit for this tx was requested before tx A, honor that request. If this tx is moved to the head of
            // the queue as a result, then proceed with canCommit.

            Collection<String> precedingShardNames = extractPrecedingShardNames(cohort.getParticipatingShardNames());
            if (precedingShardNames.isEmpty()) {
                LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.getIdentifier());
                return;
            }

            LOG.debug("{}: Evaluating tx {} for canCommit - preceding participating shard names {}",
                logContext, cohort.getIdentifier(), precedingShardNames);
            final Iterator<CommitEntry> iter = pendingTransactions.iterator();
            int index = -1;
            int moveToIndex = -1;
            while (iter.hasNext()) {
                final CommitEntry entry = iter.next();
                ++index;

                if (cohort.equals(entry.cohort)) {
                    if (moveToIndex < 0) {
                        LOG.debug("{}: Not moving tx {} - cannot proceed with canCommit",
                            logContext, cohort.getIdentifier());
                        return;
                    }

                    LOG.debug("{}: Moving {} to index {} in the pendingTransactions queue",
                        logContext, cohort.getIdentifier(), moveToIndex);
                    iter.remove();
                    insertEntry(pendingTransactions, entry, moveToIndex);

                    if (!cohort.equals(pendingTransactions.peek().cohort)) {
                        LOG.debug("{}: Tx {} is not at the head of the queue - cannot proceed with canCommit",
                            logContext, cohort.getIdentifier());
                        return;
                    }

                    LOG.debug("{}: Tx {} is now at the head of the queue - proceeding with canCommit",
                        logContext, cohort.getIdentifier());
                    break;
                }

                if (entry.cohort.getState() != State.READY) {
                    LOG.debug("{}: Skipping pending transaction {} in state {}",
                        logContext, entry.cohort.getIdentifier(), entry.cohort.getState());
                    continue;
                }

                final Collection<String> pendingPrecedingShardNames = extractPrecedingShardNames(
                    entry.cohort.getParticipatingShardNames());

                if (precedingShardNames.equals(pendingPrecedingShardNames)) {
                    if (moveToIndex < 0) {
                        LOG.debug("{}: Preceding shard names {} for pending tx {} match - saving moveToIndex {}",
                            logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), index);
                        moveToIndex = index;
                    } else {
                        LOG.debug(
                            "{}: Preceding shard names {} for pending tx {} match but moveToIndex already set to {}",
                            logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), moveToIndex);
                    }
                } else {
                    LOG.debug("{}: Preceding shard names {} for pending tx {} differ - skipping",
                        logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier());
                }
            }
        }

        processNextPendingTransaction();
    }

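    // Helper for startCanCommit(): re-inserts the given entry at position atIndex by temporarily popping the
    // preceding entries onto a stack and pushing them back after the entry has been added to the front.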
    private static void insertEntry(final Deque<CommitEntry> queue, final CommitEntry entry, final int atIndex) {
        if (atIndex == 0) {
            queue.addFirst(entry);
            return;
        }

        LOG.trace("Inserting into Deque at index {}", atIndex);

        Deque<CommitEntry> tempStack = new ArrayDeque<>(atIndex);
        for (int i = 0; i < atIndex; i++) {
            tempStack.push(queue.poll());
        }

        queue.addFirst(entry);

        tempStack.forEach(queue::addFirst);
    }

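    // Returns the participating shard names which sort before this shard's name, i.e. the shards expected to ready
    // the transaction ahead of us; an empty collection means no ordering constraint applies.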
    private Collection<String> extractPrecedingShardNames(final Optional<SortedSet<String>> participatingShardNames) {
        return participatingShardNames.map((Function<SortedSet<String>, Collection<String>>)
            set -> set.headSet(shard.getShardName())).orElse(Collections.<String>emptyList());
    }

    private void failPreCommit(final Throwable cause) {
        shard.getShardMBean().incrementFailedTransactionsCount();
        pendingTransactions.poll().cohort.failedPreCommit(cause);
        processNextPendingTransaction();
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    void startPreCommit(final SimpleShardDataTreeCohort cohort) {
        final CommitEntry entry = pendingTransactions.peek();
        checkState(entry != null, "Attempted to pre-commit of %s when no transactions pending", cohort);

        final SimpleShardDataTreeCohort current = entry.cohort;
        verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);

        final TransactionIdentifier currentId = current.getIdentifier();
        LOG.debug("{}: Preparing transaction {}", logContext, currentId);

        final DataTreeCandidateTip candidate;
        try {
            candidate = tip.prepare(cohort.getDataTreeModification());
            LOG.debug("{}: Transaction {} candidate ready", logContext, currentId);
        } catch (DataValidationFailedException | RuntimeException e) {
            failPreCommit(e);
            return;
        }

        cohort.userPreCommit(candidate, new FutureCallback<Void>() {
            @Override
            public void onSuccess(final Void noop) {
                // Set the tip of the data tree.
                tip = verifyNotNull(candidate);

                entry.lastAccess = readTime();

                pendingTransactions.remove();
                pendingCommits.add(entry);

                LOG.debug("{}: Transaction {} prepared", logContext, currentId);

                cohort.successfulPreCommit(candidate);

                processNextPendingTransaction();
            }

            @Override
            public void onFailure(final Throwable failure) {
                failPreCommit(failure);
            }
        });
    }

    private void failCommit(final Exception cause) {
        shard.getShardMBean().incrementFailedTransactionsCount();
        pendingFinishCommits.poll().cohort.failedCommit(cause);
        processNextPending();
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void finishCommit(final SimpleShardDataTreeCohort cohort) {
        final TransactionIdentifier txId = cohort.getIdentifier();
        final DataTreeCandidate candidate = cohort.getCandidate();

        LOG.debug("{}: Resuming commit of transaction {}", logContext, txId);

        if (tip == candidate) {
            // All pending candidates have been committed, reset the tip to the data tree.
            tip = dataTree;
        }

        try {
            dataTree.commit(candidate);
        } catch (Exception e) {
            LOG.error("{}: Failed to commit transaction {}", logContext, txId, e);
            failCommit(e);
            return;
        }

        allMetadataCommittedTransaction(txId);
        shard.getShardMBean().incrementCommittedTransactionCount();
        shard.getShardMBean().setLastCommittedTransactionTime(System.currentTimeMillis());

        // FIXME: propagate journal index
        pendingFinishCommits.poll().cohort.successfulCommit(UnsignedLong.ZERO, () -> {
            LOG.trace("{}: Transaction {} committed, proceeding to notify", logContext, txId);
            notifyListeners(candidate);

            processNextPending();
        });
    }

    void startCommit(final SimpleShardDataTreeCohort cohort, final DataTreeCandidate candidate) {
        final CommitEntry entry = pendingCommits.peek();
        checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);

        final SimpleShardDataTreeCohort current = entry.cohort;
        if (!cohort.equals(current)) {
            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.getIdentifier());
            return;
        }

        LOG.debug("{}: Starting commit for transaction {}", logContext, current.getIdentifier());

        final TransactionIdentifier txId = cohort.getIdentifier();
        final Payload payload;
        try {
            payload = CommitTransactionPayload.create(txId, candidate, PayloadVersion.current(),
                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity());
        } catch (IOException e) {
            LOG.error("{}: Failed to encode transaction {} candidate {}", logContext, txId, candidate, e);
            pendingCommits.poll().cohort.failedCommit(e);
            processNextPending();
            return;
        }

        // We process next transactions pending canCommit before we call persistPayload to possibly progress subsequent
        // transactions to the COMMIT_PENDING state so the payloads can be batched for replication. This is done for
        // single-shard transactions that immediately transition from canCommit to preCommit to commit. Note that
        // if the next pending transaction is progressed to COMMIT_PENDING and this method (startCommit) is called,
        // the next transaction will not attempt to replicate b/c the current transaction is still at the head of the
        // pendingCommits queue.
        processNextPendingTransaction();

        // After processing next pending transactions, we can now remove the current transaction from pendingCommits.
        // Note this must be done before the call to peekNextPendingCommit below so we check the next transaction
        // in order to properly determine the batchHint flag for the call to persistPayload.
        pendingCommits.remove();
        pendingFinishCommits.add(entry);

        // See if the next transaction is pending commit (ie in the COMMIT_PENDING state) so it can be batched with
        // this transaction for replication.
        boolean replicationBatchHint = peekNextPendingCommit();

        // Once completed, we will continue via payloadReplicationComplete
        shard.persistPayload(txId, payload, replicationBatchHint);

        entry.lastAccess = shard.ticker().read();

        LOG.debug("{}: Transaction {} submitted to persistence", logContext, txId);

        // Process the next transaction pending commit, if any. If there is one it will be batched with this
        // transaction for replication.
        processNextPendingCommit();
    }

    Collection<ActorRef> getCohortActors() {
        return cohortRegistry.getCohortActors();
    }

    void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
        cohortRegistry.process(sender, message);
    }

    @Override
    ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Exception failure) {
        final SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId, failure);
        pendingTransactions.add(new CommitEntry(cohort, readTime()));
        return cohort;
    }

    @Override
    ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Optional<SortedSet<String>> participatingShardNames) {
        SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
            cohortRegistry.createCohort(schemaContext, txId, shard::executeInSelf,
                COMMIT_STEP_TIMEOUT), participatingShardNames);
        pendingTransactions.add(new CommitEntry(cohort, readTime()));
        return cohort;
    }

    // Exposed for ShardCommitCoordinator so it does not have to deal with local histories (it does not care), this
    // mimics the newReadWriteTransaction()
    ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Optional<SortedSet<String>> participatingShardNames) {
        if (txId.getHistoryId().getHistoryId() == 0) {
            return createReadyCohort(txId, mod, participatingShardNames);
        }

        return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod, participatingShardNames);
    }

    @SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
    void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
            final Function<SimpleShardDataTreeCohort, OptionalLong> accessTimeUpdater) {
        final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
        final long now = readTime();

        final Queue<CommitEntry> currentQueue = !pendingFinishCommits.isEmpty() ? pendingFinishCommits :
            !pendingCommits.isEmpty() ? pendingCommits : pendingTransactions;
        final CommitEntry currentTx = currentQueue.peek();
        if (currentTx == null) {
            // Empty queue, no-op
            return;
        }

        long delta = now - currentTx.lastAccess;
        if (delta < timeout) {
            // Not expired yet, bail
            return;
        }

        final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort);
        if (updateOpt.isPresent()) {
            final long newAccess = updateOpt.getAsLong();
            final long newDelta = now - newAccess;
            if (newDelta < delta) {
                LOG.debug("{}: Updated current transaction {} access time", logContext,
                    currentTx.cohort.getIdentifier());
                currentTx.lastAccess = newAccess;
                delta = newDelta;
            }

            if (delta < timeout) {
                // Not expired yet, bail
                return;
            }
        }

        final long deltaMillis = TimeUnit.NANOSECONDS.toMillis(delta);
        final State state = currentTx.cohort.getState();

        LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
            currentTx.cohort.getIdentifier(), deltaMillis, state);
        boolean processNext = true;
        final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after "
            + deltaMillis + "ms");

        switch (state) {
            case CAN_COMMIT_PENDING:
                currentQueue.remove().cohort.failedCanCommit(cohortFailure);
                break;
            case CAN_COMMIT_COMPLETE:
                // The suppression of the FindBugs "DB_DUPLICATE_SWITCH_CLAUSES" warning pertains to this clause
                // whose code is duplicated with PRE_COMMIT_COMPLETE. The clauses aren't combined in case the code
                // in PRE_COMMIT_COMPLETE is changed.
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case PRE_COMMIT_PENDING:
                currentQueue.remove().cohort.failedPreCommit(cohortFailure);
                break;
            case PRE_COMMIT_COMPLETE:
                // FIXME: this is a legacy behavior problem. Three-phase commit protocol specifies that after we
                // are ready we should commit the transaction, not abort it. Our current software stack does
                // not allow us to do that consistently, because we persist at the time of commit, hence
                // we can end up in a state where we have pre-committed a transaction, then a leader failover
                // occurred ... the new leader does not see the pre-committed transaction and does not have
                // a running timer. To fix this we really need two persistence events.
                //
                // The first one, done at pre-commit time will hold the transaction payload. When consensus
                // is reached, we exit the pre-commit phase and start the pre-commit timer. Followers do not
                // apply the state in this event.
                //
                // The second one, done at commit (or abort) time holds only the transaction identifier and
                // signals to followers that the state should (or should not) be applied.
                //
                // In order to make the pre-commit timer working across failovers, though, we need
                // a per-shard cluster-wide monotonic time, so a follower becoming the leader can accurately
                // restart the timer.
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case COMMIT_PENDING:
                LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
                    currentTx.cohort.getIdentifier());
                currentTx.lastAccess = now;
                processNext = false;
                break;
            case READY:
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case ABORTED:
            case COMMITTED:
            case FAILED:
            default:
                currentQueue.remove();
        }

        if (processNext) {
            processNextPending();
        }
    }

    boolean startAbort(final SimpleShardDataTreeCohort cohort) {
        final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
            pendingTransactions).iterator();
        if (!it.hasNext()) {
            LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.getIdentifier());
            return true;
        }

        // First entry is special, as it may already be committing
        final CommitEntry first = it.next();
        if (cohort.equals(first.cohort)) {
            if (cohort.getState() != State.COMMIT_PENDING) {
                LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.getIdentifier(),
                    cohort.getState());

                it.remove();
                if (cohort.getCandidate() != null) {
                    rebaseTransactions(it, dataTree);
                }

                processNextPending();
                return true;
            }

            LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.getIdentifier());
            return false;
        }

        DataTreeTip newTip = MoreObjects.firstNonNull(first.cohort.getCandidate(), dataTree);
        while (it.hasNext()) {
            final CommitEntry e = it.next();
            if (cohort.equals(e.cohort)) {
                LOG.debug("{}: aborting queued transaction {}", logContext, cohort.getIdentifier());

                it.remove();
                if (cohort.getCandidate() != null) {
                    rebaseTransactions(it, newTip);
                }

                return true;
            }

            newTip = MoreObjects.firstNonNull(e.cohort.getCandidate(), newTip);
        }

        LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.getIdentifier());
        return true;
    }

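    // Invoked from startAbort() after an entry has been removed: the transactions queued behind it are re-validated
    // (and, if already pre-committed, re-prepared) against the supplied tip so their candidates no longer build on
    // the aborted transaction's state.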
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void rebaseTransactions(final Iterator<CommitEntry> iter, final @NonNull DataTreeTip newTip) {
        tip = requireNonNull(newTip);
        while (iter.hasNext()) {
            final SimpleShardDataTreeCohort cohort = iter.next().cohort;
            if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
                LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.getIdentifier());

                try {
                    tip.validate(cohort.getDataTreeModification());
                } catch (DataValidationFailedException | RuntimeException e) {
                    LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.getIdentifier(), e);
                    cohort.reportFailure(e);
                }
            } else if (cohort.getState() == State.PRE_COMMIT_COMPLETE) {
                LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.getIdentifier());

                try {
                    tip.validate(cohort.getDataTreeModification());
                    DataTreeCandidateTip candidate = tip.prepare(cohort.getDataTreeModification());

                    cohort.setNewCandidate(candidate);
                    tip = candidate;
                } catch (RuntimeException | DataValidationFailedException e) {
                    LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.getIdentifier(), e);
                    cohort.reportFailure(e);
                }
            }
        }
    }

    void setRunOnPendingTransactionsComplete(final Runnable operation) {
        runOnPendingTransactionsComplete = operation;
        maybeRunOperationOnPendingTransactionsComplete();
    }

    private void maybeRunOperationOnPendingTransactionsComplete() {
        if (runOnPendingTransactionsComplete != null && !anyPendingTransactions()) {
            LOG.debug("{}: Pending transactions complete - running operation {}", logContext,
                runOnPendingTransactionsComplete);

            runOnPendingTransactionsComplete.run();
            runOnPendingTransactionsComplete = null;
        }
    }

    ShardStats getStats() {
        return shard.getShardMBean();
    }

    Iterator<SimpleShardDataTreeCohort> cohortIterator() {
        return Iterables.transform(Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions),
            e -> e.cohort).iterator();
    }

    void removeTransactionChain(final LocalHistoryIdentifier id) {
        if (transactionChains.remove(id) != null) {
            LOG.debug("{}: Removed transaction chain {}", logContext, id);
        }
    }