/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;

import akka.actor.ActorRef;
import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.collect.Iterables;
import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FutureCallback;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Queue;
import java.util.SortedSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.UnaryOperator;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.FiniteDuration;

/**
 * Internal shard state, similar to a DOMStore, but optimized for use in the actor system, e.g. it does not expose
 * public interfaces and assumes it is only ever called from a single thread.
 *
 * <p>
 * This class is not part of the API contract and is subject to change at any time. It is NOT thread-safe.
 */
public class ShardDataTree extends ShardDataTreeTransactionParent {
    private static final class CommitEntry {
        final SimpleShardDataTreeCohort cohort;
        long lastAccess;

        CommitEntry(final SimpleShardDataTreeCohort cohort, final long now) {
            this.cohort = requireNonNull(cohort);
            lastAccess = now;
        }

        @Override
        public String toString() {
            return "CommitEntry [tx=" + cohort.getIdentifier() + ", state=" + cohort.getState() + "]";
        }
    }

    private static final Timeout COMMIT_STEP_TIMEOUT = new Timeout(FiniteDuration.create(5, TimeUnit.SECONDS));
    private static final Logger LOG = LoggerFactory.getLogger(ShardDataTree.class);

    /**
     * Process this many transactions in a single batched run. If we exceed this limit, we need to schedule later
     * execution to finish up the batch. This is necessary in case of a long list of transactions which progress
     * immediately through their preCommit phase -- if that happens, their completion eats up stack frames and could
     * result in StackOverflowError.
     */
    private static final int MAX_TRANSACTION_BATCH = 100;

    private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
    private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
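    // Transactions move through three queues as they advance through the commit process: pendingTransactions
    // (awaiting/performing canCommit), pendingCommits (pre-committed, awaiting commit) and pendingFinishCommits
    // (payload handed to persistence, awaiting replication consensus). Entries only ever move forward, in order.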
    private final Deque<CommitEntry> pendingTransactions = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingCommits = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingFinishCommits = new ArrayDeque<>();

    /**
     * Callbacks that need to be invoked once a payload is replicated.
     */
    private final Map<Payload, Runnable> replicationCallbacks = new HashMap<>();

    private final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher;
    private final Collection<ShardDataTreeMetadata<?>> metadata;
    private final DataTree dataTree;
    private final String logContext;
    private final Shard shard;
    private Runnable runOnPendingTransactionsComplete;

    /**
     * Optimistic {@link DataTreeCandidate} preparation. Since our {@link DataTree} implementation is also a
     * {@link DataTreeTip}, each prepared {@link DataTreeCandidate} is also a {@link DataTreeTip}, e.g. another
     * candidate can be prepared on top of it. They still need to be committed in sequence. Here we track the current
     * tip of the data tree, which is the last DataTreeCandidate we have in flight, or the DataTree itself.
     */
    private DataTreeTip tip;

    private SchemaContext schemaContext;
    private DataSchemaContextTree dataSchemaContext;

    private int currentTransactionBatch;

    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final DataTree dataTree,
            final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
            final String logContext,
            final ShardDataTreeMetadata<?>... metadata) {
        this.dataTree = requireNonNull(dataTree);
        updateSchemaContext(schemaContext);

        this.shard = requireNonNull(shard);
        this.treeChangeListenerPublisher = requireNonNull(treeChangeListenerPublisher);
        this.logContext = requireNonNull(logContext);
        this.metadata = ImmutableList.copyOf(metadata);
        tip = dataTree;
    }

    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType,
            final YangInstanceIdentifier root,
            final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
            final String logContext,
            final ShardDataTreeMetadata<?>... metadata) {
        this(shard, schemaContext, createDataTree(treeType, root), treeChangeListenerPublisher, logContext, metadata);
    }

    private static DataTree createDataTree(final TreeType treeType, final YangInstanceIdentifier root) {
        final DataTreeConfiguration baseConfig = DataTreeConfiguration.getDefault(treeType);
        return new InMemoryDataTreeFactory().create(new DataTreeConfiguration.Builder(baseConfig.getTreeType())
                .setMandatoryNodesValidation(baseConfig.isMandatoryNodesValidationEnabled())
                .setUniqueIndexes(baseConfig.isUniqueIndexEnabled())
                .setRootPath(root)
                .build());
    }

    @VisibleForTesting
    public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType) {
        this(shard, schemaContext, treeType, YangInstanceIdentifier.empty(),
                new DefaultShardDataTreeChangeListenerPublisher(""), "");
    }

    final String logContext() {
        return logContext;
    }

    final long readTime() {
        return shard.ticker().read();
    }

    public DataTree getDataTree() {
        return dataTree;
    }

    SchemaContext getSchemaContext() {
        return schemaContext;
    }

    void updateSchemaContext(final SchemaContext newSchemaContext) {
        dataTree.setSchemaContext(newSchemaContext);
        this.schemaContext = requireNonNull(newSchemaContext);
        this.dataSchemaContext = DataSchemaContextTree.from(newSchemaContext);
    }

    void resetTransactionBatch() {
        currentTransactionBatch = 0;
    }

    /**
     * Take a snapshot of current state for later recovery.
     *
     * @return A state snapshot
     */
    @NonNull ShardDataTreeSnapshot takeStateSnapshot() {
        final NormalizedNode<?, ?> rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()).get();
        final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metaBuilder =
                ImmutableMap.builder();

        for (ShardDataTreeMetadata<?> m : metadata) {
            final ShardDataTreeSnapshotMetadata<?> meta = m.toSnapshot();
            if (meta != null) {
                metaBuilder.put(meta.getType(), meta);
            }
        }

        return new MetadataShardDataTreeSnapshot(rootNode, metaBuilder.build());
    }

    private boolean anyPendingTransactions() {
        return !pendingTransactions.isEmpty() || !pendingCommits.isEmpty() || !pendingFinishCommits.isEmpty();
    }

    private void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot,
            final UnaryOperator<DataTreeModification> wrapper) throws DataValidationFailedException {
        final Stopwatch elapsed = Stopwatch.createStarted();

        if (anyPendingTransactions()) {
            LOG.warn("{}: applying state snapshot with pending transactions", logContext);
        }

        final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> snapshotMeta;
        if (snapshot instanceof MetadataShardDataTreeSnapshot) {
            snapshotMeta = ((MetadataShardDataTreeSnapshot) snapshot).getMetadata();
        } else {
            snapshotMeta = ImmutableMap.of();
        }

        for (ShardDataTreeMetadata<?> m : metadata) {
            final ShardDataTreeSnapshotMetadata<?> s = snapshotMeta.get(m.getSupportedType());
            if (s != null) {
                m.applySnapshot(s);
            } else {
                m.reset();
            }
        }

        final DataTreeModification mod = wrapper.apply(dataTree.takeSnapshot().newModification());
        // delete everything first
        mod.delete(YangInstanceIdentifier.empty());

        final Optional<NormalizedNode<?, ?>> maybeNode = snapshot.getRootNode();
        if (maybeNode.isPresent()) {
            // Add everything from the remote node back
            mod.write(YangInstanceIdentifier.empty(), maybeNode.get());
        }
        mod.ready();

        final DataTreeModification unwrapped = unwrap(mod);
        dataTree.validate(unwrapped);
        DataTreeCandidateTip candidate = dataTree.prepare(unwrapped);
        dataTree.commit(candidate);
        notifyListeners(candidate);

        LOG.debug("{}: state snapshot applied in {}", logContext, elapsed);
    }

    /**
     * Apply a snapshot coming from the leader. This method assumes the leader and follower SchemaContexts match and
     * does not perform any pruning.
     *
     * @param snapshot Snapshot that needs to be applied
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
        applySnapshot(snapshot, UnaryOperator.identity());
    }

    private PruningDataTreeModification wrapWithPruning(final DataTreeModification delegate) {
        return new PruningDataTreeModification(delegate, dataTree, dataSchemaContext);
    }

    private static DataTreeModification unwrap(final DataTreeModification modification) {
        if (modification instanceof PruningDataTreeModification) {
            return ((PruningDataTreeModification) modification).delegate();
        }
        return modification;
    }

    /**
     * Apply a snapshot coming from recovery. This method does not assume the SchemaContexts match and performs data
     * pruning in an attempt to adjust the state to our current SchemaContext.
     *
     * @param snapshot Snapshot that needs to be applied
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applyRecoverySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
        applySnapshot(snapshot, this::wrapWithPruning);
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void applyRecoveryCandidate(final CommitTransactionPayload payload) throws IOException {
        final Entry<TransactionIdentifier, DataTreeCandidate> entry = payload.getCandidate();

        final PruningDataTreeModification mod = wrapWithPruning(dataTree.takeSnapshot().newModification());
        DataTreeCandidates.applyToModification(mod, entry.getValue());
        mod.ready();

        final DataTreeModification unwrapped = mod.delegate();
        LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped);

        try {
            dataTree.validate(unwrapped);
            dataTree.commit(dataTree.prepare(unwrapped));
        } catch (Exception e) {
            File file = new File(System.getProperty("karaf.data", "."),
                    "failed-recovery-payload-" + logContext + ".out");
            DataTreeModificationOutput.toFile(file, unwrapped);
            throw new IllegalStateException(String.format(
                    "%s: Failed to apply recovery payload. Modification data was written to file %s",
                    logContext, file), e);
        }

        allMetadataCommittedTransaction(entry.getKey());
    }

    /**
     * Apply a payload coming from recovery. This method does not assume the SchemaContexts match and performs data
     * pruning in an attempt to adjust the state to our current SchemaContext.
     *
     * @param payload Payload
     * @throws IOException when the snapshot fails to deserialize
     */
    void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
        if (payload instanceof CommitTransactionPayload) {
            applyRecoveryCandidate((CommitTransactionPayload) payload);
        } else if (payload instanceof AbortTransactionPayload) {
            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeTransactionPayload) {
            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof CreateLocalHistoryPayload) {
            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof CloseLocalHistoryPayload) {
            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeLocalHistoryPayload) {
            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
        } else {
            LOG.debug("{}: ignoring unhandled payload {}", logContext, payload);
        }
    }

    private void applyReplicatedCandidate(final CommitTransactionPayload payload)
            throws DataValidationFailedException, IOException {
        final Entry<TransactionIdentifier, DataTreeCandidate> entry = payload.getCandidate();
        final TransactionIdentifier identifier = entry.getKey();
        LOG.debug("{}: Applying foreign transaction {}", logContext, identifier);

        final DataTreeModification mod = dataTree.takeSnapshot().newModification();
        DataTreeCandidates.applyToModification(mod, entry.getValue());
        mod.ready();

        LOG.trace("{}: Applying foreign modification {}", logContext, mod);
        dataTree.validate(mod);
        final DataTreeCandidate candidate = dataTree.prepare(mod);
        dataTree.commit(candidate);

        allMetadataCommittedTransaction(identifier);
        notifyListeners(candidate);
    }

    /**
     * Apply a payload coming from the leader, which could actually be us. This method assumes the leader and follower
     * SchemaContexts match and does not perform any pruning.
     *
     * @param identifier Payload identifier as returned from RaftActor
     * @param payload Payload
     * @throws IOException when the snapshot fails to deserialize
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
            DataValidationFailedException {
        /*
         * This is a bit more involved than it needs to be due to the fact we do not want to be touching the payload
         * if we are the leader and it has originated with us.
         *
         * The identifier will only ever be non-null when we were the leader which achieved consensus. Unfortunately,
         * though, this may not be the case anymore, as we are being called some time afterwards and we may not be
         * acting in that capacity anymore.
         *
         * In any case, we know that this is an entry coming from replication, hence we can be sure we will not observe
         * pre-Boron state -- which limits the number of options here.
         */
        if (payload instanceof CommitTransactionPayload) {
            if (identifier == null) {
                applyReplicatedCandidate((CommitTransactionPayload) payload);
            } else {
                verify(identifier instanceof TransactionIdentifier);
                payloadReplicationComplete((TransactionIdentifier) identifier);
            }
        } else if (payload instanceof AbortTransactionPayload) {
            if (identifier != null) {
                payloadReplicationComplete((AbortTransactionPayload) payload);
            }
            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeTransactionPayload) {
            if (identifier != null) {
                payloadReplicationComplete((PurgeTransactionPayload) payload);
            }
            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof CloseLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((CloseLocalHistoryPayload) payload);
            }
            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof CreateLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((CreateLocalHistoryPayload) payload);
            }
            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((PurgeLocalHistoryPayload) payload);
            }
            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
        } else {
            LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
        }
    }

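    // Registers the optional callback and hands the payload to the RaftActor for persistence and replication.
    // The callback fires from payloadReplicationComplete() once consensus is reached on the payload.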
    private void replicatePayload(final Identifier id, final Payload payload, final @Nullable Runnable callback) {
        if (callback != null) {
            replicationCallbacks.put(payload, callback);
        }
        shard.persistPayload(id, payload, true);
    }

    private void payloadReplicationComplete(final AbstractIdentifiablePayload<?> payload) {
        final Runnable callback = replicationCallbacks.remove(payload);
        if (callback != null) {
            LOG.debug("{}: replication of {} completed, invoking {}", logContext, payload.getIdentifier(), callback);
            callback.run();
        } else {
            LOG.debug("{}: replication of {} has no callback", logContext, payload.getIdentifier());
        }
    }

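    // Invoked when consensus is reached on a transaction's CommitTransactionPayload: resumes the commit if that
    // transaction is at the head of pendingFinishCommits, otherwise only records the commit in metadata.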
    private void payloadReplicationComplete(final TransactionIdentifier txId) {
        final CommitEntry current = pendingFinishCommits.peek();
        if (current == null) {
            LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
            allMetadataCommittedTransaction(txId);
            return;
        }

        if (!current.cohort.getIdentifier().equals(txId)) {
            LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
                current.cohort.getIdentifier(), txId);
            allMetadataCommittedTransaction(txId);
            return;
        }

        finishCommit(current.cohort);
    }

    private void allMetadataAbortedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionAborted(txId);
        }
    }

    private void allMetadataCommittedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionCommitted(txId);
        }
    }

    private void allMetadataPurgedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionPurged(txId);
        }
    }

    private void allMetadataCreatedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryCreated(historyId);
        }
    }

    private void allMetadataClosedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryClosed(historyId);
        }
    }

    private void allMetadataPurgedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryPurged(historyId);
        }
    }

    /**
     * Create a transaction chain for specified history. Unlike
     * {@link #ensureTransactionChain(LocalHistoryIdentifier, Runnable)}, this method is used for re-establishing
     * state when we are taking over.
     *
     * @param historyId Local history identifier
     * @param closed True if the chain should be created in closed state (i.e. pending purge)
     * @return Transaction chain handle
     */
    ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
            final boolean closed) {
        final ShardDataTreeTransactionChain ret = new ShardDataTreeTransactionChain(historyId, this);
        final ShardDataTreeTransactionChain existing = transactionChains.putIfAbsent(historyId, ret);
        checkState(existing == null, "Attempted to recreate chain %s, but %s already exists", historyId, existing);
        return ret;
    }

    ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
            final @Nullable Runnable callback) {
        ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
        if (chain == null) {
            chain = new ShardDataTreeTransactionChain(historyId, this);
            transactionChains.put(historyId, chain);
            replicatePayload(historyId, CreateLocalHistoryPayload.create(
                    historyId, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
        } else if (callback != null) {
            callback.run();
        }

        return chain;
    }

    ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
        shard.getShardMBean().incrementReadOnlyTransactionCount();
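        // History ID 0 identifies a free-standing transaction, which is not part of a transaction chain and can
        // run against a plain snapshot.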
        if (txId.getHistoryId().getHistoryId() == 0) {
            return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
        }

        return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
    }

    ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
        shard.getShardMBean().incrementReadWriteTransactionCount();

        if (txId.getHistoryId().getHistoryId() == 0) {
            return new ReadWriteShardDataTreeTransaction(ShardDataTree.this, txId, dataTree.takeSnapshot()
                    .newModification());
        }

        return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId);
    }

    @VisibleForTesting
    public void notifyListeners(final DataTreeCandidate candidate) {
        treeChangeListenerPublisher.publishChanges(candidate);
    }

    /**
     * Immediately purge all state relevant to leader. This includes all transaction chains and any scheduled
     * replication callbacks.
     */
    void purgeLeaderState() {
        for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
            chain.close();
        }

        transactionChains.clear();
        replicationCallbacks.clear();
    }

    /**
     * Close a single transaction chain.
     *
     * @param id History identifier
     * @param callback Callback to invoke upon completion, may be null
     */
    void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        if (commonCloseTransactionChain(id, callback)) {
            replicatePayload(id, CloseLocalHistoryPayload.create(id,
                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
        }
    }

    /**
     * Close a single transaction chain which is received through ask-based protocol. It does not keep a commit record.
     *
     * @param id History identifier
     */
    void closeTransactionChain(final LocalHistoryIdentifier id) {
        commonCloseTransactionChain(id, null);
    }

    private boolean commonCloseTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.get(id);
        if (chain == null) {
            LOG.debug("{}: Closing non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return false;
        }

        chain.close();
        return true;
    }

    /**
     * Purge a single transaction chain.
     *
     * @param id History identifier
     * @param callback Callback to invoke upon completion, may be null
     */
    void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.remove(id);
        if (chain == null) {
            LOG.debug("{}: Purging non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return;
        }

        replicatePayload(id, PurgeLocalHistoryPayload.create(
                id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    Optional<DataTreeCandidate> readCurrentData() {
        return dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty())
                .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(), state));
    }

    public void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
            final Optional<DataTreeCandidate> initialState,
            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
        treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
    }

    int getQueueSize() {
        return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
    }

    @Override
    void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
        final TransactionIdentifier id = transaction.getIdentifier();
        LOG.debug("{}: aborting transaction {}", logContext, id);
        replicatePayload(id, AbortTransactionPayload.create(
                id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    @Override
    void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
        // No-op for free-standing transactions
    }

    @Override
    ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
            final Optional<SortedSet<String>> participatingShardNames) {
        final DataTreeModification snapshot = transaction.getSnapshot();
        final TransactionIdentifier id = transaction.getIdentifier();
        LOG.debug("{}: readying transaction {}", logContext, id);
        snapshot.ready();
        LOG.debug("{}: transaction {} ready", logContext, id);

        return createReadyCohort(transaction.getIdentifier(), snapshot, participatingShardNames);
    }

    void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
        LOG.debug("{}: purging transaction {}", logContext, id);
        replicatePayload(id, PurgeTransactionPayload.create(
                id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }

    public Optional<NormalizedNode<?, ?>> readNode(final YangInstanceIdentifier path) {
        return dataTree.takeSnapshot().readNode(path);
    }

    DataTreeSnapshot takeSnapshot() {
        return dataTree.takeSnapshot();
    }

    @VisibleForTesting
    public DataTreeModification newModification() {
        return dataTree.takeSnapshot().newModification();
    }

    public Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
        Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());

        for (CommitEntry entry : pendingFinishCommits) {
            ret.add(entry.cohort);
        }

        for (CommitEntry entry : pendingCommits) {
            ret.add(entry.cohort);
        }

        for (CommitEntry entry : pendingTransactions) {
            ret.add(entry.cohort);
        }

        pendingFinishCommits.clear();
        pendingCommits.clear();
        pendingTransactions.clear();
        tip = dataTree;
        return ret;
    }

    /**
     * Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
     */
    void resumeNextPendingTransaction() {
        LOG.debug("{}: attempting to resume transaction processing", logContext);
        processNextPending();
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void processNextPendingTransaction() {
        ++currentTransactionBatch;
        if (currentTransactionBatch > MAX_TRANSACTION_BATCH) {
            LOG.debug("{}: Already processed {}, scheduling continuation", logContext, currentTransactionBatch);
            shard.scheduleNextPendingTransaction();
            return;
        }

        processNextPending(pendingTransactions, State.CAN_COMMIT_PENDING, entry -> {
            final SimpleShardDataTreeCohort cohort = entry.cohort;
            final DataTreeModification modification = cohort.getDataTreeModification();

            LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier());
            Exception cause;
            try {
                tip.validate(modification);
                LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier());
                cohort.successfulCanCommit();
                entry.lastAccess = readTime();
                return;
            } catch (ConflictingModificationAppliedException e) {
                LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(),
                    e.getPath());
                cause = new OptimisticLockFailedException("Optimistic lock failed for path " + e.getPath(), e);
            } catch (DataValidationFailedException e) {
                LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.getIdentifier(),
                    e.getPath(), e);

                // For debugging purposes, allow dumping of the modification. Coupled with the above
                // precondition log, it should allow us to understand what went on.
                LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.getIdentifier(), modification);
                LOG.trace("{}: Current tree: {}", logContext, dataTree);
                cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(),
                    e);
            } catch (Exception e) {
                LOG.warn("{}: Unexpected failure in validation phase", logContext, e);
                cause = e;
            }

            // Failure path: propagate the failure, remove the transaction from the queue and loop to the next one
            pendingTransactions.poll().cohort.failedCanCommit(cause);
        });
    }

    private void processNextPending() {
        processNextPendingCommit();
        processNextPendingTransaction();
    }

    private void processNextPending(final Queue<CommitEntry> queue, final State allowedState,
            final Consumer<CommitEntry> processor) {
        while (!queue.isEmpty()) {
            final CommitEntry entry = queue.peek();
            final SimpleShardDataTreeCohort cohort = entry.cohort;

            if (cohort.isFailed()) {
                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.getIdentifier());
                queue.remove();
                continue;
            }

            if (cohort.getState() == allowedState) {
                processor.accept(entry);
            }

            break;
        }

        maybeRunOperationOnPendingTransactionsComplete();
    }

    private void processNextPendingCommit() {
        processNextPending(pendingCommits, State.COMMIT_PENDING,
            entry -> startCommit(entry.cohort, entry.cohort.getCandidate()));
    }

    private boolean peekNextPendingCommit() {
        final CommitEntry first = pendingCommits.peek();
        return first != null && first.cohort.getState() == State.COMMIT_PENDING;
    }

    void startCanCommit(final SimpleShardDataTreeCohort cohort) {
        final CommitEntry head = pendingTransactions.peek();
        if (head == null) {
            LOG.warn("{}: No transactions enqueued while attempting to start canCommit on {}", logContext, cohort);
            return;
        }
        if (!cohort.equals(head.cohort)) {
            // The tx isn't at the head of the queue so we can't start canCommit at this point. Here we check if this
            // tx should be moved ahead of other tx's in the READY state in the pendingTransactions queue. If this tx
            // has other participating shards, it could deadlock with other tx's accessing the same shards
            // depending on the order the tx's are readied on each shard
            // (see https://jira.opendaylight.org/browse/CONTROLLER-1836). Therefore, if the preceding participating
            // shard names for a preceding pending tx, call it A, in the queue match those of this tx, then this tx
            // is allowed to be moved ahead of tx A in the queue so it is processed first to avoid potential deadlock
            // if tx A is behind this tx in the pendingTransactions queue for a preceding shard. In other words, since
            // canCommit for this tx was requested before tx A, honor that request. If this tx is moved to the head of
            // the queue as a result, then proceed with canCommit.
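            // For instance, suppose tx1 and tx2 both span this shard and the same preceding shard, and tx2's
            // canCommit arrives here while tx1 is still READY ahead of it. Since both transactions list the same
            // preceding shard names, tx2 may be moved ahead of tx1 below, honoring the order in which canCommit
            // was actually requested.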
            Collection<String> precedingShardNames = extractPrecedingShardNames(cohort.getParticipatingShardNames());
            if (precedingShardNames.isEmpty()) {
                LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.getIdentifier());
                return;
            }

            LOG.debug("{}: Evaluating tx {} for canCommit - preceding participating shard names {}",
                logContext, cohort.getIdentifier(), precedingShardNames);
            final Iterator<CommitEntry> iter = pendingTransactions.iterator();
            int index = -1;
            int moveToIndex = -1;
            while (iter.hasNext()) {
                final CommitEntry entry = iter.next();
                ++index;

                if (cohort.equals(entry.cohort)) {
                    if (moveToIndex < 0) {
                        LOG.debug("{}: Not moving tx {} - cannot proceed with canCommit",
                            logContext, cohort.getIdentifier());
                        return;
                    }

                    LOG.debug("{}: Moving {} to index {} in the pendingTransactions queue",
                        logContext, cohort.getIdentifier(), moveToIndex);
                    iter.remove();
                    insertEntry(pendingTransactions, entry, moveToIndex);

                    if (!cohort.equals(pendingTransactions.peek().cohort)) {
                        LOG.debug("{}: Tx {} is not at the head of the queue - cannot proceed with canCommit",
                            logContext, cohort.getIdentifier());
                        return;
                    }

                    LOG.debug("{}: Tx {} is now at the head of the queue - proceeding with canCommit",
                        logContext, cohort.getIdentifier());
                    break;
                }
                if (entry.cohort.getState() != State.READY) {
                    LOG.debug("{}: Skipping pending transaction {} in state {}",
                        logContext, entry.cohort.getIdentifier(), entry.cohort.getState());
                    continue;
                }

                final Collection<String> pendingPrecedingShardNames = extractPrecedingShardNames(
                    entry.cohort.getParticipatingShardNames());

                if (precedingShardNames.equals(pendingPrecedingShardNames)) {
                    if (moveToIndex < 0) {
                        LOG.debug("{}: Preceding shard names {} for pending tx {} match - saving moveToIndex {}",
                            logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), index);
                        moveToIndex = index;
                    } else {
                        LOG.debug(
                            "{}: Preceding shard names {} for pending tx {} match but moveToIndex already set to {}",
                            logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), moveToIndex);
                    }
                } else {
                    LOG.debug("{}: Preceding shard names {} for pending tx {} differ - skipping",
                        logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier());
                }
            }
        }
        processNextPendingTransaction();
    }

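    // ArrayDeque has no positional insert, so this pops atIndex entries onto a temporary stack, adds the new
    // entry at the head and then pushes the popped entries back on top of it.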
    private static void insertEntry(final Deque<CommitEntry> queue, final CommitEntry entry, final int atIndex) {
        if (atIndex == 0) {
            queue.addFirst(entry);
            return;
        }

        LOG.trace("Inserting into Deque at index {}", atIndex);

        Deque<CommitEntry> tempStack = new ArrayDeque<>(atIndex);
        for (int i = 0; i < atIndex; i++) {
            tempStack.push(queue.poll());
        }

        queue.addFirst(entry);

        tempStack.forEach(queue::addFirst);
    }

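    // Participating shard names are kept in a SortedSet, so headSet() yields exactly the participating shard
    // names which sort before this shard's own name.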
    private Collection<String> extractPrecedingShardNames(final Optional<SortedSet<String>> participatingShardNames) {
        return participatingShardNames.map((Function<SortedSet<String>, Collection<String>>)
            set -> set.headSet(shard.getShardName())).orElse(Collections.<String>emptyList());
    }

    private void failPreCommit(final Throwable cause) {
        shard.getShardMBean().incrementFailedTransactionsCount();
        pendingTransactions.poll().cohort.failedPreCommit(cause);
        processNextPendingTransaction();
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    void startPreCommit(final SimpleShardDataTreeCohort cohort) {
        final CommitEntry entry = pendingTransactions.peek();
        checkState(entry != null, "Attempted to pre-commit %s when no transactions pending", cohort);

        final SimpleShardDataTreeCohort current = entry.cohort;
        verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);

        final TransactionIdentifier currentId = current.getIdentifier();
        LOG.debug("{}: Preparing transaction {}", logContext, currentId);

        final DataTreeCandidateTip candidate;
        try {
            candidate = tip.prepare(cohort.getDataTreeModification());
            LOG.debug("{}: Transaction {} candidate ready", logContext, currentId);
        } catch (RuntimeException e) {
            failPreCommit(e);
            return;
        }

        cohort.userPreCommit(candidate, new FutureCallback<Void>() {
            @Override
            public void onSuccess(final Void noop) {
                // Set the tip of the data tree.
                tip = verifyNotNull(candidate);

                entry.lastAccess = readTime();

                pendingTransactions.remove();
                pendingCommits.add(entry);

                LOG.debug("{}: Transaction {} prepared", logContext, currentId);

                cohort.successfulPreCommit(candidate);

                processNextPendingTransaction();
            }

            @Override
            public void onFailure(final Throwable failure) {
                failPreCommit(failure);
            }
        });
    }

    private void failCommit(final Exception cause) {
        shard.getShardMBean().incrementFailedTransactionsCount();
        pendingFinishCommits.poll().cohort.failedCommit(cause);
        processNextPending();
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void finishCommit(final SimpleShardDataTreeCohort cohort) {
        final TransactionIdentifier txId = cohort.getIdentifier();
        final DataTreeCandidate candidate = cohort.getCandidate();

        LOG.debug("{}: Resuming commit of transaction {}", logContext, txId);

        if (tip == candidate) {
            // All pending candidates have been committed, reset the tip to the data tree.
            tip = dataTree;
        }

        try {
            dataTree.commit(candidate);
        } catch (Exception e) {
            LOG.error("{}: Failed to commit transaction {}", logContext, txId, e);
            failCommit(e);
            return;
        }

        allMetadataCommittedTransaction(txId);
        shard.getShardMBean().incrementCommittedTransactionCount();
        shard.getShardMBean().setLastCommittedTransactionTime(System.currentTimeMillis());

        // FIXME: propagate journal index
        pendingFinishCommits.poll().cohort.successfulCommit(UnsignedLong.ZERO, () -> {
            LOG.trace("{}: Transaction {} committed, proceeding to notify", logContext, txId);
            notifyListeners(candidate);

            processNextPending();
        });
    }

    void startCommit(final SimpleShardDataTreeCohort cohort, final DataTreeCandidate candidate) {
        final CommitEntry entry = pendingCommits.peek();
        checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);

        final SimpleShardDataTreeCohort current = entry.cohort;
        if (!cohort.equals(current)) {
            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.getIdentifier());
            return;
        }

        LOG.debug("{}: Starting commit for transaction {}", logContext, current.getIdentifier());

        final TransactionIdentifier txId = cohort.getIdentifier();
        final Payload payload;
        try {
            payload = CommitTransactionPayload.create(txId, candidate,
                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity());
        } catch (IOException e) {
            LOG.error("{}: Failed to encode transaction {} candidate {}", logContext, txId, candidate, e);
            pendingCommits.poll().cohort.failedCommit(e);
            processNextPending();
            return;
        }

        // We process next transactions pending canCommit before we call persistPayload to possibly progress
        // subsequent transactions to the COMMIT_PENDING state so the payloads can be batched for replication. This
        // is done for single-shard transactions that immediately transition from canCommit to preCommit to commit.
        // Note that if the next pending transaction is progressed to COMMIT_PENDING and this method (startCommit)
        // is called, the next transaction will not attempt to replicate b/c the current transaction is still at the
        // head of the pendingCommits queue.
        processNextPendingTransaction();

        // After processing next pending transactions, we can now remove the current transaction from pendingCommits.
        // Note this must be done before the call to peekNextPendingCommit below so we check the next transaction
        // in order to properly determine the batchHint flag for the call to persistPayload.
        pendingCommits.remove();
        pendingFinishCommits.add(entry);

        // See if the next transaction is pending commit (ie in the COMMIT_PENDING state) so it can be batched with
        // this transaction for replication.
        boolean replicationBatchHint = peekNextPendingCommit();

        // Once completed, we will continue via payloadReplicationComplete
        shard.persistPayload(txId, payload, replicationBatchHint);

        entry.lastAccess = shard.ticker().read();

        LOG.debug("{}: Transaction {} submitted to persistence", logContext, txId);

        // Process the next transaction pending commit, if any. If there is one it will be batched with this
        // transaction for replication.
        processNextPendingCommit();
    }

    Collection<ActorRef> getCohortActors() {
        return cohortRegistry.getCohortActors();
    }

    void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
        cohortRegistry.process(sender, message);
    }

    @Override
    ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Exception failure) {
        final SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId, failure);
        pendingTransactions.add(new CommitEntry(cohort, readTime()));
        return cohort;
    }

    @Override
    ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Optional<SortedSet<String>> participatingShardNames) {
        SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
            cohortRegistry.createCohort(schemaContext, txId, shard::executeInSelf,
                COMMIT_STEP_TIMEOUT), participatingShardNames);
        pendingTransactions.add(new CommitEntry(cohort, readTime()));
        return cohort;
    }

    // Exposed for ShardCommitCoordinator so it does not have to deal with local histories (it does not care), this
    // mimics the newReadWriteTransaction()
    ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Optional<SortedSet<String>> participatingShardNames) {
        if (txId.getHistoryId().getHistoryId() == 0) {
            return createReadyCohort(txId, mod, participatingShardNames);
        }

        return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod, participatingShardNames);
    }

    @SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
    void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
            final Function<SimpleShardDataTreeCohort, OptionalLong> accessTimeUpdater) {
        final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
        final long now = readTime();
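        // Pick the most-advanced non-empty queue: its head entry is the transaction currently gating progress
        // of everything behind it, so it is the only candidate for expiration.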
        final Queue<CommitEntry> currentQueue = !pendingFinishCommits.isEmpty() ? pendingFinishCommits :
            !pendingCommits.isEmpty() ? pendingCommits : pendingTransactions;
        final CommitEntry currentTx = currentQueue.peek();
        if (currentTx == null) {
            // Empty queue, no-op
            return;
        }

        long delta = now - currentTx.lastAccess;
        if (delta < timeout) {
            // Not expired yet, bail
            return;
        }

        final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort);
        if (updateOpt.isPresent()) {
            final long newAccess = updateOpt.getAsLong();
            final long newDelta = now - newAccess;
            if (newDelta < delta) {
                LOG.debug("{}: Updated current transaction {} access time", logContext,
                    currentTx.cohort.getIdentifier());
                currentTx.lastAccess = newAccess;
                delta = newDelta;
            }

            if (delta < timeout) {
                // Not expired yet, bail
                return;
            }
        }

        final long deltaMillis = TimeUnit.NANOSECONDS.toMillis(delta);
        final State state = currentTx.cohort.getState();

        LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
            currentTx.cohort.getIdentifier(), deltaMillis, state);
        boolean processNext = true;
        final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after "
                + deltaMillis + "ms");

        switch (state) {
            case CAN_COMMIT_PENDING:
                currentQueue.remove().cohort.failedCanCommit(cohortFailure);
                break;
            case CAN_COMMIT_COMPLETE:
                // The suppression of the FindBugs "DB_DUPLICATE_SWITCH_CLAUSES" warning pertains to this clause
                // whose code is duplicated with PRE_COMMIT_COMPLETE. The clauses aren't combined in case the code
                // in PRE_COMMIT_COMPLETE is changed.
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case PRE_COMMIT_PENDING:
                currentQueue.remove().cohort.failedPreCommit(cohortFailure);
                break;
            case PRE_COMMIT_COMPLETE:
                // FIXME: this is a legacy behavior problem. Three-phase commit protocol specifies that after we
                // are ready we should commit the transaction, not abort it. Our current software stack does
                // not allow us to do that consistently, because we persist at the time of commit, hence
                // we can end up in a state where we have pre-committed a transaction, then a leader failover
                // occurred ... the new leader does not see the pre-committed transaction and does not have
                // a running timer. To fix this we really need two persistence events.
                //
                // The first one, done at pre-commit time will hold the transaction payload. When consensus
                // is reached, we exit the pre-commit phase and start the pre-commit timer. Followers do not
                // apply the state in this event.
                //
                // The second one, done at commit (or abort) time holds only the transaction identifier and
                // signals to followers that the state should (or should not) be applied.
                //
                // In order to make the pre-commit timer working across failovers, though, we need
                // a per-shard cluster-wide monotonic time, so a follower becoming the leader can accurately
                // restart the timer.
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case COMMIT_PENDING:
                LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
                    currentTx.cohort.getIdentifier());
                currentTx.lastAccess = now;
                processNext = false;
                break;
            case READY:
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case ABORTED:
            case COMMITTED:
            case FAILED:
            default:
                currentQueue.remove();
        }

        if (processNext) {
            processNextPending();
        }
    }

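    // Attempt to remove the given cohort from whichever queue it currently occupies. Returns false only when
    // the transaction is already in COMMIT_PENDING and can no longer be aborted; otherwise the abort succeeds
    // (or the transaction is simply not found) and true is returned.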
    boolean startAbort(final SimpleShardDataTreeCohort cohort) {
        final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
            pendingTransactions).iterator();
        if (!it.hasNext()) {
            LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.getIdentifier());
            return true;
        }

        // First entry is special, as it may already be committing
        final CommitEntry first = it.next();
        if (cohort.equals(first.cohort)) {
            if (cohort.getState() != State.COMMIT_PENDING) {
                LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.getIdentifier(),
                    cohort.getState());
                it.remove();
                if (cohort.getCandidate() != null) {
                    rebaseTransactions(it, dataTree);
                }

                processNextPending();
                return true;
            }

            LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.getIdentifier());
            return false;
        }

        DataTreeTip newTip = MoreObjects.firstNonNull(first.cohort.getCandidate(), dataTree);
        while (it.hasNext()) {
            final CommitEntry e = it.next();
            if (cohort.equals(e.cohort)) {
                LOG.debug("{}: aborting queued transaction {}", logContext, cohort.getIdentifier());
                it.remove();
                if (cohort.getCandidate() != null) {
                    rebaseTransactions(it, newTip);
                }

                return true;
            } else {
                newTip = MoreObjects.firstNonNull(e.cohort.getCandidate(), newTip);
            }
        }

        LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.getIdentifier());
        return true;
    }

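    // After an abort removes a candidate from the middle of the queue, the transactions behind it were validated
    // or prepared against state which no longer exists. Re-validate them (and re-prepare the ones that had already
    // completed preCommit) against newTip, failing any that no longer apply.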
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void rebaseTransactions(final Iterator<CommitEntry> iter, final @NonNull DataTreeTip newTip) {
        tip = requireNonNull(newTip);
        while (iter.hasNext()) {
            final SimpleShardDataTreeCohort cohort = iter.next().cohort;
            if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
                LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.getIdentifier());

                try {
                    tip.validate(cohort.getDataTreeModification());
                } catch (DataValidationFailedException | RuntimeException e) {
                    LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.getIdentifier(), e);
                    cohort.reportFailure(e);
                }
            } else if (cohort.getState() == State.PRE_COMMIT_COMPLETE) {
                LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.getIdentifier());

                try {
                    tip.validate(cohort.getDataTreeModification());
                    DataTreeCandidateTip candidate = tip.prepare(cohort.getDataTreeModification());

                    cohort.setNewCandidate(candidate);
                    tip = candidate;
                } catch (RuntimeException | DataValidationFailedException e) {
                    LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.getIdentifier(), e);
                    cohort.reportFailure(e);
                }
            }
        }
    }

    void setRunOnPendingTransactionsComplete(final Runnable operation) {
        runOnPendingTransactionsComplete = operation;
        maybeRunOperationOnPendingTransactionsComplete();
    }

    private void maybeRunOperationOnPendingTransactionsComplete() {
        if (runOnPendingTransactionsComplete != null && !anyPendingTransactions()) {
            LOG.debug("{}: Pending transactions complete - running operation {}", logContext,
                runOnPendingTransactionsComplete);

            runOnPendingTransactionsComplete.run();
            runOnPendingTransactionsComplete = null;
        }
    }

    ShardStats getStats() {
        return shard.getShardMBean();
    }

    Iterator<SimpleShardDataTreeCohort> cohortIterator() {
        return Iterables.transform(Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions),
            e -> e.cohort).iterator();
    }

    void removeTransactionChain(final LocalHistoryIdentifier id) {
        if (transactionChains.remove(id) != null) {
            LOG.debug("{}: Removed transaction chain {}", logContext, id);
        }
    }
}