/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;

import akka.actor.ActorRef;
import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.collect.Iterables;
import com.google.common.primitives.UnsignedLong;
import com.google.common.util.concurrent.FutureCallback;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Queue;
import java.util.SortedSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.UnaryOperator;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload;
import org.opendaylight.controller.cluster.datastore.persisted.PurgeTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.duration.FiniteDuration;
/**
 * Internal shard state, similar to a DOMStore, but optimized for use in the actor system, e.g. it does not expose
 * public interfaces and assumes it is only ever called from a single thread.
 *
 * <p>
 * This class is not part of the API contract and is subject to change at any time. It is NOT thread-safe.
 */
public class ShardDataTree extends ShardDataTreeTransactionParent {
    private static final class CommitEntry {
        final SimpleShardDataTreeCohort cohort;
        long lastAccess;

        CommitEntry(final SimpleShardDataTreeCohort cohort, final long now) {
            this.cohort = requireNonNull(cohort);
            lastAccess = now;
        }

        @Override
        public String toString() {
            return "CommitEntry [tx=" + cohort.getIdentifier() + ", state=" + cohort.getState() + "]";
        }
    }
    private static final Timeout COMMIT_STEP_TIMEOUT = new Timeout(FiniteDuration.create(5, TimeUnit.SECONDS));
    private static final Logger LOG = LoggerFactory.getLogger(ShardDataTree.class);
    /**
     * Process this many transactions in a single batched run. If we exceed this limit, we need to schedule later
     * execution to finish up the batch. This is necessary in case of a long list of transactions which progress
     * immediately through their preCommit phase -- if that happens, their completion eats up stack frames and could
     * result in StackOverflowError.
     */
    private static final int MAX_TRANSACTION_BATCH = 100;
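    // When a batched run exceeds this limit, processNextPendingTransaction() below stops and asks the Shard to
    // schedule a continuation (shard.scheduleNextPendingTransaction()); the batch counter is cleared again via
    // resetTransactionBatch().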
    private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
    private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
    private final Deque<CommitEntry> pendingTransactions = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingCommits = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingFinishCommits = new ArrayDeque<>();
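    // Rough lifecycle of a transaction through the three queues above, as implemented further down: an entry sits in
    // pendingTransactions through the canCommit/preCommit steps; startPreCommit() moves a successfully prepared entry
    // into pendingCommits; startCommit() then hands it to pendingFinishCommits, where it waits for the
    // CommitTransactionPayload to be replicated before finishCommit() applies it to the data tree.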
    /**
     * Callbacks that need to be invoked once a payload is replicated.
     */
    private final Map<Payload, Runnable> replicationCallbacks = new HashMap<>();

    private final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher;
    private final Collection<ShardDataTreeMetadata<?>> metadata;
    private final DataTree dataTree;
    private final String logContext;
    private final Shard shard;
    private Runnable runOnPendingTransactionsComplete;
    /**
     * Optimistic {@link DataTreeCandidate} preparation. Since our DataTree implementation is a
     * {@link DataTree}, each {@link DataTreeCandidate} is also a {@link DataTreeTip}, i.e. another
     * candidate can be prepared on top of it. They still need to be committed in sequence. Here we track the current
     * tip of the data tree, which is the last DataTreeCandidate we have in flight, or the DataTree itself.
     */
    private DataTreeTip tip;
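    // In effect the tip advances as candidates are prepared and falls back once they are committed: initially
    // tip == dataTree, startPreCommit() moves it to the freshly prepared candidate, and finishCommit() resets it to
    // dataTree again once the last in-flight candidate has been committed.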
    private SchemaContext schemaContext;
    private DataSchemaContextTree dataSchemaContext;
    private int currentTransactionBatch;
    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final DataTree dataTree,
            final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
            final String logContext,
            final ShardDataTreeMetadata<?>... metadata) {
        this.dataTree = requireNonNull(dataTree);
        updateSchemaContext(schemaContext);

        this.shard = requireNonNull(shard);
        this.treeChangeListenerPublisher = requireNonNull(treeChangeListenerPublisher);
        this.logContext = requireNonNull(logContext);
        this.metadata = ImmutableList.copyOf(metadata);
        tip = dataTree;
    }
    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType,
            final YangInstanceIdentifier root,
            final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
            final String logContext,
            final ShardDataTreeMetadata<?>... metadata) {
        this(shard, schemaContext, createDataTree(treeType, root), treeChangeListenerPublisher, logContext, metadata);
    }
    private static DataTree createDataTree(final TreeType treeType, final YangInstanceIdentifier root) {
        final DataTreeConfiguration baseConfig = DataTreeConfiguration.getDefault(treeType);
        return new InMemoryDataTreeFactory().create(new DataTreeConfiguration.Builder(baseConfig.getTreeType())
            .setMandatoryNodesValidation(baseConfig.isMandatoryNodesValidationEnabled())
            .setUniqueIndexes(baseConfig.isUniqueIndexEnabled())
            .setRootPath(root)
            .build());
    }
    public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType) {
        this(shard, schemaContext, treeType, YangInstanceIdentifier.EMPTY,
            new DefaultShardDataTreeChangeListenerPublisher(""), "");
    }
    final String logContext() {
        return logContext;
    }

    final long readTime() {
        return shard.ticker().read();
    }

    public DataTree getDataTree() {
        return dataTree;
    }

    SchemaContext getSchemaContext() {
        return schemaContext;
    }
    void updateSchemaContext(final SchemaContext newSchemaContext) {
        dataTree.setSchemaContext(newSchemaContext);
        this.schemaContext = requireNonNull(newSchemaContext);
        this.dataSchemaContext = DataSchemaContextTree.from(newSchemaContext);
    }
    void resetTransactionBatch() {
        currentTransactionBatch = 0;
    }
    /**
     * Take a snapshot of current state for later recovery.
     *
     * @return A state snapshot
     */
    @NonNull ShardDataTreeSnapshot takeStateSnapshot() {
        final NormalizedNode<?, ?> rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY).get();
        final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metaBuilder =
            ImmutableMap.builder();

        for (ShardDataTreeMetadata<?> m : metadata) {
            final ShardDataTreeSnapshotMetadata<?> meta = m.toSnapshot();
            if (meta != null) {
                metaBuilder.put(meta.getType(), meta);
            }
        }

        return new MetadataShardDataTreeSnapshot(rootNode, metaBuilder.build());
    }
    private boolean anyPendingTransactions() {
        return !pendingTransactions.isEmpty() || !pendingCommits.isEmpty() || !pendingFinishCommits.isEmpty();
    }
    private void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot,
            final UnaryOperator<DataTreeModification> wrapper) throws DataValidationFailedException {
        final Stopwatch elapsed = Stopwatch.createStarted();

        if (anyPendingTransactions()) {
            LOG.warn("{}: applying state snapshot with pending transactions", logContext);
        }

        final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> snapshotMeta;
        if (snapshot instanceof MetadataShardDataTreeSnapshot) {
            snapshotMeta = ((MetadataShardDataTreeSnapshot) snapshot).getMetadata();
        } else {
            snapshotMeta = ImmutableMap.of();
        }

        for (ShardDataTreeMetadata<?> m : metadata) {
            final ShardDataTreeSnapshotMetadata<?> s = snapshotMeta.get(m.getSupportedType());
            if (s != null) {
                m.applySnapshot(s);
            } else {
                m.reset();
            }
        }

        final DataTreeModification mod = wrapper.apply(dataTree.takeSnapshot().newModification());
        // delete everything first
        mod.delete(YangInstanceIdentifier.EMPTY);

        final Optional<NormalizedNode<?, ?>> maybeNode = snapshot.getRootNode();
        if (maybeNode.isPresent()) {
            // Add everything from the remote node back
            mod.write(YangInstanceIdentifier.EMPTY, maybeNode.get());
        }
        mod.ready();

        final DataTreeModification unwrapped = unwrap(mod);
        dataTree.validate(unwrapped);
        DataTreeCandidateTip candidate = dataTree.prepare(unwrapped);
        dataTree.commit(candidate);
        notifyListeners(candidate);

        LOG.debug("{}: state snapshot applied in {}", logContext, elapsed);
    }
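    // The wrapper parameter above controls pruning: the leader-originated path below applies the snapshot verbatim
    // (UnaryOperator.identity()), while the recovery path wraps the modification in a PruningDataTreeModification so
    // data written under an older SchemaContext can be adjusted to the current one.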
    /**
     * Apply a snapshot coming from the leader. This method assumes the leader and follower SchemaContexts match and
     * does not perform any pruning.
     *
     * @param snapshot Snapshot that needs to be applied
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
        applySnapshot(snapshot, UnaryOperator.identity());
    }
    private PruningDataTreeModification wrapWithPruning(final DataTreeModification delegate) {
        return new PruningDataTreeModification(delegate, dataTree, dataSchemaContext);
    }

    private static DataTreeModification unwrap(final DataTreeModification modification) {
        if (modification instanceof PruningDataTreeModification) {
            return ((PruningDataTreeModification) modification).delegate();
        }
        return modification;
    }
    /**
     * Apply a snapshot coming from recovery. This method does not assume the SchemaContexts match and performs data
     * pruning in an attempt to adjust the state to our current SchemaContext.
     *
     * @param snapshot Snapshot that needs to be applied
     * @throws DataValidationFailedException when the snapshot fails to apply
     */
    void applyRecoverySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
        applySnapshot(snapshot, this::wrapWithPruning);
    }
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void applyRecoveryCandidate(final DataTreeCandidate candidate) {
        final PruningDataTreeModification mod = wrapWithPruning(dataTree.takeSnapshot().newModification());
        DataTreeCandidates.applyToModification(mod, candidate);
        mod.ready();

        final DataTreeModification unwrapped = mod.delegate();
        LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped);

        try {
            dataTree.validate(unwrapped);
            dataTree.commit(dataTree.prepare(unwrapped));
        } catch (Exception e) {
            File file = new File(System.getProperty("karaf.data", "."),
                "failed-recovery-payload-" + logContext + ".out");
            DataTreeModificationOutput.toFile(file, unwrapped);
            throw new IllegalStateException(String.format(
                "%s: Failed to apply recovery payload. Modification data was written to file %s",
                logContext, file), e);
        }
    }
    /**
     * Apply a payload coming from recovery. This method does not assume the SchemaContexts match and performs data
     * pruning in an attempt to adjust the state to our current SchemaContext.
     *
     * @param payload Payload
     * @throws IOException when the payload fails to deserialize
     * @throws DataValidationFailedException when the payload fails to apply
     */
    void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
        if (payload instanceof CommitTransactionPayload) {
            final Entry<TransactionIdentifier, DataTreeCandidate> e =
                    ((CommitTransactionPayload) payload).getCandidate();
            applyRecoveryCandidate(e.getValue());
            allMetadataCommittedTransaction(e.getKey());
        } else if (payload instanceof AbortTransactionPayload) {
            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeTransactionPayload) {
            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof CreateLocalHistoryPayload) {
            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof CloseLocalHistoryPayload) {
            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeLocalHistoryPayload) {
            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
        } else {
            LOG.debug("{}: ignoring unhandled payload {}", logContext, payload);
        }
    }
    private void applyReplicatedCandidate(final TransactionIdentifier identifier, final DataTreeCandidate foreign)
            throws DataValidationFailedException {
        LOG.debug("{}: Applying foreign transaction {}", logContext, identifier);

        final DataTreeModification mod = dataTree.takeSnapshot().newModification();
        DataTreeCandidates.applyToModification(mod, foreign);
        mod.ready();

        LOG.trace("{}: Applying foreign modification {}", logContext, mod);
        dataTree.validate(mod);
        final DataTreeCandidate candidate = dataTree.prepare(mod);
        dataTree.commit(candidate);

        allMetadataCommittedTransaction(identifier);
        notifyListeners(candidate);
    }
    /**
     * Apply a payload coming from the leader, which could actually be us. This method assumes the leader and follower
     * SchemaContexts match and does not perform any pruning.
     *
     * @param identifier Payload identifier as returned from RaftActor
     * @param payload Payload
     * @throws IOException when the payload fails to deserialize
     * @throws DataValidationFailedException when the payload fails to apply
     */
    void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
            DataValidationFailedException {
        /*
         * This is a bit more involved than it needs to be due to the fact we do not want to be touching the payload
         * if we are the leader and it has originated with us.
         *
         * The identifier will only ever be non-null when we were the leader which achieved consensus. Unfortunately,
         * though, this may not be the case anymore, as we are being called some time afterwards and we may not be
         * acting in that capacity anymore.
         *
         * In any case, we know that this is an entry coming from replication, hence we can be sure we will not observe
         * pre-Boron state -- which limits the number of options here.
         */
        if (payload instanceof CommitTransactionPayload) {
            if (identifier == null) {
                final Entry<TransactionIdentifier, DataTreeCandidate> e =
                        ((CommitTransactionPayload) payload).getCandidate();
                applyReplicatedCandidate(e.getKey(), e.getValue());
            } else {
                verify(identifier instanceof TransactionIdentifier);
                payloadReplicationComplete((TransactionIdentifier) identifier);
            }
        } else if (payload instanceof AbortTransactionPayload) {
            if (identifier != null) {
                payloadReplicationComplete((AbortTransactionPayload) payload);
            }
            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeTransactionPayload) {
            if (identifier != null) {
                payloadReplicationComplete((PurgeTransactionPayload) payload);
            }
            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
        } else if (payload instanceof CloseLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((CloseLocalHistoryPayload) payload);
            }
            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof CreateLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((CreateLocalHistoryPayload) payload);
            }
            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
        } else if (payload instanceof PurgeLocalHistoryPayload) {
            if (identifier != null) {
                payloadReplicationComplete((PurgeLocalHistoryPayload) payload);
            }
            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
        } else {
            LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
        }
    }
    private void replicatePayload(final Identifier id, final Payload payload, final @Nullable Runnable callback) {
        if (callback != null) {
            replicationCallbacks.put(payload, callback);
        }
        shard.persistPayload(id, payload, true);
    }
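    // The callback map above is keyed by the Payload object handed to persistPayload(); once consensus is reached,
    // the matching payloadReplicationComplete() overload removes the entry and runs the callback. If no callback was
    // registered (e.g. the payload did not originate here), the lookup simply misses and only a debug message is
    // logged.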
    private void payloadReplicationComplete(final AbstractIdentifiablePayload<?> payload) {
        final Runnable callback = replicationCallbacks.remove(payload);
        if (callback != null) {
            LOG.debug("{}: replication of {} completed, invoking {}", logContext, payload.getIdentifier(), callback);
            callback.run();
        } else {
            LOG.debug("{}: replication of {} has no callback", logContext, payload.getIdentifier());
        }
    }
    private void payloadReplicationComplete(final TransactionIdentifier txId) {
        final CommitEntry current = pendingFinishCommits.peek();
        if (current == null) {
            LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
            allMetadataCommittedTransaction(txId);
            return;
        }

        if (!current.cohort.getIdentifier().equals(txId)) {
            LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
                current.cohort.getIdentifier(), txId);
            allMetadataCommittedTransaction(txId);
            return;
        }

        finishCommit(current.cohort);
    }
    private void allMetadataAbortedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionAborted(txId);
        }
    }

    private void allMetadataCommittedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionCommitted(txId);
        }
    }

    private void allMetadataPurgedTransaction(final TransactionIdentifier txId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onTransactionPurged(txId);
        }
    }

    private void allMetadataCreatedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryCreated(historyId);
        }
    }

    private void allMetadataClosedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryClosed(historyId);
        }
    }

    private void allMetadataPurgedLocalHistory(final LocalHistoryIdentifier historyId) {
        for (ShardDataTreeMetadata<?> m : metadata) {
            m.onHistoryPurged(historyId);
        }
    }
    /**
     * Create a transaction chain for specified history. Unlike {@link #ensureTransactionChain(LocalHistoryIdentifier)},
     * this method is used for re-establishing state when we are taking over the state keeping for a particular
     * history.
     *
     * @param historyId Local history identifier
     * @param closed True if the chain should be created in closed state (i.e. pending purge)
     * @return Transaction chain handle
     */
    ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
            final boolean closed) {
        final ShardDataTreeTransactionChain ret = new ShardDataTreeTransactionChain(historyId, this);
        final ShardDataTreeTransactionChain existing = transactionChains.putIfAbsent(historyId, ret);
        checkState(existing == null, "Attempted to recreate chain %s, but %s already exists", historyId, existing);
        return ret;
    }
    ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
            final @Nullable Runnable callback) {
        ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
        if (chain == null) {
            chain = new ShardDataTreeTransactionChain(historyId, this);
            transactionChains.put(historyId, chain);
            replicatePayload(historyId, CreateLocalHistoryPayload.create(
                historyId, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
        } else if (callback != null) {
            callback.run();
        }

        return chain;
    }
    ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
        shard.getShardMBean().incrementReadOnlyTransactionCount();

        if (txId.getHistoryId().getHistoryId() == 0) {
            return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
        }

        return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
    }
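    // History id 0 marks a free-standing transaction: it operates directly on a data tree snapshot, while anything
    // else is routed through its local history's transaction chain so ordering within the chain is preserved. The
    // same split is applied in newReadWriteTransaction() and newReadyCohort() below.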
    ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
        shard.getShardMBean().incrementReadWriteTransactionCount();

        if (txId.getHistoryId().getHistoryId() == 0) {
            return new ReadWriteShardDataTreeTransaction(ShardDataTree.this, txId, dataTree.takeSnapshot()
                .newModification());
        }

        return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId);
    }
    public void notifyListeners(final DataTreeCandidate candidate) {
        treeChangeListenerPublisher.publishChanges(candidate);
    }
    /**
     * Immediately purge all state relevant to leader. This includes all transaction chains and any scheduled
     * replication callbacks.
     */
    void purgeLeaderState() {
        for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
            chain.close();
        }

        transactionChains.clear();
        replicationCallbacks.clear();
    }
    /**
     * Close a single transaction chain.
     *
     * @param id History identifier
     * @param callback Callback to invoke upon completion, may be null
     */
    void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        if (commonCloseTransactionChain(id, callback)) {
            replicatePayload(id, CloseLocalHistoryPayload.create(id,
                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
        }
    }
    /**
     * Close a single transaction chain which is received through ask-based protocol. It does not keep a commit record.
     *
     * @param id History identifier
     */
    void closeTransactionChain(final LocalHistoryIdentifier id) {
        commonCloseTransactionChain(id, null);
    }
    private boolean commonCloseTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.get(id);
        if (chain == null) {
            LOG.debug("{}: Closing non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return false;
        }

        chain.close();
        return true;
    }
    /**
     * Purge a single transaction chain.
     *
     * @param id History identifier
     * @param callback Callback to invoke upon completion, may be null
     */
    void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
        final ShardDataTreeTransactionChain chain = transactionChains.remove(id);
        if (chain == null) {
            LOG.debug("{}: Purging non-existent transaction chain {}", logContext, id);
            if (callback != null) {
                callback.run();
            }
            return;
        }

        replicatePayload(id, PurgeLocalHistoryPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }
    Optional<DataTreeCandidate> readCurrentData() {
        return dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY)
            .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.EMPTY, state));
    }
    public void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
            final Optional<DataTreeCandidate> initialState,
            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
        treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
    }

    int getQueueSize() {
        return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
    }
    void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
        final TransactionIdentifier id = transaction.getIdentifier();
        LOG.debug("{}: aborting transaction {}", logContext, id);
        replicatePayload(id, AbortTransactionPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }
    void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
        // No-op for free-standing transactions
    }
    ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
            final Optional<SortedSet<String>> participatingShardNames) {
        final DataTreeModification snapshot = transaction.getSnapshot();
        final TransactionIdentifier id = transaction.getIdentifier();
        LOG.debug("{}: readying transaction {}", logContext, id);
        snapshot.ready();
        LOG.debug("{}: transaction {} ready", logContext, id);

        return createReadyCohort(transaction.getIdentifier(), snapshot, participatingShardNames);
    }
    void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
        LOG.debug("{}: purging transaction {}", logContext, id);
        replicatePayload(id, PurgeTransactionPayload.create(
            id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
    }
    public Optional<NormalizedNode<?, ?>> readNode(final YangInstanceIdentifier path) {
        return dataTree.takeSnapshot().readNode(path);
    }

    DataTreeSnapshot takeSnapshot() {
        return dataTree.takeSnapshot();
    }

    public DataTreeModification newModification() {
        return dataTree.takeSnapshot().newModification();
    }
    public Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
        Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());

        for (CommitEntry entry : pendingFinishCommits) {
            ret.add(entry.cohort);
        }

        for (CommitEntry entry : pendingCommits) {
            ret.add(entry.cohort);
        }

        for (CommitEntry entry : pendingTransactions) {
            ret.add(entry.cohort);
        }

        pendingFinishCommits.clear();
        pendingCommits.clear();
        pendingTransactions.clear();

        return ret;
    }
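    // The collection returned above preserves commit order: entries closest to being applied (pendingFinishCommits)
    // come first, followed by those awaiting commit and finally those still in the canCommit/preCommit stages.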
    /**
     * Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
     */
    void resumeNextPendingTransaction() {
        LOG.debug("{}: attempting to resume transaction processing", logContext);
        processNextPending();
    }
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void processNextPendingTransaction() {
        ++currentTransactionBatch;
        if (currentTransactionBatch > MAX_TRANSACTION_BATCH) {
            LOG.debug("{}: Already processed {}, scheduling continuation", logContext, currentTransactionBatch);
            shard.scheduleNextPendingTransaction();
            return;
        }

        processNextPending(pendingTransactions, State.CAN_COMMIT_PENDING, entry -> {
            final SimpleShardDataTreeCohort cohort = entry.cohort;
            final DataTreeModification modification = cohort.getDataTreeModification();

            LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier());
            Exception cause;
            try {
                tip.validate(modification);
                LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier());
                cohort.successfulCanCommit();
                entry.lastAccess = readTime();
                return;
            } catch (ConflictingModificationAppliedException e) {
                LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(),
                    e.getPath());
                cause = new OptimisticLockFailedException("Optimistic lock failed for path " + e.getPath(), e);
            } catch (DataValidationFailedException e) {
                LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.getIdentifier(),
                    e.getPath(), e);

                // For debugging purposes, allow dumping of the modification. Coupled with the above
                // precondition log, it should allow us to understand what went on.
                LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.getIdentifier(), modification);
                LOG.trace("{}: Current tree: {}", logContext, dataTree);
                cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
            } catch (Exception e) {
                LOG.warn("{}: Unexpected failure in validation phase", logContext, e);
                cause = e;
            }

            // Failure path: propagate the failure, remove the transaction from the queue and loop to the next one
            pendingTransactions.poll().cohort.failedCanCommit(cause);
        });
    }
    private void processNextPending() {
        processNextPendingCommit();
        processNextPendingTransaction();
    }
    private void processNextPending(final Queue<CommitEntry> queue, final State allowedState,
            final Consumer<CommitEntry> processor) {
        while (!queue.isEmpty()) {
            final CommitEntry entry = queue.peek();
            final SimpleShardDataTreeCohort cohort = entry.cohort;

            if (cohort.isFailed()) {
                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.getIdentifier());
                queue.remove();
                continue;
            }

            if (cohort.getState() == allowedState) {
                processor.accept(entry);
            }

            break;
        }

        maybeRunOperationOnPendingTransactionsComplete();
    }
    private void processNextPendingCommit() {
        processNextPending(pendingCommits, State.COMMIT_PENDING,
            entry -> startCommit(entry.cohort, entry.cohort.getCandidate()));
    }
    private boolean peekNextPendingCommit() {
        final CommitEntry first = pendingCommits.peek();
        return first != null && first.cohort.getState() == State.COMMIT_PENDING;
    }
    void startCanCommit(final SimpleShardDataTreeCohort cohort) {
        final CommitEntry head = pendingTransactions.peek();
        if (head == null) {
            LOG.warn("{}: No transactions enqueued while attempting to start canCommit on {}", logContext, cohort);
            return;
        }

        if (!cohort.equals(head.cohort)) {
            // The tx isn't at the head of the queue so we can't start canCommit at this point. Here we check if this
            // tx should be moved ahead of other tx's in the READY state in the pendingTransactions queue. If this tx
            // has other participating shards, it could deadlock with other tx's accessing the same shards
            // depending on the order the tx's are readied on each shard
            // (see https://jira.opendaylight.org/browse/CONTROLLER-1836). Therefore, if the preceding participating
            // shard names for a preceding pending tx, call it A, in the queue match those of this tx, then this tx
            // is allowed to be moved ahead of tx A in the queue so it is processed first to avoid potential deadlock
            // if tx A is behind this tx in the pendingTransactions queue for a preceding shard. In other words, since
            // canCommit for this tx was requested before tx A, honor that request. If this tx is moved to the head of
            // the queue as a result, then proceed with canCommit.
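            // Illustrative scenario (shard names are hypothetical): transactions A and B both span shards
            // {cars, people} and this shard is "people", so both have the preceding set {cars}. If A sits ahead of B
            // in this queue while B sits ahead of A on "cars", neither can complete canCommit on both shards. Since
            // canCommit for B was requested here first, B is moved ahead of A below, which breaks the cycle.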
            Collection<String> precedingShardNames = extractPrecedingShardNames(cohort.getParticipatingShardNames());
            if (precedingShardNames.isEmpty()) {
                LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.getIdentifier());
                return;
            }

            LOG.debug("{}: Evaluating tx {} for canCommit - preceding participating shard names {}",
                logContext, cohort.getIdentifier(), precedingShardNames);
            final Iterator<CommitEntry> iter = pendingTransactions.iterator();
            int index = -1;
            int moveToIndex = -1;
            while (iter.hasNext()) {
                final CommitEntry entry = iter.next();
                ++index;

                if (cohort.equals(entry.cohort)) {
                    if (moveToIndex < 0) {
                        LOG.debug("{}: Not moving tx {} - cannot proceed with canCommit",
                            logContext, cohort.getIdentifier());
                        return;
                    }

                    LOG.debug("{}: Moving {} to index {} in the pendingTransactions queue",
                        logContext, cohort.getIdentifier(), moveToIndex);
                    iter.remove();
                    insertEntry(pendingTransactions, entry, moveToIndex);

                    if (!cohort.equals(pendingTransactions.peek().cohort)) {
                        LOG.debug("{}: Tx {} is not at the head of the queue - cannot proceed with canCommit",
                            logContext, cohort.getIdentifier());
                        return;
                    }

                    LOG.debug("{}: Tx {} is now at the head of the queue - proceeding with canCommit",
                        logContext, cohort.getIdentifier());
                    break;
                }

                if (entry.cohort.getState() != State.READY) {
                    LOG.debug("{}: Skipping pending transaction {} in state {}",
                        logContext, entry.cohort.getIdentifier(), entry.cohort.getState());
                    continue;
                }

                final Collection<String> pendingPrecedingShardNames = extractPrecedingShardNames(
                    entry.cohort.getParticipatingShardNames());

                if (precedingShardNames.equals(pendingPrecedingShardNames)) {
                    if (moveToIndex < 0) {
                        LOG.debug("{}: Preceding shard names {} for pending tx {} match - saving moveToIndex {}",
                            logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), index);
                        moveToIndex = index;
                    } else {
                        LOG.debug(
                            "{}: Preceding shard names {} for pending tx {} match but moveToIndex already set to {}",
                            logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), moveToIndex);
                    }
                } else {
                    LOG.debug("{}: Preceding shard names {} for pending tx {} differ - skipping",
                        logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier());
                }
            }
        }

        processNextPendingTransaction();
    }
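    // insertEntry() below treats atIndex as a position counted from the head of the deque: index 0 simply prepends,
    // otherwise the first atIndex entries are popped onto a temporary stack, the new entry is added at the front, and
    // the popped entries are pushed back in their original order.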
    private static void insertEntry(final Deque<CommitEntry> queue, final CommitEntry entry, final int atIndex) {
        if (atIndex == 0) {
            queue.addFirst(entry);
            return;
        }

        LOG.trace("Inserting into Deque at index {}", atIndex);

        Deque<CommitEntry> tempStack = new ArrayDeque<>(atIndex);
        for (int i = 0; i < atIndex; i++) {
            tempStack.push(queue.poll());
        }

        queue.addFirst(entry);

        tempStack.forEach(queue::addFirst);
    }
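    // extractPrecedingShardNames() below relies on SortedSet.headSet(), which returns the strictly-smaller elements.
    // For example (hypothetical names), with participating shards {"cars", "people", "pets"} and this shard named
    // "people", the preceding set is {"cars"}; an absent Optional yields an empty list.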
    private Collection<String> extractPrecedingShardNames(final Optional<SortedSet<String>> participatingShardNames) {
        return participatingShardNames.map((Function<SortedSet<String>, Collection<String>>)
            set -> set.headSet(shard.getShardName())).orElse(Collections.<String>emptyList());
    }
    private void failPreCommit(final Throwable cause) {
        shard.getShardMBean().incrementFailedTransactionsCount();
        pendingTransactions.poll().cohort.failedPreCommit(cause);
        processNextPendingTransaction();
    }
    @SuppressWarnings("checkstyle:IllegalCatch")
    void startPreCommit(final SimpleShardDataTreeCohort cohort) {
        final CommitEntry entry = pendingTransactions.peek();
        checkState(entry != null, "Attempted to pre-commit of %s when no transactions pending", cohort);

        final SimpleShardDataTreeCohort current = entry.cohort;
        verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);

        final TransactionIdentifier currentId = current.getIdentifier();
        LOG.debug("{}: Preparing transaction {}", logContext, currentId);

        final DataTreeCandidateTip candidate;
        try {
            candidate = tip.prepare(cohort.getDataTreeModification());
            LOG.debug("{}: Transaction {} candidate ready", logContext, currentId);
        } catch (RuntimeException e) {
            failPreCommit(e);
            return;
        }

        cohort.userPreCommit(candidate, new FutureCallback<Void>() {
            @Override
            public void onSuccess(final Void noop) {
                // Set the tip of the data tree.
                tip = verifyNotNull(candidate);

                entry.lastAccess = readTime();

                pendingTransactions.remove();
                pendingCommits.add(entry);

                LOG.debug("{}: Transaction {} prepared", logContext, currentId);

                cohort.successfulPreCommit(candidate);

                processNextPendingTransaction();
            }

            @Override
            public void onFailure(final Throwable failure) {
                failPreCommit(failure);
            }
        });
    }
    private void failCommit(final Exception cause) {
        shard.getShardMBean().incrementFailedTransactionsCount();
        pendingFinishCommits.poll().cohort.failedCommit(cause);
        processNextPending();
    }
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void finishCommit(final SimpleShardDataTreeCohort cohort) {
        final TransactionIdentifier txId = cohort.getIdentifier();
        final DataTreeCandidate candidate = cohort.getCandidate();

        LOG.debug("{}: Resuming commit of transaction {}", logContext, txId);

        if (tip == candidate) {
            // All pending candidates have been committed, reset the tip to the data tree.
            tip = dataTree;
        }

        try {
            dataTree.commit(candidate);
        } catch (Exception e) {
            LOG.error("{}: Failed to commit transaction {}", logContext, txId, e);
            failCommit(e);
            return;
        }

        allMetadataCommittedTransaction(txId);
        shard.getShardMBean().incrementCommittedTransactionCount();
        shard.getShardMBean().setLastCommittedTransactionTime(System.currentTimeMillis());

        // FIXME: propagate journal index
        pendingFinishCommits.poll().cohort.successfulCommit(UnsignedLong.ZERO, () -> {
            LOG.trace("{}: Transaction {} committed, proceeding to notify", logContext, txId);
            notifyListeners(candidate);

            processNextPending();
        });
    }
    void startCommit(final SimpleShardDataTreeCohort cohort, final DataTreeCandidate candidate) {
        final CommitEntry entry = pendingCommits.peek();
        checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);

        final SimpleShardDataTreeCohort current = entry.cohort;
        if (!cohort.equals(current)) {
            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.getIdentifier());
            return;
        }

        LOG.debug("{}: Starting commit for transaction {}", logContext, current.getIdentifier());

        final TransactionIdentifier txId = cohort.getIdentifier();
        final Payload payload;
        try {
            payload = CommitTransactionPayload.create(txId, candidate,
                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity());
        } catch (IOException e) {
            LOG.error("{}: Failed to encode transaction {} candidate {}", logContext, txId, candidate, e);
            pendingCommits.poll().cohort.failedCommit(e);
            processNextPending();
            return;
        }

        // We process the next transactions pending canCommit before we call persistPayload, to possibly progress
        // subsequent transactions to the COMMIT_PENDING state so their payloads can be batched for replication. This
        // is done for single-shard transactions that immediately transition from canCommit to preCommit to commit.
        // Note that if the next pending transaction is progressed to COMMIT_PENDING and this method (startCommit) is
        // called, the next transaction will not attempt to replicate because the current transaction is still at the
        // head of the pendingCommits queue.
        processNextPendingTransaction();

        // After processing next pending transactions, we can now remove the current transaction from pendingCommits.
        // Note this must be done before the call to peekNextPendingCommit below so we check the next transaction
        // in order to properly determine the batchHint flag for the call to persistPayload.
        pendingCommits.remove();
        pendingFinishCommits.add(entry);

        // See if the next transaction is pending commit (i.e. in the COMMIT_PENDING state) so it can be batched with
        // this transaction for replication.
        boolean replicationBatchHint = peekNextPendingCommit();

        // Once completed, we will continue via payloadReplicationComplete
        shard.persistPayload(txId, payload, replicationBatchHint);

        entry.lastAccess = shard.ticker().read();

        LOG.debug("{}: Transaction {} submitted to persistence", logContext, txId);

        // Process the next transaction pending commit, if any. If there is one it will be batched with this
        // transaction for replication.
        processNextPendingCommit();
    }
    Collection<ActorRef> getCohortActors() {
        return cohortRegistry.getCohortActors();
    }

    void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
        cohortRegistry.process(sender, message);
    }
    ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Exception failure) {
        final SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId, failure);
        pendingTransactions.add(new CommitEntry(cohort, readTime()));
        return cohort;
    }

    ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Optional<SortedSet<String>> participatingShardNames) {
        SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
            cohortRegistry.createCohort(schemaContext, txId, shard::executeInSelf,
                COMMIT_STEP_TIMEOUT), participatingShardNames);
        pendingTransactions.add(new CommitEntry(cohort, readTime()));
        return cohort;
    }
    // Exposed for ShardCommitCoordinator so it does not have to deal with local histories (it does not care); this
    // mimics newReadWriteTransaction().
    ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
            final Optional<SortedSet<String>> participatingShardNames) {
        if (txId.getHistoryId().getHistoryId() == 0) {
            return createReadyCohort(txId, mod, participatingShardNames);
        }

        return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod, participatingShardNames);
    }
    @SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
    void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
            final Function<SimpleShardDataTreeCohort, OptionalLong> accessTimeUpdater) {
        final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
        final long now = readTime();

        final Queue<CommitEntry> currentQueue = !pendingFinishCommits.isEmpty() ? pendingFinishCommits :
            !pendingCommits.isEmpty() ? pendingCommits : pendingTransactions;
        final CommitEntry currentTx = currentQueue.peek();
        if (currentTx == null) {
            // Empty queue, no-op
            return;
        }

        long delta = now - currentTx.lastAccess;
        if (delta < timeout) {
            // Not expired yet, bail
            return;
        }

        final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort);
        if (updateOpt.isPresent()) {
            final long newAccess = updateOpt.getAsLong();
            final long newDelta = now - newAccess;
            if (newDelta < delta) {
                LOG.debug("{}: Updated current transaction {} access time", logContext,
                    currentTx.cohort.getIdentifier());
                currentTx.lastAccess = newAccess;
                delta = newDelta;
            }

            if (delta < timeout) {
                // Not expired yet, bail
                return;
            }
        }

        final long deltaMillis = TimeUnit.NANOSECONDS.toMillis(delta);
        final State state = currentTx.cohort.getState();

        LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
            currentTx.cohort.getIdentifier(), deltaMillis, state);
        boolean processNext = true;
        final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after "
            + deltaMillis + "ms");

        switch (state) {
            case CAN_COMMIT_PENDING:
                currentQueue.remove().cohort.failedCanCommit(cohortFailure);
                break;
            case CAN_COMMIT_COMPLETE:
                // The suppression of the FindBugs "DB_DUPLICATE_SWITCH_CLAUSES" warning pertains to this clause
                // whose code is duplicated with PRE_COMMIT_COMPLETE. The clauses aren't combined in case the code
                // in PRE_COMMIT_COMPLETE is changed.
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case PRE_COMMIT_PENDING:
                currentQueue.remove().cohort.failedPreCommit(cohortFailure);
                break;
            case PRE_COMMIT_COMPLETE:
                // FIXME: this is a legacy behavior problem. Three-phase commit protocol specifies that after we
                // are ready we should commit the transaction, not abort it. Our current software stack does
                // not allow us to do that consistently, because we persist at the time of commit, hence
                // we can end up in a state where we have pre-committed a transaction, then a leader failover
                // occurred ... the new leader does not see the pre-committed transaction and does not have
                // a running timer. To fix this we really need two persistence events.
                //
                // The first one, done at pre-commit time will hold the transaction payload. When consensus
                // is reached, we exit the pre-commit phase and start the pre-commit timer. Followers do not
                // apply the state in this event.
                //
                // The second one, done at commit (or abort) time holds only the transaction identifier and
                // signals to followers that the state should (or should not) be applied.
                //
                // In order to make the pre-commit timer work across failovers, though, we need
                // a per-shard cluster-wide monotonic time, so a follower becoming the leader can accurately
                // restart the timer.
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case COMMIT_PENDING:
                LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
                    currentTx.cohort.getIdentifier());
                currentTx.lastAccess = now;
                processNext = false;
                break;
            case READY:
                currentQueue.remove().cohort.reportFailure(cohortFailure);
                break;
            case ABORTED:
            case COMMITTED:
            case FAILED:
            default:
                currentQueue.remove();
        }

        if (processNext) {
            processNextPending();
        }
    }
    boolean startAbort(final SimpleShardDataTreeCohort cohort) {
        final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
            pendingTransactions).iterator();
        if (!it.hasNext()) {
            LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.getIdentifier());
            return true;
        }

        // First entry is special, as it may already be committing
        final CommitEntry first = it.next();
        if (cohort.equals(first.cohort)) {
            if (cohort.getState() != State.COMMIT_PENDING) {
                LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.getIdentifier(),
                    cohort.getState());
                it.remove();

                if (cohort.getCandidate() != null) {
                    rebaseTransactions(it, dataTree);
                }

                processNextPending();
                return true;
            }

            LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.getIdentifier());
            return false;
        }

        DataTreeTip newTip = MoreObjects.firstNonNull(first.cohort.getCandidate(), dataTree);
        while (it.hasNext()) {
            final CommitEntry e = it.next();
            if (cohort.equals(e.cohort)) {
                LOG.debug("{}: aborting queued transaction {}", logContext, cohort.getIdentifier());
                it.remove();

                if (cohort.getCandidate() != null) {
                    rebaseTransactions(it, newTip);
                }

                return true;
            }

            newTip = MoreObjects.firstNonNull(e.cohort.getCandidate(), newTip);
        }

        LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.getIdentifier());
        return false;
    }
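    // rebaseTransactions() below is the abort fallout: everything queued behind the removed entry was validated or
    // prepared against a tip that no longer exists, so CAN_COMMIT_COMPLETE entries are re-validated and
    // PRE_COMMIT_COMPLETE entries re-prepared against the supplied replacement tip, failing individually if they no
    // longer apply.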
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void rebaseTransactions(final Iterator<CommitEntry> iter, final @NonNull DataTreeTip newTip) {
        tip = requireNonNull(newTip);
        while (iter.hasNext()) {
            final SimpleShardDataTreeCohort cohort = iter.next().cohort;
            if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
                LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.getIdentifier());
                try {
                    tip.validate(cohort.getDataTreeModification());
                } catch (DataValidationFailedException | RuntimeException e) {
                    LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.getIdentifier(), e);
                    cohort.reportFailure(e);
                }
            } else if (cohort.getState() == State.PRE_COMMIT_COMPLETE) {
                LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.getIdentifier());
                try {
                    tip.validate(cohort.getDataTreeModification());
                    DataTreeCandidateTip candidate = tip.prepare(cohort.getDataTreeModification());

                    cohort.setNewCandidate(candidate);
                    tip = candidate;
                } catch (RuntimeException | DataValidationFailedException e) {
                    LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.getIdentifier(), e);
                    cohort.reportFailure(e);
                }
            }
        }
    }
    void setRunOnPendingTransactionsComplete(final Runnable operation) {
        runOnPendingTransactionsComplete = operation;
        maybeRunOperationOnPendingTransactionsComplete();
    }
    private void maybeRunOperationOnPendingTransactionsComplete() {
        if (runOnPendingTransactionsComplete != null && !anyPendingTransactions()) {
            LOG.debug("{}: Pending transactions complete - running operation {}", logContext,
                runOnPendingTransactionsComplete);

            runOnPendingTransactionsComplete.run();
            runOnPendingTransactionsComplete = null;
        }
    }
    ShardStats getStats() {
        return shard.getShardMBean();
    }

    Iterator<SimpleShardDataTreeCohort> cohortIterator() {
        return Iterables.transform(Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions),
            e -> e.cohort).iterator();
    }
    void removeTransactionChain(final LocalHistoryIdentifier id) {
        if (transactionChains.remove(id) != null) {
            LOG.debug("{}: Removed transaction chain {}", logContext, id);
        }
    }