X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FShardDataTree.java;h=d398afefa7869cb70cc2eec74cf9153a918b162f;hb=refs%2Fchanges%2F83%2F51583%2F6;hp=a04bf62a96d9e964208e0ac76cd29f104df649a6;hpb=a0332590d14ab7aad0247ae12bff4205c90cac94;p=controller.git

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
index a04bf62a96..d398afefa7 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorRef;
 import akka.util.Timeout;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
@@ -18,6 +19,7 @@ import com.google.common.base.Verify;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
+import com.google.common.collect.Iterables;
 import com.google.common.primitives.UnsignedLong;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.File;
@@ -34,6 +36,7 @@ import java.util.Queue;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.function.Consumer;
 import java.util.function.UnaryOperator;
 import javax.annotation.Nonnull;
 import javax.annotation.concurrent.NotThreadSafe;
@@ -66,7 +69,6 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.TipProducingDataTree;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.TipProducingDataTreeTip;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
@@ -102,6 +104,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     private final Map<LocalHistoryIdentifier, ShardDataTreeTransactionChain> transactionChains = new HashMap<>();
     private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
     private final Queue<CommitEntry> pendingTransactions = new ArrayDeque<>();
+    private final Queue<CommitEntry> pendingCommits = new ArrayDeque<>();
+    private final Queue<CommitEntry> pendingFinishCommits = new ArrayDeque<>();
     private final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher;
     private final ShardDataChangeListenerPublisher dataChangeListenerPublisher;
     private final Collection<ShardDataTreeMetadata<?>> metadata;
@@ -136,15 +140,17 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType,
+            final YangInstanceIdentifier root,
             final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
             final ShardDataChangeListenerPublisher dataChangeListenerPublisher, final String logContext) {
-        this(shard, schemaContext, InMemoryDataTreeFactory.getInstance().create(treeType),
+        this(shard, schemaContext, InMemoryDataTreeFactory.getInstance().create(treeType, root),
                 treeChangeListenerPublisher, dataChangeListenerPublisher, logContext);
     }
 
     @VisibleForTesting
     public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType) {
-        this(shard, schemaContext, treeType, new DefaultShardDataTreeChangeListenerPublisher(),
+        this(shard, schemaContext, treeType, YangInstanceIdentifier.EMPTY,
+                new DefaultShardDataTreeChangeListenerPublisher(),
                 new DefaultShardDataChangeListenerPublisher(), "");
     }
 
@@ -189,11 +195,15 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return new MetadataShardDataTreeSnapshot(rootNode, metaBuilder.build());
     }
 
+    private boolean anyPendingTransactions() {
+        return !pendingTransactions.isEmpty() || !pendingCommits.isEmpty() || !pendingFinishCommits.isEmpty();
+    }
+
     private void applySnapshot(@Nonnull final ShardDataTreeSnapshot snapshot,
             final UnaryOperator<DataTreeModification> wrapper) throws DataValidationFailedException {
         final Stopwatch elapsed = Stopwatch.createStarted();
 
-        if (!pendingTransactions.isEmpty()) {
+        if (anyPendingTransactions()) {
             LOG.warn("{}: applying state snapshot with pending transactions", logContext);
         }
 
@@ -363,14 +373,14 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     private void payloadReplicationComplete(final TransactionIdentifier txId) {
-        final CommitEntry current = pendingTransactions.peek();
+        final CommitEntry current = pendingFinishCommits.peek();
         if (current == null) {
             LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
             return;
         }
 
         if (!current.cohort.getIdentifier().equals(txId)) {
-            LOG.warn("{}: Head of queue is {}, ignoring consensus on transaction {}", logContext,
+            LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
                 current.cohort.getIdentifier(), txId);
             return;
         }
 
@@ -479,7 +489,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     int getQueueSize() {
-        return pendingTransactions.size();
+        return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
     }
 
     @Override
@@ -527,27 +537,33 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     public Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
-        Collection<ShardDataTreeCohort> ret = new ArrayList<>(pendingTransactions.size());
+        Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());
+
+        for (CommitEntry entry: pendingFinishCommits) {
+            ret.add(entry.cohort);
+        }
+
+        for (CommitEntry entry: pendingCommits) {
+            ret.add(entry.cohort);
+        }
+
         for (CommitEntry entry: pendingTransactions) {
             ret.add(entry.cohort);
         }
 
+        pendingFinishCommits.clear();
+        pendingCommits.clear();
         pendingTransactions.clear();
         tip = dataTree;
         return ret;
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void processNextTransaction() {
-        while (!pendingTransactions.isEmpty()) {
-            final CommitEntry entry = pendingTransactions.peek();
+    private void processNextPendingTransaction() {
+        processNextPending(pendingTransactions, State.CAN_COMMIT_PENDING, entry -> {
             final SimpleShardDataTreeCohort cohort = entry.cohort;
             final DataTreeModification modification = cohort.getDataTreeModification();
 
-            if (cohort.getState() != State.CAN_COMMIT_PENDING) {
-                break;
-            }
-
             LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier());
             Exception cause;
             try {
@@ -576,11 +592,45 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
             // Failure path: propagate the failure, remove the transaction from the queue and loop to the next one
             pendingTransactions.poll().cohort.failedCanCommit(cause);
+        });
+    }
+
+    private void processNextPending() {
+        processNextPendingCommit();
+        processNextPendingTransaction();
+    }
+
+    private void processNextPending(Queue<CommitEntry> queue, State allowedState, Consumer<CommitEntry> processor) {
+        while (!queue.isEmpty()) {
+            final CommitEntry entry = queue.peek();
+            final SimpleShardDataTreeCohort cohort = entry.cohort;
+
+            if (cohort.isFailed()) {
+                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.getIdentifier());
+                queue.remove();
+                continue;
+            }
+
+            if (cohort.getState() == allowedState) {
+                processor.accept(entry);
+            }
+
+            break;
         }
 
         maybeRunOperationOnPendingTransactionsComplete();
     }
 
+    private void processNextPendingCommit() {
+        processNextPending(pendingCommits, State.COMMIT_PENDING,
+            entry -> startCommit(entry.cohort, entry.cohort.getCandidate()));
+    }
+
+    private boolean peekNextPendingCommit() {
+        final CommitEntry first = pendingCommits.peek();
+        return first != null && first.cohort.getState() == State.COMMIT_PENDING;
+    }
+
     void startCanCommit(final SimpleShardDataTreeCohort cohort) {
         final SimpleShardDataTreeCohort current = pendingTransactions.peek().cohort;
         if (!cohort.equals(current)) {
@@ -588,13 +638,13 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             return;
         }
 
-        processNextTransaction();
+        processNextPendingTransaction();
     }
 
     private void failPreCommit(final Exception cause) {
         shard.getShardMBean().incrementFailedTransactionsCount();
         pendingTransactions.poll().cohort.failedPreCommit(cause);
-        processNextTransaction();
+        processNextPendingTransaction();
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
@@ -604,17 +654,14 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         final SimpleShardDataTreeCohort current = entry.cohort;
         Verify.verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);
+
+        LOG.debug("{}: Preparing transaction {}", logContext, current.getIdentifier());
+
         final DataTreeCandidateTip candidate;
         try {
             candidate = tip.prepare(cohort.getDataTreeModification());
-        } catch (Exception e) {
-            failPreCommit(e);
-            return;
-        }
-
-        try {
             cohort.userPreCommit(candidate);
-        } catch (ExecutionException | TimeoutException e) {
+        } catch (ExecutionException | TimeoutException | RuntimeException e) {
             failPreCommit(e);
             return;
         }
 
@@ -623,13 +670,21 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
         tip = Verify.verifyNotNull(candidate);
         entry.lastAccess = shard.ticker().read();
+
+        pendingTransactions.remove();
+        pendingCommits.add(entry);
+
+        LOG.debug("{}: Transaction {} prepared", logContext, current.getIdentifier());
+
         cohort.successfulPreCommit(candidate);
+
+        processNextPendingTransaction();
     }
 
     private void failCommit(final Exception cause) {
         shard.getShardMBean().incrementFailedTransactionsCount();
-        pendingTransactions.poll().cohort.failedCommit(cause);
-        processNextTransaction();
+        pendingFinishCommits.poll().cohort.failedCommit(cause);
+        processNextPending();
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
@@ -639,6 +694,11 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
         LOG.debug("{}: Resuming commit of transaction {}", logContext, txId);
 
+        if (tip == candidate) {
+            // All pending candidates have been committed, reset the tip to the data tree.
+            tip = dataTree;
+        }
+
         try {
             dataTree.commit(candidate);
         } catch (Exception e) {
@@ -647,50 +707,73 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             return;
         }
 
-        // All pending candidates have been committed, reset the tip to the data tree
-        if (tip == candidate) {
-            tip = dataTree;
-        }
-
         shard.getShardMBean().incrementCommittedTransactionCount();
         shard.getShardMBean().setLastCommittedTransactionTime(System.currentTimeMillis());
 
         // FIXME: propagate journal index
-        pendingTransactions.poll().cohort.successfulCommit(UnsignedLong.ZERO);
+        pendingFinishCommits.poll().cohort.successfulCommit(UnsignedLong.ZERO);
 
         LOG.trace("{}: Transaction {} committed, proceeding to notify", logContext, txId);
         notifyListeners(candidate);
 
-        processNextTransaction();
+        processNextPending();
     }
 
     void startCommit(final SimpleShardDataTreeCohort cohort, final DataTreeCandidate candidate) {
-        final CommitEntry entry = pendingTransactions.peek();
+        final CommitEntry entry = pendingCommits.peek();
         Preconditions.checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);
 
         final SimpleShardDataTreeCohort current = entry.cohort;
-        Verify.verify(cohort.equals(current), "Attempted to commit %s while %s is pending", cohort, current);
-
-        if (shard.canSkipPayload() || candidate.getRootNode().getModificationType() == ModificationType.UNMODIFIED) {
-            LOG.debug("{}: No replication required, proceeding to finish commit", logContext);
-            finishCommit(cohort);
+        if (!cohort.equals(current)) {
+            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.getIdentifier());
             return;
         }
 
+        LOG.debug("{}: Starting commit for transaction {}", logContext, current.getIdentifier());
+
         final TransactionIdentifier txId = cohort.getIdentifier();
         final Payload payload;
         try {
             payload = CommitTransactionPayload.create(txId, candidate);
         } catch (IOException e) {
             LOG.error("{}: Failed to encode transaction {} candidate {}", logContext, txId, candidate, e);
-            pendingTransactions.poll().cohort.failedCommit(e);
+            pendingCommits.poll().cohort.failedCommit(e);
+            processNextPending();
             return;
         }
 
+        // We process next transactions pending canCommit before we call persistPayload to possibly progress subsequent
+        // transactions to the COMMIT_PENDING state so the payloads can be batched for replication. This is done for
+        // single-shard transactions that immediately transition from canCommit to preCommit to commit. Note that
+        // if the next pending transaction is progressed to COMMIT_PENDING and this method (startCommit) is called,
+        // the next transaction will not attempt to replicate b/c the current transaction is still at the head of the
+        // pendingCommits queue.
+        processNextPendingTransaction();
+
+        // After processing next pending transactions, we can now remove the current transaction from pendingCommits.
+        // Note this must be done before the call to peekNextPendingCommit below so we check the next transaction
+        // in order to properly determine the batchHint flag for the call to persistPayload.
+        pendingCommits.remove();
+        pendingFinishCommits.add(entry);
+
+        // See if the next transaction is pending commit (ie in the COMMIT_PENDING state) so it can be batched with
+        // this transaction for replication.
+        boolean replicationBatchHint = peekNextPendingCommit();
+
         // Once completed, we will continue via payloadReplicationComplete
+        shard.persistPayload(txId, payload, replicationBatchHint);
+
         entry.lastAccess = shard.ticker().read();
-        shard.persistPayload(txId, payload);
+
+        LOG.debug("{}: Transaction {} submitted to persistence", logContext, txId);
+
+        // Process the next transaction pending commit, if any. If there is one it will be batched with this
+        // transaction for replication.
+        processNextPendingCommit();
+    }
+
+    Collection<ActorRef> getCohortActors() {
+        return cohortRegistry.getCohortActors();
     }
 
     void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
@@ -710,23 +793,26 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     void checkForExpiredTransactions(final long transactionCommitTimeoutMillis) {
         final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
         final long now = shard.ticker().read();
-        final CommitEntry currentTx = pendingTransactions.peek();
+
+        final Queue<CommitEntry> currentQueue = !pendingFinishCommits.isEmpty() ? pendingFinishCommits :
+            !pendingCommits.isEmpty() ? pendingCommits : pendingTransactions;
+        final CommitEntry currentTx = currentQueue.peek();
         if (currentTx != null && currentTx.lastAccess + timeout < now) {
             LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
                 currentTx.cohort.getIdentifier(), transactionCommitTimeoutMillis, currentTx.cohort.getState());
             boolean processNext = true;
             switch (currentTx.cohort.getState()) {
                 case CAN_COMMIT_PENDING:
-                    pendingTransactions.remove().cohort.failedCanCommit(new TimeoutException());
+                    currentQueue.remove().cohort.failedCanCommit(new TimeoutException());
                     break;
                 case CAN_COMMIT_COMPLETE:
                     // The suppression of the FindBugs "DB_DUPLICATE_SWITCH_CLAUSES" warning pertains to this clause
                     // whose code is duplicated with PRE_COMMIT_COMPLETE. The clauses aren't combined in case the code
                     // in PRE_COMMIT_COMPLETE is changed.
-                    pendingTransactions.remove().cohort.reportFailure(new TimeoutException());
+                    currentQueue.remove().cohort.reportFailure(new TimeoutException());
                     break;
                 case PRE_COMMIT_PENDING:
-                    pendingTransactions.remove().cohort.failedPreCommit(new TimeoutException());
+                    currentQueue.remove().cohort.failedPreCommit(new TimeoutException());
                     break;
                 case PRE_COMMIT_COMPLETE:
                     // FIXME: this is a legacy behavior problem. Three-phase commit protocol specifies that after we
@@ -746,7 +832,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                     // In order to make the pre-commit timer working across failovers, though, we need
                     // a per-shard cluster-wide monotonic time, so a follower becoming the leader can accurately
                     // restart the timer.
-                    pendingTransactions.remove().cohort.reportFailure(new TimeoutException());
+                    currentQueue.remove().cohort.reportFailure(new TimeoutException());
                     break;
                 case COMMIT_PENDING:
                     LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
@@ -759,17 +845,18 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 case FAILED:
                 case READY:
                 default:
-                    pendingTransactions.remove();
+                    currentQueue.remove();
             }
 
             if (processNext) {
-                processNextTransaction();
+                processNextPending();
            }
        }
    }
 
     boolean startAbort(final SimpleShardDataTreeCohort cohort) {
-        final Iterator<CommitEntry> it = pendingTransactions.iterator();
+        final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
+            pendingTransactions).iterator();
         if (!it.hasNext()) {
             LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.getIdentifier());
             return true;
@@ -783,8 +870,11 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 cohort.getIdentifier());
 
             it.remove();
-            rebasePreCommittedTransactions(it, dataTree);
-            processNextTransaction();
+            if (cohort.getCandidate() != null) {
+                rebaseTransactions(it, dataTree);
+            }
+
+            processNextPending();
             return true;
         }
 
@@ -792,16 +882,20 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             return false;
         }
 
-        TipProducingDataTreeTip newTip = dataTree;
+        TipProducingDataTreeTip newTip = MoreObjects.firstNonNull(first.cohort.getCandidate(), dataTree);
         while (it.hasNext()) {
             final CommitEntry e = it.next();
             if (cohort.equals(e.cohort)) {
                 LOG.debug("{}: aborting queued transaction {}", logContext, cohort.getIdentifier());
+
                 it.remove();
-                rebasePreCommittedTransactions(it, newTip);
+                if (cohort.getCandidate() != null) {
+                    rebaseTransactions(it, newTip);
+                }
+
                 return true;
             } else {
-                newTip = cohort.getCandidate();
+                newTip = MoreObjects.firstNonNull(e.cohort.getCandidate(), newTip);
             }
         }
 
@@ -810,8 +904,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void rebasePreCommittedTransactions(Iterator<CommitEntry> iter, TipProducingDataTreeTip newTip) {
-        tip = newTip;
+    private void rebaseTransactions(Iterator<CommitEntry> iter, @Nonnull TipProducingDataTreeTip newTip) {
+        tip = Preconditions.checkNotNull(newTip);
         while (iter.hasNext()) {
             final SimpleShardDataTreeCohort cohort = iter.next().cohort;
             if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
@@ -847,7 +941,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     private void maybeRunOperationOnPendingTransactionsComplete() {
-        if (runOnPendingTransactionsComplete != null && pendingTransactions.isEmpty()) {
+        if (runOnPendingTransactionsComplete != null && !anyPendingTransactions()) {
            LOG.debug("{}: Pending transactions complete - running operation {}", logContext,
                runOnPendingTransactionsComplete);
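
The sketch below illustrates the commit pipeline this change introduces: a transaction is validated at the head of pendingTransactions, moves to pendingCommits once prepared, and then waits in pendingFinishCommits for replication consensus before it is applied; only the head of each queue is ever processed, and the batch hint passed to persistPayload is derived from whether the next entry is already commit-pending. It is a standalone simplification, not code from the patch: apart from the three queue names and the overall flow, every class and method in it (CommitPipelineSketch, prepareHead, startCommitHead, payloadReplicated) is hypothetical and does not exist in the OpenDaylight code base.

// CommitPipelineSketch.java -- illustrative model of the three-stage pending-transaction pipeline.
import java.util.ArrayDeque;
import java.util.Queue;

public final class CommitPipelineSketch {
    enum State { CAN_COMMIT_PENDING, COMMIT_PENDING, FINISH_COMMIT_PENDING, COMMITTED }

    static final class CommitEntry {
        final String txId;
        State state = State.CAN_COMMIT_PENDING;

        CommitEntry(final String txId) {
            this.txId = txId;
        }
    }

    // Three stages, mirroring pendingTransactions -> pendingCommits -> pendingFinishCommits.
    private final Queue<CommitEntry> pendingTransactions = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingCommits = new ArrayDeque<>();
    private final Queue<CommitEntry> pendingFinishCommits = new ArrayDeque<>();

    void submit(final String txId) {
        pendingTransactions.add(new CommitEntry(txId));
    }

    // Stand-in for canCommit/preCommit of the head entry: validate it and move it to the
    // pendingCommits stage, where it waits for replication to be started.
    void prepareHead() {
        final CommitEntry head = pendingTransactions.peek();
        if (head != null && head.state == State.CAN_COMMIT_PENDING) {
            pendingTransactions.remove();
            head.state = State.COMMIT_PENDING;
            pendingCommits.add(head);
        }
    }

    // Start replication for the head of pendingCommits; the batch hint is true when the next
    // entry is also commit-pending, so both payloads could be replicated together.
    boolean startCommitHead() {
        final CommitEntry head = pendingCommits.poll();
        if (head == null) {
            return false;
        }
        head.state = State.FINISH_COMMIT_PENDING;
        pendingFinishCommits.add(head);
        final CommitEntry next = pendingCommits.peek();
        final boolean batchHint = next != null && next.state == State.COMMIT_PENDING;
        System.out.println("replicating " + head.txId + " (batchHint=" + batchHint + ")");
        return true;
    }

    // Consensus reached for txId: only the head of pendingFinishCommits may finish its commit.
    void payloadReplicated(final String txId) {
        final CommitEntry head = pendingFinishCommits.peek();
        if (head != null && head.txId.equals(txId)) {
            pendingFinishCommits.remove();
            head.state = State.COMMITTED;
            System.out.println("committed " + head.txId);
        }
    }

    int queueSize() {
        return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
    }

    public static void main(final String[] args) {
        final CommitPipelineSketch pipeline = new CommitPipelineSketch();
        pipeline.submit("tx-0");
        pipeline.submit("tx-1");
        pipeline.prepareHead();
        pipeline.prepareHead();
        pipeline.startCommitHead();   // prints batchHint=true because tx-1 is already commit-pending
        pipeline.startCommitHead();
        pipeline.payloadReplicated("tx-0");
        pipeline.payloadReplicated("tx-1");
        System.out.println("outstanding entries: " + pipeline.queueSize());
    }
}

Splitting the single queue into three keeps the cheap canCommit/preCommit work flowing while earlier transactions wait on replication, which is what allows the patch to batch subsequent COMMIT_PENDING payloads behind the one currently being persisted.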