X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FShardCommitCoordinator.java;h=45fa7727e4f125e11d73658c699ecbc779715ceb;hb=376932ba0a2feaee720d8e215a71b6fdbc7ebc80;hp=1b838ae0e6c6e3c32ae604846d32ae280e32bc78;hpb=c04117c66b63366a2f402a06d20f9980bb7283cb;p=controller.git

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
index 1b838ae0e6..45fa7727e4 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
@@ -8,29 +8,43 @@
 package org.opendaylight.controller.cluster.datastore;

 import akka.actor.ActorRef;
-import akka.actor.Status;
+import akka.actor.Status.Failure;
 import akka.serialization.Serialization;
+import akka.util.Timeout;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
+import java.util.concurrent.TimeoutException;
+import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
+import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry.State;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
 import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModificationsCursor;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
+import scala.concurrent.duration.Duration;

 /**
  * Coordinates commits for a shard ensuring only one concurrent 3-phase commit.
@@ -50,6 +64,8 @@ class ShardCommitCoordinator {

     private final ShardDataTree dataTree;

+    private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
+
     // We use a LinkedList here to avoid synchronization overhead with concurrent queue impls
     // since this should only be accessed on the shard's dispatcher.
     private final Queue<CohortEntry> queuedCohortEntries = new LinkedList<>();
@@ -67,8 +83,13 @@ class ShardCommitCoordinator {

     private ReadyTransactionReply readyTransactionReply;

-    ShardCommitCoordinator(ShardDataTree dataTree,
-            long cacheExpiryTimeoutInMillis, int queueCapacity, ActorRef shardActor, Logger log, String name) {
+    private Runnable runOnPendingTransactionsComplete;
+
+
+    private static final Timeout COMMIT_STEP_TIMEOUT = new Timeout(Duration.create(5, TimeUnit.SECONDS));
+
+    ShardCommitCoordinator(ShardDataTree dataTree, long cacheExpiryTimeoutInMillis, int queueCapacity, Logger log,
+            String name) {
         this.queueCapacity = queueCapacity;
         this.log = log;
@@ -77,6 +98,14 @@ class ShardCommitCoordinator {
         this.cacheExpiryTimeoutInMillis = cacheExpiryTimeoutInMillis;
     }

+    int getQueueSize() {
+        return queuedCohortEntries.size();
+    }
+
+    int getCohortCacheSize() {
+        return cohortCache.size();
+    }
+
     void setQueueCapacity(int queueCapacity) {
         this.queueCapacity = queueCapacity;
     }
@@ -92,16 +121,20 @@ class ShardCommitCoordinator {
     private boolean queueCohortEntry(CohortEntry cohortEntry, ActorRef sender, Shard shard) {
         if(queuedCohortEntries.size() < queueCapacity) {
             queuedCohortEntries.offer(cohortEntry);
+
+            log.debug("{}: Enqueued transaction {}, queue size {}", name, cohortEntry.getTransactionID(),
+                    queuedCohortEntries.size());
+
             return true;
         } else {
             cohortCache.remove(cohortEntry.getTransactionID());

-            RuntimeException ex = new RuntimeException(
+            final RuntimeException ex = new RuntimeException(
                     String.format("%s: Could not enqueue transaction %s - the maximum commit queue"+
                         " capacity %d has been reached.", name, cohortEntry.getTransactionID(), queueCapacity));
             log.error(ex.getMessage());
-            sender.tell(new Status.Failure(ex), shard.self());
+            sender.tell(new Failure(ex), shard.self());
             return false;
         }
     }
@@ -113,47 +146,30 @@ class ShardCommitCoordinator {
      * @param ready the ForwardedReadyTransaction message to process
      * @param sender the sender of the message
     * @param shard the transaction's shard actor
+     * @param schema
      */
-    void handleForwardedReadyTransaction(ForwardedReadyTransaction ready, ActorRef sender, Shard shard) {
+    void handleForwardedReadyTransaction(ForwardedReadyTransaction ready, ActorRef sender, Shard shard,
+            SchemaContext schema) {
         log.debug("{}: Readying transaction {}, client version {}", name,
                 ready.getTransactionID(), ready.getTxnClientVersion());

-        CohortEntry cohortEntry = new CohortEntry(ready.getTransactionID(), ready.getCohort(),
-                (MutableCompositeModification) ready.getModification());
+        final ShardDataTreeCohort cohort = ready.getTransaction().ready();
+        final CohortEntry cohortEntry = new CohortEntry(ready.getTransactionID(), cohort, cohortRegistry, schema, ready.getTxnClientVersion());
         cohortCache.put(ready.getTransactionID(), cohortEntry);

         if(!queueCohortEntry(cohortEntry, sender, shard)) {
             return;
         }

-        if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
-            // Return our actor path as we'll handle the three phase commit except if the Tx client
-            // version < Helium-1 version which means the Tx was initiated by a base Helium version node.
-            // In that case, the subsequent 3-phase commit messages won't contain the transactionId so to
-            // maintain backwards compatibility, we create a separate cohort actor to provide the compatible behavior.
-            ActorRef replyActorPath = shard.self();
-            if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
-                log.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", name);
-                replyActorPath = shard.getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
-                        ready.getTransactionID()));
-            }
-
-            ReadyTransactionReply readyTransactionReply =
-                    new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
-                            ready.getTxnClientVersion());
-            sender.tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
-                    readyTransactionReply, shard.self());
+        if(ready.isDoImmediateCommit()) {
+            cohortEntry.setDoImmediateCommit(true);
+            cohortEntry.setReplySender(sender);
+            cohortEntry.setShard(shard);
+            handleCanCommit(cohortEntry);
         } else {
-            if(ready.isDoImmediateCommit()) {
-                cohortEntry.setDoImmediateCommit(true);
-                cohortEntry.setReplySender(sender);
-                cohortEntry.setShard(shard);
-                handleCanCommit(cohortEntry);
-            } else {
-                // The caller does not want immediate commit - the 3-phase commit will be coordinated by the
-                // front-end so send back a ReadyTransactionReply with our actor path.
-                sender.tell(readyTransactionReply(shard), shard.self());
-            }
+            // The caller does not want immediate commit - the 3-phase commit will be coordinated by the
+            // front-end so send back a ReadyTransactionReply with our actor path.
+            sender.tell(readyTransactionReply(shard), shard.self());
         }
     }
@@ -167,12 +183,12 @@ class ShardCommitCoordinator {
      * @param sender the sender of the message
      * @param shard the transaction's shard actor
      */
-    void handleBatchedModifications(BatchedModifications batched, ActorRef sender, Shard shard) {
+    void handleBatchedModifications(BatchedModifications batched, ActorRef sender, Shard shard, SchemaContext schema) {
         CohortEntry cohortEntry = cohortCache.get(batched.getTransactionID());
         if(cohortEntry == null) {
             cohortEntry = new CohortEntry(batched.getTransactionID(),
-                    dataTree.newReadWriteTransaction(batched.getTransactionID(),
-                        batched.getTransactionChainID()));
+                    dataTree.newReadWriteTransaction(batched.getTransactionID(), batched.getTransactionChainID()),
+                    cohortRegistry, schema, batched.getVersion());
             cohortCache.put(batched.getTransactionID(), cohortEntry);
         }
@@ -184,6 +200,18 @@ class ShardCommitCoordinator {
         cohortEntry.applyModifications(batched.getModifications());

         if(batched.isReady()) {
+            if(cohortEntry.getLastBatchedModificationsException() != null) {
+                cohortCache.remove(cohortEntry.getTransactionID());
+                throw cohortEntry.getLastBatchedModificationsException();
+            }
+
+            if(cohortEntry.getTotalBatchedModificationsReceived() != batched.getTotalMessagesSent()) {
+                cohortCache.remove(cohortEntry.getTransactionID());
+                throw new IllegalStateException(String.format(
+                        "The total number of batched messages received %d does not match the number sent %d",
+                        cohortEntry.getTotalBatchedModificationsReceived(), batched.getTotalMessagesSent()));
+            }
+
             if(!queueCohortEntry(cohortEntry, sender, shard)) {
                 return;
             }
@@ -209,15 +237,19 @@ class ShardCommitCoordinator {

     /**
      * This method handles {@link ReadyLocalTransaction} message. All transaction modifications have
-     * been prepared beforehand by the sender and we just need to drive them through into the dataTree.
+     * been prepared beforehand by the sender and we just need to drive them through into the
+     * dataTree.
      *
      * @param message the ReadyLocalTransaction message to process
      * @param sender the sender of the message
      * @param shard the transaction's shard actor
      */
-    void handleReadyLocalTransaction(ReadyLocalTransaction message, ActorRef sender, Shard shard) {
-        final ShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(dataTree, message.getModification());
-        final CohortEntry cohortEntry = new CohortEntry(message.getTransactionID(), cohort);
+    void handleReadyLocalTransaction(ReadyLocalTransaction message, ActorRef sender, Shard shard,
+            SchemaContext schema) {
+        final ShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(dataTree, message.getModification(),
+                message.getTransactionID());
+        final CohortEntry cohortEntry = new CohortEntry(message.getTransactionID(), cohort, cohortRegistry, schema,
+                DataStoreVersions.CURRENT_VERSION);
         cohortCache.put(message.getTransactionID(), cohortEntry);
         cohortEntry.setDoImmediateCommit(message.isDoCommitOnReady());
@@ -236,6 +268,36 @@ class ShardCommitCoordinator {
         }
     }

+    Collection<BatchedModifications> createForwardedBatchedModifications(final BatchedModifications from,
+            final int maxModificationsPerBatch) {
+        CohortEntry cohortEntry = getAndRemoveCohortEntry(from.getTransactionID());
+        if(cohortEntry == null || cohortEntry.getTransaction() == null) {
+            return Collections.singletonList(from);
+        }
+
+        cohortEntry.applyModifications(from.getModifications());
+
+        final LinkedList<BatchedModifications> newModifications = new LinkedList<>();
+        cohortEntry.getTransaction().getSnapshot().applyToCursor(new AbstractBatchedModificationsCursor() {
+            @Override
+            protected BatchedModifications getModifications() {
+                if(newModifications.isEmpty() ||
+                        newModifications.getLast().getModifications().size() >= maxModificationsPerBatch) {
+                    newModifications.add(new BatchedModifications(from.getTransactionID(),
+                            from.getVersion(), from.getTransactionChainID()));
+                }

+                return newModifications.getLast();
+            }
+        });
+
+        BatchedModifications last = newModifications.getLast();
+        last.setDoCommitOnReady(from.isDoCommitOnReady());
+        last.setReady(from.isReady());
+        last.setTotalMessagesSent(newModifications.size());
+        return newModifications;
+    }
+
     private void handleCanCommit(CohortEntry cohortEntry) {
         String transactionID = cohortEntry.getTransactionID();
@@ -261,8 +323,9 @@ class ShardCommitCoordinator {
             doCanCommit(currentCohortEntry);
         } else {
             if(log.isDebugEnabled()) {
-                log.debug("{}: Tx {} is the next pending canCommit - skipping {} for now",
-                        name, queuedCohortEntries.peek().getTransactionID(), transactionID);
+                log.debug("{}: Tx {} is the next pending canCommit - skipping {} for now", name,
+                        queuedCohortEntries.peek() != null ? queuedCohortEntries.peek().getTransactionID() : "???",
+                        transactionID);
             }
         }
     }
@@ -284,7 +347,7 @@ class ShardCommitCoordinator {
             IllegalStateException ex = new IllegalStateException(
                     String.format("%s: No cohort entry found for transaction %s", name, transactionID));
             log.error(ex.getMessage());
-            sender.tell(new Status.Failure(ex), shard.self());
+            sender.tell(new Failure(ex), shard.self());
             return;
         }
@@ -297,10 +360,7 @@ class ShardCommitCoordinator {
     private void doCanCommit(final CohortEntry cohortEntry) {
         boolean canCommit = false;
         try {
-            // We block on the future here so we don't have to worry about possibly accessing our
-            // state on a different thread outside of our dispatcher. Also, the data store
-            // currently uses a same thread executor anyway.
-            canCommit = cohortEntry.getCohort().canCommit().get();
+            canCommit = cohortEntry.canCommit();

             log.debug("{}: canCommit for {}: {}", name, cohortEntry.getTransactionID(), canCommit);
@@ -308,13 +368,14 @@ class ShardCommitCoordinator {
             if(canCommit) {
                 doCommit(cohortEntry);
             } else {
-                cohortEntry.getReplySender().tell(new Status.Failure(new TransactionCommitFailedException(
+                cohortEntry.getReplySender().tell(new Failure(new TransactionCommitFailedException(
                         "Can Commit failed, no detailed cause available.")), cohortEntry.getShard().self());
             }
         } else {
             cohortEntry.getReplySender().tell(
-                    canCommit ? CanCommitTransactionReply.YES.toSerializable() :
-                        CanCommitTransactionReply.NO.toSerializable(), cohortEntry.getShard().self());
+                    canCommit ? CanCommitTransactionReply.yes(cohortEntry.getClientVersion()).toSerializable() :
+                        CanCommitTransactionReply.no(cohortEntry.getClientVersion()).toSerializable(),
+                        cohortEntry.getShard().self());
         }
     } catch (Exception e) {
         log.debug("{}: An exception occurred during canCommit", name, e);
@@ -324,7 +385,7 @@ class ShardCommitCoordinator {
             failure = e.getCause();
         }

-        cohortEntry.getReplySender().tell(new Status.Failure(failure), cohortEntry.getShard().self());
+        cohortEntry.getReplySender().tell(new Failure(failure), cohortEntry.getShard().self());
     } finally {
         if(!canCommit) {
             // Remove the entry from the cache now.
@@ -344,10 +405,7 @@ class ShardCommitCoordinator {
         // normally fail since we ensure only one concurrent 3-phase commit.

         try {
-            // We block on the future here so we don't have to worry about possibly accessing our
-            // state on a different thread outside of our dispatcher. Also, the data store
-            // currently uses a same thread executor anyway.
-            cohortEntry.getCohort().preCommit().get();
+            cohortEntry.preCommit();

             cohortEntry.getShard().continueCommit(cohortEntry);
@@ -357,7 +415,7 @@ class ShardCommitCoordinator {
         } catch (Exception e) {
             log.error("{} An exception occurred while preCommitting transaction {}", name,
                     cohortEntry.getTransactionID(), e);
-            cohortEntry.getReplySender().tell(new akka.actor.Status.Failure(e), cohortEntry.getShard().self());
+            cohortEntry.getReplySender().tell(new Failure(e), cohortEntry.getShard().self());
             currentTransactionComplete(cohortEntry.getTransactionID(), true);
         }
@@ -384,7 +442,7 @@ class ShardCommitCoordinator {
                     String.format("%s: Cannot commit transaction %s - it is not the current transaction",
                             name, transactionID));
             log.error(ex.getMessage());
-            sender.tell(new akka.actor.Status.Failure(ex), shard.self());
+            sender.tell(new Failure(ex), shard.self());
             return false;
         }
@@ -392,6 +450,137 @@ class ShardCommitCoordinator {
         return doCommit(cohortEntry);
     }

+    void handleAbort(final String transactionID, final ActorRef sender, final Shard shard) {
+        CohortEntry cohortEntry = getCohortEntryIfCurrent(transactionID);
+        if(cohortEntry != null) {
+            // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
+            // aborted during replication in which case we may still commit locally if replication
+            // succeeds.
+            currentTransactionComplete(transactionID, false);
+        } else {
+            cohortEntry = getAndRemoveCohortEntry(transactionID);
+        }
+
+        if(cohortEntry == null) {
+            return;
+        }
+
+        log.debug("{}: Aborting transaction {}", name, transactionID);
+
+        final ActorRef self = shard.getSelf();
+        try {
+            cohortEntry.abort();
+
+            shard.getShardMBean().incrementAbortTransactionsCount();
+
+            if(sender != null) {
+                sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self);
+            }
+        } catch (Exception e) {
+            log.error("{}: An exception happened during abort", name, e);
+
+            if(sender != null) {
+                sender.tell(new Failure(e), self);
+            }
+        }
+    }
+
+    void checkForExpiredTransactions(final long timeout, final Shard shard) {
+        CohortEntry cohortEntry = getCurrentCohortEntry();
+        if(cohortEntry != null) {
+            if(cohortEntry.isExpired(timeout)) {
+                log.warn("{}: Current transaction {} has timed out after {} ms - aborting",
+                        name, cohortEntry.getTransactionID(), timeout);
+
+                handleAbort(cohortEntry.getTransactionID(), null, shard);
+            }
+        }
+
+        cleanupExpiredCohortEntries();
+    }
+
+    void abortPendingTransactions(final String reason, final Shard shard) {
+        if(currentCohortEntry == null && queuedCohortEntries.isEmpty()) {
+            return;
+        }
+
+        List<CohortEntry> cohortEntries = getAndClearPendingCohortEntries();
+
+        log.debug("{}: Aborting {} pending queued transactions", name, cohortEntries.size());
+
+        for(CohortEntry cohortEntry: cohortEntries) {
+            if(cohortEntry.getReplySender() != null) {
+                cohortEntry.getReplySender().tell(new Failure(new RuntimeException(reason)), shard.self());
+            }
+        }
+    }
+
+    private List<CohortEntry> getAndClearPendingCohortEntries() {
+        List<CohortEntry> cohortEntries = new ArrayList<>();
+
+        if(currentCohortEntry != null) {
+            cohortEntries.add(currentCohortEntry);
+            cohortCache.remove(currentCohortEntry.getTransactionID());
+            currentCohortEntry = null;
+        }
+
+        for(CohortEntry cohortEntry: queuedCohortEntries) {
+            cohortEntries.add(cohortEntry);
+            cohortCache.remove(cohortEntry.getTransactionID());
+        }
+
+        queuedCohortEntries.clear();
+        return cohortEntries;
+    }
+
+    Collection convertPendingTransactionsToMessages(final int maxModificationsPerBatch) {
+        if(currentCohortEntry == null && queuedCohortEntries.isEmpty()) {
+            return Collections.emptyList();
+        }
+
+        Collection messages = new ArrayList<>();
+        List<CohortEntry> cohortEntries = getAndClearPendingCohortEntries();
+        for(CohortEntry cohortEntry: cohortEntries) {
+            if(cohortEntry.isExpired(cacheExpiryTimeoutInMillis) || cohortEntry.isAborted()) {
+                continue;
+            }
+
+            final LinkedList<BatchedModifications> newModifications = new LinkedList<>();
+            cohortEntry.getDataTreeModification().applyToCursor(new AbstractBatchedModificationsCursor() {
+                @Override
+                protected BatchedModifications getModifications() {
+                    if(newModifications.isEmpty() ||
+                            newModifications.getLast().getModifications().size() >= maxModificationsPerBatch) {
+                        newModifications.add(new BatchedModifications(cohortEntry.getTransactionID(),
+                                cohortEntry.getClientVersion(), ""));
+                    }
+
+                    return newModifications.getLast();
+                }
+            });
+
+            if(!newModifications.isEmpty()) {
+                BatchedModifications last = newModifications.getLast();
+                last.setDoCommitOnReady(cohortEntry.isDoImmediateCommit());
+                last.setReady(true);
+                last.setTotalMessagesSent(newModifications.size());
+                messages.addAll(newModifications);
+
+                if(!cohortEntry.isDoImmediateCommit() && cohortEntry.getState() == State.CAN_COMMITTED) {
+                    messages.add(new CanCommitTransaction(cohortEntry.getTransactionID(),
+                            cohortEntry.getClientVersion()));
+                }
+
+                if(!cohortEntry.isDoImmediateCommit() && cohortEntry.getState() == State.PRE_COMMITTED) {
+                    messages.add(new CommitTransaction(cohortEntry.getTransactionID(),
+                            cohortEntry.getClientVersion()));
+                }
+            }
+        }
+
+        return messages;
+    }
+
     /**
      * Returns the cohort entry for the Tx commit currently in progress if the given transaction ID
      * matches the current entry.
@@ -400,7 +589,7 @@ class ShardCommitCoordinator {
      * @return the current CohortEntry or null if the given transaction ID does not match the
      *         current entry.
      */
-    public CohortEntry getCohortEntryIfCurrent(String transactionID) {
+    CohortEntry getCohortEntryIfCurrent(String transactionID) {
         if(isCurrentTransaction(transactionID)) {
             return currentCohortEntry;
         }
@@ -408,15 +597,15 @@ class ShardCommitCoordinator {
         return null;
     }

-    public CohortEntry getCurrentCohortEntry() {
+    CohortEntry getCurrentCohortEntry() {
        return currentCohortEntry;
    }

-    public CohortEntry getAndRemoveCohortEntry(String transactionID) {
+    CohortEntry getAndRemoveCohortEntry(String transactionID) {
        return cohortCache.remove(transactionID);
    }

-    public boolean isCurrentTransaction(String transactionID) {
+    boolean isCurrentTransaction(String transactionID) {
        return currentCohortEntry != null &&
                currentCohortEntry.getTransactionID().equals(transactionID);
    }
@@ -430,7 +619,7 @@ class ShardCommitCoordinator {
      * @param removeCohortEntry if true the CohortEntry for the transaction is also removed from
      *        the cache.
      */
-    public void currentTransactionComplete(String transactionID, boolean removeCohortEntry) {
+    void currentTransactionComplete(String transactionID, boolean removeCohortEntry) {
         if(removeCohortEntry) {
             cohortCache.remove(transactionID);
         }
@@ -447,9 +636,9 @@ class ShardCommitCoordinator {
     private void maybeProcessNextCohortEntry() {
         // Check if there's a next cohort entry waiting in the queue and if it is ready to commit. Also
         // clean out expired entries.
-        Iterator<CohortEntry> iter = queuedCohortEntries.iterator();
+        final Iterator<CohortEntry> iter = queuedCohortEntries.iterator();
         while(iter.hasNext()) {
-            CohortEntry next = iter.next();
+            final CohortEntry next = iter.next();
             if(next.isReadyToCommit()) {
                 if(currentCohortEntry == null) {
                     if(log.isDebugEnabled()) {
@@ -466,49 +655,81 @@ class ShardCommitCoordinator {
             } else if(next.isExpired(cacheExpiryTimeoutInMillis)) {
                 log.warn("{}: canCommit for transaction {} was not received within {} ms - entry removed from cache",
                         name, next.getTransactionID(), cacheExpiryTimeoutInMillis);
-
-                iter.remove();
-                cohortCache.remove(next.getTransactionID());
-            } else {
+            } else if(!next.isAborted()) {
                 break;
             }
+
+            iter.remove();
+            cohortCache.remove(next.getTransactionID());
         }
+
+        maybeRunOperationOnPendingTransactionsComplete();
     }

     void cleanupExpiredCohortEntries() {
         maybeProcessNextCohortEntry();
     }

+    void setRunOnPendingTransactionsComplete(Runnable operation) {
+        runOnPendingTransactionsComplete = operation;
+        maybeRunOperationOnPendingTransactionsComplete();
+    }
+
+    private void maybeRunOperationOnPendingTransactionsComplete() {
+        if(runOnPendingTransactionsComplete != null && currentCohortEntry == null && queuedCohortEntries.isEmpty()) {
+            log.debug("{}: Pending transactions complete - running operation {}", name, runOnPendingTransactionsComplete);
+
+            runOnPendingTransactionsComplete.run();
+            runOnPendingTransactionsComplete = null;
+        }
+    }
+
     @VisibleForTesting
     void setCohortDecorator(CohortDecorator cohortDecorator) {
         this.cohortDecorator = cohortDecorator;
     }

+    void processCohortRegistryCommand(ActorRef sender, CohortRegistryCommand message) {
+        cohortRegistry.process(sender, message);
+    }
+
     static class CohortEntry {
+        enum State {
+            PENDING,
+            CAN_COMMITTED,
+            PRE_COMMITTED,
+            COMMITTED,
+            ABORTED
+        }
+
         private final String transactionID;
         private ShardDataTreeCohort cohort;
         private final ReadWriteShardDataTreeTransaction transaction;
+        private RuntimeException lastBatchedModificationsException;
         private ActorRef replySender;
         private Shard shard;
         private boolean doImmediateCommit;
         private final Stopwatch lastAccessTimer = Stopwatch.createStarted();
+        private int totalBatchedModificationsReceived;
+        private State state = State.PENDING;
+        private final short clientVersion;
+        private final CompositeDataTreeCohort userCohorts;

-        CohortEntry(String transactionID, ReadWriteShardDataTreeTransaction transaction) {
+        CohortEntry(String transactionID, ReadWriteShardDataTreeTransaction transaction,
+                DataTreeCohortActorRegistry cohortRegistry, SchemaContext schema, short clientVersion) {
            this.transaction = Preconditions.checkNotNull(transaction);
            this.transactionID = transactionID;
+            this.clientVersion = clientVersion;
+            this.userCohorts = new CompositeDataTreeCohort(cohortRegistry, transactionID, schema, COMMIT_STEP_TIMEOUT);
        }

-        CohortEntry(String transactionID, ShardDataTreeCohort cohort,
-                MutableCompositeModification compositeModification) {
-            this.transactionID = transactionID;
-            this.cohort = cohort;
-            this.transaction = null;
-        }
-
-        CohortEntry(String transactionID, ShardDataTreeCohort cohort) {
+        CohortEntry(String transactionID, ShardDataTreeCohort cohort, DataTreeCohortActorRegistry cohortRegistry,
+                SchemaContext schema, short clientVersion) {
            this.transactionID = transactionID;
            this.cohort = cohort;
            this.transaction = null;
+            this.clientVersion = clientVersion;
+            this.userCohorts = new CompositeDataTreeCohort(cohortRegistry, transactionID, schema, COMMIT_STEP_TIMEOUT);
        }

        void updateLastAccessTime() {
@@ -520,16 +741,80 @@ class ShardCommitCoordinator {
            return transactionID;
        }

-        ShardDataTreeCohort getCohort() {
-            return cohort;
+        short getClientVersion() {
+            return clientVersion;
+        }
+
+        State getState() {
+            return state;
+        }
+
+        DataTreeCandidate getCandidate() {
+            return cohort.getCandidate();
+        }
+
+        DataTreeModification getDataTreeModification() {
+            return cohort.getDataTreeModification();
+        }
+
+        ReadWriteShardDataTreeTransaction getTransaction() {
+            return transaction;
+        }
+
+        int getTotalBatchedModificationsReceived() {
+            return totalBatchedModificationsReceived;
+        }
+
+        RuntimeException getLastBatchedModificationsException() {
+            return lastBatchedModificationsException;
        }

        void applyModifications(Iterable<Modification> modifications) {
-            for (Modification modification : modifications) {
-                modification.apply(transaction.getSnapshot());
+            totalBatchedModificationsReceived++;
+            if(lastBatchedModificationsException == null) {
+                for (Modification modification : modifications) {
+                    try {
+                        modification.apply(transaction.getSnapshot());
+                    } catch (RuntimeException e) {
+                        lastBatchedModificationsException = e;
+                        throw e;
+                    }
+                }
+            }
        }

+        boolean canCommit() throws InterruptedException, ExecutionException {
+            state = State.CAN_COMMITTED;
+
+            // We block on the future here (and also preCommit(), commit(), abort()) so we don't have to worry
+            // about possibly accessing our state on a different thread outside of our dispatcher.
+            // TODO: the ShardDataTreeCohort returns immediate Futures anyway which begs the question - why
+            // bother even returning Futures from ShardDataTreeCohort if we have to treat them synchronously
+            // anyway?. The Futures are really a remnant from when we were using the InMemoryDataBroker.
+            return cohort.canCommit().get();
+        }
+
+
+
+        void preCommit() throws InterruptedException, ExecutionException, TimeoutException {
+            state = State.PRE_COMMITTED;
+            cohort.preCommit().get();
+            userCohorts.canCommit(cohort.getCandidate());
+            userCohorts.preCommit();
+        }
+
+        void commit() throws InterruptedException, ExecutionException, TimeoutException {
+            state = State.COMMITTED;
+            cohort.commit().get();
+            userCohorts.commit();
+        }
+
+        void abort() throws InterruptedException, ExecutionException, TimeoutException {
+            state = State.ABORTED;
+            cohort.abort().get();
+            userCohorts.abort();
+        }
+
        void ready(CohortDecorator cohortDecorator, boolean doImmediateCommit) {
            Preconditions.checkState(cohort == null, "cohort was already set");
@@ -575,9 +860,14 @@ class ShardCommitCoordinator {
            this.shard = shard;
        }

+
+        boolean isAborted() {
+            return state == State.ABORTED;
+        }
+
        @Override
        public String toString() {
-            StringBuilder builder = new StringBuilder();
+            final StringBuilder builder = new StringBuilder();
            builder.append("CohortEntry [transactionID=").append(transactionID).append(", doImmediateCommit=")
                    .append(doImmediateCommit).append("]");
            return builder.toString();
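
The sketch below is a simplified, self-contained illustration of the coordination pattern this patch builds on: each transaction is tracked by an entry that moves through the states introduced in CohortEntry.State, and only the entry at the head of the queue may run the three-phase commit while later entries wait. The names SimpleCohortEntry and SimpleCommitCoordinator are hypothetical and are not part of the OpenDaylight code base; the sketch only models the queueing and state-transition idea, not the actor messaging, user cohorts, or expiry handling shown in the diff.

    import java.util.ArrayDeque;
    import java.util.Queue;

    class SimpleCohortEntry {
        // Mirrors the states the patch adds in CohortEntry.State.
        enum State { PENDING, CAN_COMMITTED, PRE_COMMITTED, COMMITTED, ABORTED }

        private final String transactionID;
        private State state = State.PENDING;

        SimpleCohortEntry(String transactionID) {
            this.transactionID = transactionID;
        }

        State getState() {
            return state;
        }

        void canCommit() {
            // Phase one: validation. The real CohortEntry.canCommit() also blocks on the cohort future.
            state = State.CAN_COMMITTED;
        }

        void preCommit() {
            state = State.PRE_COMMITTED;
        }

        void commit() {
            state = State.COMMITTED;
        }

        void abort() {
            state = State.ABORTED;
        }
    }

    class SimpleCommitCoordinator {
        // Only the entry taken from the head of this queue may be in the three-phase commit at any
        // time - the "only one concurrent 3-phase commit" rule from the class javadoc above.
        private final Queue<SimpleCohortEntry> queuedEntries = new ArrayDeque<>();
        private SimpleCohortEntry currentEntry;

        void ready(SimpleCohortEntry entry) {
            queuedEntries.offer(entry);
            maybeStartNext();
        }

        void currentTransactionComplete() {
            currentEntry = null;
            maybeStartNext();
        }

        private void maybeStartNext() {
            if (currentEntry == null && !queuedEntries.isEmpty()) {
                currentEntry = queuedEntries.poll();
                currentEntry.canCommit();
            }
        }
    }

In the patch itself the same progression is driven by messages (CanCommitTransaction, CommitTransaction) or, for the isDoImmediateCommit() case, by the coordinator running all three phases as soon as the transaction is readied.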