X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FShardCommitCoordinator.java;h=eb0c04dbbd86eaaabde73326baf1b35086073ce1;hb=refs%2Fchanges%2F28%2F41428%2F3;hp=f3e1e33e347f7760820bce49897bf2aa3c20a5b6;hpb=103ceecd0195cca6c87fbd62a687d8addf128784;p=controller.git

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
index f3e1e33e34..eb0c04dbbd 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
@@ -8,28 +8,34 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
-import akka.actor.Status;
+import akka.actor.Status.Failure;
 import akka.serialization.Serialization;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
+import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModificationsCursor;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yangtools.concepts.Identifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 
 /**
@@ -37,19 +43,21 @@ import org.slf4j.Logger;
  *
  * @author Thomas Pantelis
  */
-public class ShardCommitCoordinator {
+final class ShardCommitCoordinator {
 
     // Interface hook for unit tests to replace or decorate the DOMStoreThreePhaseCommitCohorts.
     public interface CohortDecorator {
-        ShardDataTreeCohort decorate(String transactionID, ShardDataTreeCohort actual);
+        ShardDataTreeCohort decorate(Identifier transactionID, ShardDataTreeCohort actual);
     }
 
-    private final Map<String, CohortEntry> cohortCache = new HashMap<>();
+    private final Map<Identifier, CohortEntry> cohortCache = new HashMap<>();
 
     private CohortEntry currentCohortEntry;
 
     private final ShardDataTree dataTree;
 
+    private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
+
     // We use a LinkedList here to avoid synchronization overhead with concurrent queue impls
     // since this should only be accessed on the shard's dispatcher.
     private final Queue<CohortEntry> queuedCohortEntries = new LinkedList<>();
@@ -67,8 +75,10 @@ public class ShardCommitCoordinator {
 
     private ReadyTransactionReply readyTransactionReply;
 
-    public ShardCommitCoordinator(ShardDataTree dataTree,
-            long cacheExpiryTimeoutInMillis, int queueCapacity, ActorRef shardActor, Logger log, String name) {
+    private Runnable runOnPendingTransactionsComplete;
+
+    ShardCommitCoordinator(ShardDataTree dataTree, long cacheExpiryTimeoutInMillis, int queueCapacity, Logger log,
+            String name) {
 
         this.queueCapacity = queueCapacity;
         this.log = log;
@@ -77,7 +87,15 @@ public class ShardCommitCoordinator {
         this.cacheExpiryTimeoutInMillis = cacheExpiryTimeoutInMillis;
     }
 
-    public void setQueueCapacity(int queueCapacity) {
+    int getQueueSize() {
+        return queuedCohortEntries.size();
+    }
+
+    int getCohortCacheSize() {
+        return cohortCache.size();
+    }
+
+    void setQueueCapacity(int queueCapacity) {
         this.queueCapacity = queueCapacity;
     }
 
@@ -92,16 +110,20 @@ public class ShardCommitCoordinator {
     private boolean queueCohortEntry(CohortEntry cohortEntry, ActorRef sender, Shard shard) {
         if(queuedCohortEntries.size() < queueCapacity) {
             queuedCohortEntries.offer(cohortEntry);
+
+            log.debug("{}: Enqueued transaction {}, queue size {}", name, cohortEntry.getTransactionID(),
+                    queuedCohortEntries.size());
+
             return true;
         } else {
             cohortCache.remove(cohortEntry.getTransactionID());
 
-            RuntimeException ex = new RuntimeException(
+            final RuntimeException ex = new RuntimeException(
                     String.format("%s: Could not enqueue transaction %s - the maximum commit queue"+
                         " capacity %d has been reached.", name, cohortEntry.getTransactionID(), queueCapacity));
             log.error(ex.getMessage());
-            sender.tell(new Status.Failure(ex), shard.self());
+            sender.tell(new Failure(ex), shard.self());
             return false;
         }
     }
@@ -109,47 +131,35 @@ public class ShardCommitCoordinator {
     /**
      * This method is called to ready a transaction that was prepared by ShardTransaction actor. It caches
      * the prepared cohort entry for the given transactions ID in preparation for the subsequent 3-phase commit.
+     *
+     * @param ready the ForwardedReadyTransaction message to process
+     * @param sender the sender of the message
+     * @param shard the transaction's shard actor
+     * @param schema
      */
-    public void handleForwardedReadyTransaction(ForwardedReadyTransaction ready, ActorRef sender, Shard shard) {
+    void handleForwardedReadyTransaction(ForwardedReadyTransaction ready, ActorRef sender, Shard shard,
+            SchemaContext schema) {
         log.debug("{}: Readying transaction {}, client version {}", name,
                 ready.getTransactionID(), ready.getTxnClientVersion());
 
-        CohortEntry cohortEntry = new CohortEntry(ready.getTransactionID(), ready.getCohort(),
-                (MutableCompositeModification) ready.getModification());
-        cohortCache.put(ready.getTransactionID(), cohortEntry);
+        final ShardDataTreeCohort cohort = ready.getTransaction().ready();
+        final CohortEntry cohortEntry = CohortEntry.createReady(ready.getTransactionID(), cohort, cohortRegistry,
+                schema, ready.getTxnClientVersion());
+        cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
 
         if(!queueCohortEntry(cohortEntry, sender, shard)) {
             return;
         }
 
-        if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
-            // Return our actor path as we'll handle the three phase commit except if the Tx client
-            // version < Helium-1 version which means the Tx was initiated by a base Helium version node.
-            // In that case, the subsequent 3-phase commit messages won't contain the transactionId so to
-            // maintain backwards compatibility, we create a separate cohort actor to provide the compatible behavior.
-            ActorRef replyActorPath = shard.self();
-            if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
-                log.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", name);
-                replyActorPath = shard.getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
-                        ready.getTransactionID()));
-            }
-
-            ReadyTransactionReply readyTransactionReply =
-                    new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
-                            ready.getTxnClientVersion());
-            sender.tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
-                readyTransactionReply, shard.self());
+        if(ready.isDoImmediateCommit()) {
+            cohortEntry.setDoImmediateCommit(true);
+            cohortEntry.setReplySender(sender);
+            cohortEntry.setShard(shard);
+            handleCanCommit(cohortEntry);
         } else {
-            if(ready.isDoImmediateCommit()) {
-                cohortEntry.setDoImmediateCommit(true);
-                cohortEntry.setReplySender(sender);
-                cohortEntry.setShard(shard);
-                handleCanCommit(cohortEntry);
-            } else {
-                // The caller does not want immediate commit - the 3-phase commit will be coordinated by the
-                // front-end so send back a ReadyTransactionReply with our actor path.
-                sender.tell(readyTransactionReply(shard), shard.self());
-            }
+            // The caller does not want immediate commit - the 3-phase commit will be coordinated by the
+            // front-end so send back a ReadyTransactionReply with our actor path.
+            sender.tell(readyTransactionReply(shard), shard.self());
         }
     }
 
@@ -159,19 +169,16 @@ public class ShardCommitCoordinator {
      * DOMStoreWriteTransaction, one is created. The batched modifications are applied to the write Tx. If
      * the BatchedModifications is ready to commit then a DOMStoreThreePhaseCommitCohort is created.
      *
-     * @param batched the BatchedModifications
-     * @param shardActor the transaction's shard actor
-     *
-     * @throws ExecutionException if an error occurs loading the cache
+     * @param batched the BatchedModifications message to process
+     * @param sender the sender of the message
      */
-    void handleBatchedModifications(BatchedModifications batched, ActorRef sender, Shard shard)
-            throws ExecutionException {
+    void handleBatchedModifications(BatchedModifications batched, ActorRef sender, Shard shard) {
         CohortEntry cohortEntry = cohortCache.get(batched.getTransactionID());
         if(cohortEntry == null) {
-            cohortEntry = new CohortEntry(batched.getTransactionID(),
-                    dataTree.newReadWriteTransaction(batched.getTransactionID(),
-                        batched.getTransactionChainID()));
-            cohortCache.put(batched.getTransactionID(), cohortEntry);
+            cohortEntry = CohortEntry.createOpen(batched.getTransactionID(),
+                    dataTree.newReadWriteTransaction(batched.getTransactionID()),
+                    cohortRegistry, dataTree.getSchemaContext(), batched.getVersion());
+            cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
         }
 
         if(log.isDebugEnabled()) {
@@ -182,6 +189,18 @@ public class ShardCommitCoordinator {
         cohortEntry.applyModifications(batched.getModifications());
 
         if(batched.isReady()) {
+            if(cohortEntry.getLastBatchedModificationsException() != null) {
+                cohortCache.remove(cohortEntry.getTransactionID());
+                throw cohortEntry.getLastBatchedModificationsException();
+            }
+
+            if(cohortEntry.getTotalBatchedModificationsReceived() != batched.getTotalMessagesSent()) {
+                cohortCache.remove(cohortEntry.getTransactionID());
+                throw new IllegalStateException(String.format(
+                        "The total number of batched messages received %d does not match the number sent %d",
+                        cohortEntry.getTotalBatchedModificationsReceived(), batched.getTotalMessagesSent()));
+            }
+
             if(!queueCohortEntry(cohortEntry, sender, shard)) {
                 return;
             }
@@ -207,16 +226,19 @@ public class ShardCommitCoordinator {
 
     /**
      * This method handles {@link ReadyLocalTransaction} message. All transaction modifications have
-     * been prepared beforehand by the sender and we just need to drive them through into the dataTree.
+     * been prepared beforehand by the sender and we just need to drive them through into the
+     * dataTree.
      *
-     * @param message
-     * @param sender
-     * @param shard
+     * @param message the ReadyLocalTransaction message to process
+     * @param sender the sender of the message
+     * @param shard the transaction's shard actor
      */
     void handleReadyLocalTransaction(ReadyLocalTransaction message, ActorRef sender, Shard shard) {
-        final ShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(dataTree, message.getModification());
-        final CohortEntry cohortEntry = new CohortEntry(message.getTransactionID(), cohort);
-        cohortCache.put(message.getTransactionID(), cohortEntry);
+        final ShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(dataTree, message.getModification(),
+                message.getTransactionID());
+        final CohortEntry cohortEntry = CohortEntry.createReady(message.getTransactionID(), cohort, cohortRegistry,
+                dataTree.getSchemaContext(), DataStoreVersions.CURRENT_VERSION);
+        cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
         cohortEntry.setDoImmediateCommit(message.isDoCommitOnReady());
 
         if(!queueCohortEntry(cohortEntry, sender, shard)) {
             return;
         }
@@ -234,9 +256,36 @@ public class ShardCommitCoordinator {
         }
     }
 
-    private void handleCanCommit(CohortEntry cohortEntry) {
-        String transactionID = cohortEntry.getTransactionID();
+    Collection<BatchedModifications> createForwardedBatchedModifications(final BatchedModifications from,
+            final int maxModificationsPerBatch) {
+        CohortEntry cohortEntry = getAndRemoveCohortEntry(from.getTransactionID());
+        if(cohortEntry == null || cohortEntry.getTransaction() == null) {
+            return Collections.singletonList(from);
+        }
+
+        cohortEntry.applyModifications(from.getModifications());
+
+        final LinkedList<BatchedModifications> newModifications = new LinkedList<>();
+        cohortEntry.getTransaction().getSnapshot().applyToCursor(new AbstractBatchedModificationsCursor() {
+            @Override
+            protected BatchedModifications getModifications() {
+                if(newModifications.isEmpty() ||
+                        newModifications.getLast().getModifications().size() >= maxModificationsPerBatch) {
+                    newModifications.add(new BatchedModifications(from.getTransactionID(), from.getVersion()));
+                }
+
+                return newModifications.getLast();
+            }
+        });
+
+        BatchedModifications last = newModifications.getLast();
+        last.setDoCommitOnReady(from.isDoCommitOnReady());
+        last.setReady(from.isReady());
+        last.setTotalMessagesSent(newModifications.size());
+        return newModifications;
+    }
 
+    private void handleCanCommit(CohortEntry cohortEntry) {
         cohortEntry.updateLastAccessTime();
 
         if(currentCohortEntry != null) {
@@ -245,7 +294,7 @@ public class ShardCommitCoordinator {
             if(log.isDebugEnabled()) {
                 log.debug("{}: Commit for Tx {} already in progress - skipping canCommit for {} for now",
-                        name, currentCohortEntry.getTransactionID(), transactionID);
+                        name, currentCohortEntry.getTransactionID(), cohortEntry.getTransactionID());
             }
 
             return;
@@ -259,8 +308,9 @@ public class ShardCommitCoordinator {
             doCanCommit(currentCohortEntry);
         } else {
             if(log.isDebugEnabled()) {
-                log.debug("{}: Tx {} is the next pending canCommit - skipping {} for now",
-                        name, queuedCohortEntries.peek().getTransactionID(), transactionID);
+                log.debug("{}: Tx {} is the next pending canCommit - skipping {} for now", name,
+                        queuedCohortEntries.peek() != null ? queuedCohortEntries.peek().getTransactionID() : "???",
+                        cohortEntry.getTransactionID());
             }
         }
     }
@@ -268,11 +318,11 @@ public class ShardCommitCoordinator {
     /**
      * This method handles the canCommit phase for a transaction.
      *
-     * @param canCommit the CanCommitTransaction message
-     * @param sender the actor that sent the message
+     * @param transactionID the ID of the transaction to canCommit
+     * @param sender the actor to which to send the response
      * @param shard the transaction's shard actor
      */
-    public void handleCanCommit(String transactionID, final ActorRef sender, final Shard shard) {
+    void handleCanCommit(Identifier transactionID, final ActorRef sender, final Shard shard) {
         // Lookup the cohort entry that was cached previously (or should have been) by
         // transactionReady (via the ForwardedReadyTransaction message).
         final CohortEntry cohortEntry = cohortCache.get(transactionID);
@@ -282,7 +332,7 @@ public class ShardCommitCoordinator {
             IllegalStateException ex = new IllegalStateException(
                     String.format("%s: No cohort entry found for transaction %s", name, transactionID));
             log.error(ex.getMessage());
-            sender.tell(new Status.Failure(ex), shard.self());
+            sender.tell(new Failure(ex), shard.self());
             return;
         }
 
@@ -295,10 +345,7 @@ public class ShardCommitCoordinator {
     private void doCanCommit(final CohortEntry cohortEntry) {
         boolean canCommit = false;
         try {
-            // We block on the future here so we don't have to worry about possibly accessing our
-            // state on a different thread outside of our dispatcher. Also, the data store
-            // currently uses a same thread executor anyway.
-            canCommit = cohortEntry.getCohort().canCommit().get();
+            canCommit = cohortEntry.canCommit();
 
             log.debug("{}: canCommit for {}: {}", name, cohortEntry.getTransactionID(), canCommit);
 
@@ -306,13 +353,14 @@ public class ShardCommitCoordinator {
                 if(canCommit) {
                     doCommit(cohortEntry);
                 } else {
-                    cohortEntry.getReplySender().tell(new Status.Failure(new TransactionCommitFailedException(
+                    cohortEntry.getReplySender().tell(new Failure(new TransactionCommitFailedException(
                             "Can Commit failed, no detailed cause available.")), cohortEntry.getShard().self());
                 }
             } else {
                 cohortEntry.getReplySender().tell(
-                        canCommit ? CanCommitTransactionReply.YES.toSerializable() :
-                            CanCommitTransactionReply.NO.toSerializable(), cohortEntry.getShard().self());
+                        canCommit ? CanCommitTransactionReply.yes(cohortEntry.getClientVersion()).toSerializable() :
+                            CanCommitTransactionReply.no(cohortEntry.getClientVersion()).toSerializable(),
+                        cohortEntry.getShard().self());
             }
         } catch (Exception e) {
             log.debug("{}: An exception occurred during canCommit", name, e);
@@ -322,7 +370,7 @@ public class ShardCommitCoordinator {
                 failure = e.getCause();
             }
 
-            cohortEntry.getReplySender().tell(new Status.Failure(failure), cohortEntry.getShard().self());
+            cohortEntry.getReplySender().tell(new Failure(failure), cohortEntry.getShard().self());
         } finally {
             if(!canCommit) {
                 // Remove the entry from the cache now.
@@ -342,10 +390,7 @@ public class ShardCommitCoordinator {
         // normally fail since we ensure only one concurrent 3-phase commit.
 
         try {
-            // We block on the future here so we don't have to worry about possibly accessing our
-            // state on a different thread outside of our dispatcher. Also, the data store
-            // currently uses a same thread executor anyway.
-            cohortEntry.getCohort().preCommit().get();
+            cohortEntry.preCommit();
 
             cohortEntry.getShard().continueCommit(cohortEntry);
 
@@ -355,7 +400,7 @@ public class ShardCommitCoordinator {
         } catch (Exception e) {
             log.error("{} An exception occurred while preCommitting transaction {}", name,
                     cohortEntry.getTransactionID(), e);
-            cohortEntry.getReplySender().tell(new akka.actor.Status.Failure(e), cohortEntry.getShard().self());
+            cohortEntry.getReplySender().tell(new Failure(e), cohortEntry.getShard().self());
 
             currentTransactionComplete(cohortEntry.getTransactionID(), true);
         }
 
@@ -363,7 +408,15 @@ public class ShardCommitCoordinator {
         return success;
     }
 
-    boolean handleCommit(final String transactionID, final ActorRef sender, final Shard shard) {
+    /**
+     * This method handles the preCommit and commit phases for a transaction.
+     *
+     * @param transactionID the ID of the transaction to commit
+     * @param sender the actor to which to send the response
+     * @param shard the transaction's shard actor
+     * @return true if the transaction was successfully prepared, false otherwise.
+     */
+    boolean handleCommit(final Identifier transactionID, final ActorRef sender, final Shard shard) {
         // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
         // this transaction.
         final CohortEntry cohortEntry = getCohortEntryIfCurrent(transactionID);
@@ -374,7 +427,7 @@ public class ShardCommitCoordinator {
                     String.format("%s: Cannot commit transaction %s - it is not the current transaction",
                             name, transactionID));
             log.error(ex.getMessage());
-            sender.tell(new akka.actor.Status.Failure(ex), shard.self());
+            sender.tell(new Failure(ex), shard.self());
             return false;
         }
 
@@ -382,6 +435,137 @@ public class ShardCommitCoordinator {
         return doCommit(cohortEntry);
     }
 
+    void handleAbort(final Identifier transactionID, final ActorRef sender, final Shard shard) {
+        CohortEntry cohortEntry = getCohortEntryIfCurrent(transactionID);
+        if(cohortEntry != null) {
+            // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
+            // aborted during replication in which case we may still commit locally if replication
+            // succeeds.
+            currentTransactionComplete(transactionID, false);
+        } else {
+            cohortEntry = getAndRemoveCohortEntry(transactionID);
+        }
+
+        if(cohortEntry == null) {
+            return;
+        }
+
+        log.debug("{}: Aborting transaction {}", name, transactionID);
+
+        final ActorRef self = shard.getSelf();
+        try {
+            cohortEntry.abort();
+
+            shard.getShardMBean().incrementAbortTransactionsCount();
+
+            if(sender != null) {
+                sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self);
+            }
+        } catch (Exception e) {
+            log.error("{}: An exception happened during abort", name, e);
+
+            if(sender != null) {
+                sender.tell(new Failure(e), self);
+            }
+        }
+    }
+
+    void checkForExpiredTransactions(final long timeout, final Shard shard) {
+        CohortEntry cohortEntry = getCurrentCohortEntry();
+        if(cohortEntry != null) {
+            if(cohortEntry.isExpired(timeout)) {
+                log.warn("{}: Current transaction {} has timed out after {} ms - aborting",
+                        name, cohortEntry.getTransactionID(), timeout);
+
+                handleAbort(cohortEntry.getTransactionID(), null, shard);
+            }
+        }
+
+        cleanupExpiredCohortEntries();
+    }
+
+    void abortPendingTransactions(final String reason, final Shard shard) {
+        if(currentCohortEntry == null && queuedCohortEntries.isEmpty()) {
+            return;
+        }
+
+        List<CohortEntry> cohortEntries = getAndClearPendingCohortEntries();
+
+        log.debug("{}: Aborting {} pending queued transactions", name, cohortEntries.size());
+
+        for(CohortEntry cohortEntry: cohortEntries) {
+            if(cohortEntry.getReplySender() != null) {
+                cohortEntry.getReplySender().tell(new Failure(new RuntimeException(reason)), shard.self());
+            }
+        }
+    }
+
+    private List<CohortEntry> getAndClearPendingCohortEntries() {
+        List<CohortEntry> cohortEntries = new ArrayList<>();
+
+        if(currentCohortEntry != null) {
+            cohortEntries.add(currentCohortEntry);
+            cohortCache.remove(currentCohortEntry.getTransactionID());
+            currentCohortEntry = null;
+        }
+
+        for(CohortEntry cohortEntry: queuedCohortEntries) {
+            cohortEntries.add(cohortEntry);
+            cohortCache.remove(cohortEntry.getTransactionID());
+        }
+
+        queuedCohortEntries.clear();
+        return cohortEntries;
+    }
+
+    Collection<Object> convertPendingTransactionsToMessages(final int maxModificationsPerBatch) {
+        if(currentCohortEntry == null && queuedCohortEntries.isEmpty()) {
+            return Collections.emptyList();
+        }
+
+        Collection<Object> messages = new ArrayList<>();
+        List<CohortEntry> cohortEntries = getAndClearPendingCohortEntries();
+        for(CohortEntry cohortEntry: cohortEntries) {
+            if(cohortEntry.isExpired(cacheExpiryTimeoutInMillis) || cohortEntry.isAborted()) {
+                continue;
+            }
+
+            final LinkedList<BatchedModifications> newModifications = new LinkedList<>();
+            cohortEntry.getDataTreeModification().applyToCursor(new AbstractBatchedModificationsCursor() {
+                @Override
+                protected BatchedModifications getModifications() {
+                    if(newModifications.isEmpty() ||
+                            newModifications.getLast().getModifications().size() >= maxModificationsPerBatch) {
+                        newModifications.add(new BatchedModifications(cohortEntry.getTransactionID(),
+                                cohortEntry.getClientVersion()));
+                    }
+
+                    return newModifications.getLast();
+                }
+            });
+
+            if(!newModifications.isEmpty()) {
+                BatchedModifications last = newModifications.getLast();
+                last.setDoCommitOnReady(cohortEntry.isDoImmediateCommit());
+                last.setReady(true);
+                last.setTotalMessagesSent(newModifications.size());
+                messages.addAll(newModifications);
+
+                if(!cohortEntry.isDoImmediateCommit() && cohortEntry.getState() == CohortEntry.State.CAN_COMMITTED) {
+                    messages.add(new CanCommitTransaction(cohortEntry.getTransactionID(),
+                            cohortEntry.getClientVersion()));
+                }
+                if(!cohortEntry.isDoImmediateCommit() && cohortEntry.getState() == CohortEntry.State.PRE_COMMITTED) {
+                    messages.add(new CommitTransaction(cohortEntry.getTransactionID(),
+                            cohortEntry.getClientVersion()));
+                }
+            }
+        }
+
+        return messages;
+    }
+
     /**
      * Returns the cohort entry for the Tx commit currently in progress if the given transaction ID
      * matches the current entry.
@@ -390,7 +574,7 @@ public class ShardCommitCoordinator {
      * @return the current CohortEntry or null if the given transaction ID does not match the
      *         current entry.
      */
-    public CohortEntry getCohortEntryIfCurrent(String transactionID) {
+    CohortEntry getCohortEntryIfCurrent(Identifier transactionID) {
         if(isCurrentTransaction(transactionID)) {
             return currentCohortEntry;
         }
@@ -398,15 +582,15 @@ public class ShardCommitCoordinator {
         return null;
     }
 
-    public CohortEntry getCurrentCohortEntry() {
+    CohortEntry getCurrentCohortEntry() {
         return currentCohortEntry;
     }
 
-    public CohortEntry getAndRemoveCohortEntry(String transactionID) {
+    CohortEntry getAndRemoveCohortEntry(Identifier transactionID) {
         return cohortCache.remove(transactionID);
     }
 
-    public boolean isCurrentTransaction(String transactionID) {
+    boolean isCurrentTransaction(Identifier transactionID) {
         return currentCohortEntry != null &&
                 currentCohortEntry.getTransactionID().equals(transactionID);
     }
@@ -420,7 +604,7 @@ public class ShardCommitCoordinator {
      * @param removeCohortEntry if true the CohortEntry for the transaction is also removed from
      *        the cache.
      */
-    public void currentTransactionComplete(String transactionID, boolean removeCohortEntry) {
+    void currentTransactionComplete(Identifier transactionID, boolean removeCohortEntry) {
         if(removeCohortEntry) {
             cohortCache.remove(transactionID);
         }
@@ -437,9 +621,9 @@ public class ShardCommitCoordinator {
     private void maybeProcessNextCohortEntry() {
         // Check if there's a next cohort entry waiting in the queue and if it is ready to commit. Also
         // clean out expired entries.
-        Iterator<CohortEntry> iter = queuedCohortEntries.iterator();
+        final Iterator<CohortEntry> iter = queuedCohortEntries.iterator();
         while(iter.hasNext()) {
-            CohortEntry next = iter.next();
+            final CohortEntry next = iter.next();
             if(next.isReadyToCommit()) {
                 if(currentCohortEntry == null) {
                     if(log.isDebugEnabled()) {
@@ -456,121 +640,41 @@ public class ShardCommitCoordinator {
             } else if(next.isExpired(cacheExpiryTimeoutInMillis)) {
                 log.warn("{}: canCommit for transaction {} was not received within {} ms - entry removed from cache",
                         name, next.getTransactionID(), cacheExpiryTimeoutInMillis);
-
-                iter.remove();
-                cohortCache.remove(next.getTransactionID());
-            } else {
+            } else if(!next.isAborted()) {
                 break;
             }
+
+            iter.remove();
+            cohortCache.remove(next.getTransactionID());
         }
+
+        maybeRunOperationOnPendingTransactionsComplete();
     }
 
     void cleanupExpiredCohortEntries() {
         maybeProcessNextCohortEntry();
     }
 
-    @VisibleForTesting
-    void setCohortDecorator(CohortDecorator cohortDecorator) {
-        this.cohortDecorator = cohortDecorator;
+    void setRunOnPendingTransactionsComplete(Runnable operation) {
+        runOnPendingTransactionsComplete = operation;
+        maybeRunOperationOnPendingTransactionsComplete();
     }
 
-    static class CohortEntry {
-        private final String transactionID;
-        private ShardDataTreeCohort cohort;
-        private final ReadWriteShardDataTreeTransaction transaction;
-        private ActorRef replySender;
-        private Shard shard;
-        private boolean doImmediateCommit;
-        private final Stopwatch lastAccessTimer = Stopwatch.createStarted();
-
-        CohortEntry(String transactionID, ReadWriteShardDataTreeTransaction transaction) {
-            this.transaction = Preconditions.checkNotNull(transaction);
-            this.transactionID = transactionID;
-        }
-
-        CohortEntry(String transactionID, ShardDataTreeCohort cohort,
-                MutableCompositeModification compositeModification) {
-            this.transactionID = transactionID;
-            this.cohort = cohort;
-            this.transaction = null;
-        }
+    private void maybeRunOperationOnPendingTransactionsComplete() {
+        if(runOnPendingTransactionsComplete != null && currentCohortEntry == null && queuedCohortEntries.isEmpty()) {
+            log.debug("{}: Pending transactions complete - running operation {}", name, runOnPendingTransactionsComplete);
 
-        CohortEntry(String transactionID, ShardDataTreeCohort cohort) {
-            this.transactionID = transactionID;
-            this.cohort = cohort;
-            this.transaction = null;
-        }
-
-        void updateLastAccessTime() {
-            lastAccessTimer.reset();
-            lastAccessTimer.start();
-        }
-
-        String getTransactionID() {
-            return transactionID;
-        }
-
-        ShardDataTreeCohort getCohort() {
-            return cohort;
-        }
-
-        void applyModifications(Iterable<Modification> modifications) {
-            for (Modification modification : modifications) {
-                modification.apply(transaction.getSnapshot());
-            }
-        }
-
-        void ready(CohortDecorator cohortDecorator, boolean doImmediateCommit) {
-            Preconditions.checkState(cohort == null, "cohort was already set");
-
-            setDoImmediateCommit(doImmediateCommit);
-
-            cohort = transaction.ready();
-
-            if(cohortDecorator != null) {
-                // Call the hook for unit tests.
-                cohort = cohortDecorator.decorate(transactionID, cohort);
-            }
-        }
-
-        boolean isReadyToCommit() {
-            return replySender != null;
-        }
-
-        boolean isExpired(long expireTimeInMillis) {
-            return lastAccessTimer.elapsed(TimeUnit.MILLISECONDS) >= expireTimeInMillis;
-        }
-
-        boolean isDoImmediateCommit() {
-            return doImmediateCommit;
-        }
-
-        void setDoImmediateCommit(boolean doImmediateCommit) {
-            this.doImmediateCommit = doImmediateCommit;
-        }
-
-        ActorRef getReplySender() {
-            return replySender;
-        }
-
-        void setReplySender(ActorRef replySender) {
-            this.replySender = replySender;
-        }
-
-        Shard getShard() {
-            return shard;
+            runOnPendingTransactionsComplete.run();
+            runOnPendingTransactionsComplete = null;
         }
+    }
 
-        void setShard(Shard shard) {
-            this.shard = shard;
-        }
+    @VisibleForTesting
+    void setCohortDecorator(CohortDecorator cohortDecorator) {
+        this.cohortDecorator = cohortDecorator;
+    }
 
-        @Override
-        public String toString() {
-            StringBuilder builder = new StringBuilder();
-            builder.append("CohortEntry [transactionID=").append(transactionID).append(", doImmediateCommit=")
-                    .append(doImmediateCommit).append("]");
-            return builder.toString();
-        }
+    void processCohortRegistryCommand(ActorRef sender, CohortRegistryCommand message) {
+        cohortRegistry.process(sender, message);
     }
 }
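
Illustrative sketch, not part of the patch: the coordinator above admits cohort entries into a capacity-bounded, single-threaded FIFO and later drops entries whose canCommit never arrived within the cache expiry timeout (queueCohortEntry, isExpired, checkForExpiredTransactions). The self-contained Java sketch below shows that admission/expiry pattern using only JDK types; PendingEntry and CommitQueueSketch are hypothetical stand-ins for CohortEntry and the shard's queue, not OpenDaylight classes.

// Standalone sketch of the bounded-queue-with-expiry pattern used above.
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;

final class CommitQueueSketch {
    static final class PendingEntry {
        final String transactionId;
        long lastAccessNanos = System.nanoTime();

        PendingEntry(String transactionId) {
            this.transactionId = transactionId;
        }

        boolean isExpired(long timeoutMillis) {
            return (System.nanoTime() - lastAccessNanos) / 1_000_000 >= timeoutMillis;
        }
    }

    private final Map<String, PendingEntry> cache = new HashMap<>();
    // A plain LinkedList is enough because, like the shard coordinator, a single
    // dispatcher thread is assumed to own this state.
    private final Queue<PendingEntry> queued = new LinkedList<>();
    private final int queueCapacity;
    private final long expiryMillis;

    CommitQueueSketch(int queueCapacity, long expiryMillis) {
        this.queueCapacity = queueCapacity;
        this.expiryMillis = expiryMillis;
    }

    /** Admits an entry or rejects it when the capacity limit is reached. */
    boolean enqueue(PendingEntry entry) {
        if (queued.size() >= queueCapacity) {
            cache.remove(entry.transactionId);
            return false; // the real coordinator replies with a Failure here
        }
        cache.put(entry.transactionId, entry);
        queued.offer(entry);
        return true;
    }

    /** Drops queued entries whose follow-up never arrived within the expiry window. */
    void cleanupExpired() {
        for (Iterator<PendingEntry> it = queued.iterator(); it.hasNext();) {
            PendingEntry next = it.next();
            if (!next.isExpired(expiryMillis)) {
                break; // FIFO order: younger entries follow, so stop at the first live one
            }
            it.remove();
            cache.remove(next.transactionId);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        CommitQueueSketch sketch = new CommitQueueSketch(2, 50);
        sketch.enqueue(new PendingEntry("tx-1"));
        sketch.enqueue(new PendingEntry("tx-2"));
        System.out.println("tx-3 admitted: " + sketch.enqueue(new PendingEntry("tx-3")));
        Thread.sleep(60);
        sketch.cleanupExpired();
        System.out.println("queue size after expiry sweep: " + sketch.queued.size());
    }
}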
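
Illustrative sketch, not part of the patch: convertPendingTransactionsToMessages() and createForwardedBatchedModifications() above re-batch a transaction's pending modifications into messages of at most maxModificationsPerBatch, marking only the final batch as ready and recording the total message count so the receiver can detect loss. The self-contained Java sketch below shows only that splitting logic with a hypothetical Batch class standing in for BatchedModifications; it uses no OpenDaylight APIs.

// Standalone sketch of the re-batching pattern used above (requires Java 9+ for List.of).
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

final class RebatchSketch {
    static final class Batch {
        final String transactionId;
        final short version;
        final List<String> modifications = new ArrayList<>();
        boolean ready;
        int totalMessagesSent;

        Batch(String transactionId, short version) {
            this.transactionId = transactionId;
            this.version = version;
        }
    }

    static List<Batch> rebatch(String txId, short version, List<String> modifications,
            int maxModificationsPerBatch) {
        LinkedList<Batch> batches = new LinkedList<>();
        for (String modification : modifications) {
            // Start a new batch when none exists yet or the current one is full,
            // mirroring the cursor callback in the patch.
            if (batches.isEmpty()
                    || batches.getLast().modifications.size() >= maxModificationsPerBatch) {
                batches.add(new Batch(txId, version));
            }
            batches.getLast().modifications.add(modification);
        }

        if (!batches.isEmpty()) {
            Batch last = batches.getLast();
            last.ready = true;                       // only the final message is marked ready
            last.totalMessagesSent = batches.size(); // lets the receiver detect lost messages
        }
        return batches;
    }

    public static void main(String[] args) {
        List<String> mods = List.of("write /a", "write /b", "delete /c", "merge /d", "write /e");
        List<Batch> batches = rebatch("tx-1", (short) 5, mods, 2);
        System.out.println("batches: " + batches.size()
                + ", last ready: " + batches.get(batches.size() - 1).ready
                + ", totalMessagesSent: " + batches.get(batches.size() - 1).totalMessagesSent);
    }
}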