import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModificationsCursor;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
/**
*
* @author Thomas Pantelis
*/
-class ShardCommitCoordinator {
+final class ShardCommitCoordinator {
// Interface hook for unit tests to replace or decorate the DOMStoreThreePhaseCommitCohorts.
public interface CohortDecorator {
private final ShardDataTree dataTree;
+ private final DataTreeCohortActorRegistry cohortRegistry = new DataTreeCohortActorRegistry();
+
// We use a LinkedList here to avoid synchronization overhead with concurrent queue impls
// since this should only be accessed on the shard's dispatcher.
private final Queue<CohortEntry> queuedCohortEntries = new LinkedList<>();
private Runnable runOnPendingTransactionsComplete;
- ShardCommitCoordinator(ShardDataTree dataTree,
- long cacheExpiryTimeoutInMillis, int queueCapacity, Logger log, String name) {
+ ShardCommitCoordinator(ShardDataTree dataTree, long cacheExpiryTimeoutInMillis, int queueCapacity, Logger log,
+ String name) {
this.queueCapacity = queueCapacity;
this.log = log;
} else {
cohortCache.remove(cohortEntry.getTransactionID());
- RuntimeException ex = new RuntimeException(
+ final RuntimeException ex = new RuntimeException(
String.format("%s: Could not enqueue transaction %s - the maximum commit queue"+
" capacity %d has been reached.",
name, cohortEntry.getTransactionID(), queueCapacity));
* @param ready the ForwardedReadyTransaction message to process
* @param sender the sender of the message
* @param shard the transaction's shard actor
+ * @param schema the SchemaContext to associate with the transaction's CohortEntry
*/
- void handleForwardedReadyTransaction(ForwardedReadyTransaction ready, ActorRef sender, Shard shard) {
+ void handleForwardedReadyTransaction(ForwardedReadyTransaction ready, ActorRef sender, Shard shard,
+ SchemaContext schema) {
log.debug("{}: Readying transaction {}, client version {}", name,
ready.getTransactionID(), ready.getTxnClientVersion());
- ShardDataTreeCohort cohort = ready.getTransaction().ready();
- CohortEntry cohortEntry = new CohortEntry(ready.getTransactionID(), cohort);
+ final ShardDataTreeCohort cohort = ready.getTransaction().ready();
+ final CohortEntry cohortEntry = new CohortEntry(ready.getTransactionID(), cohort, cohortRegistry, schema, ready.getTxnClientVersion());
cohortCache.put(ready.getTransactionID(), cohortEntry);
if(!queueCohortEntry(cohortEntry, sender, shard)) {
* @param sender the sender of the message
* @param shard the transaction's shard actor
*/
- void handleBatchedModifications(BatchedModifications batched, ActorRef sender, Shard shard) {
+ void handleBatchedModifications(BatchedModifications batched, ActorRef sender, Shard shard, SchemaContext schema) {
CohortEntry cohortEntry = cohortCache.get(batched.getTransactionID());
if(cohortEntry == null) {
cohortEntry = new CohortEntry(batched.getTransactionID(),
- dataTree.newReadWriteTransaction(batched.getTransactionID(),
- batched.getTransactionChainID()));
+ dataTree.newReadWriteTransaction(batched.getTransactionID(), batched.getTransactionChainID()),
+ cohortRegistry, schema, batched.getVersion());
cohortCache.put(batched.getTransactionID(), cohortEntry);
}
/**
* This method handles {@link ReadyLocalTransaction} message. All transaction modifications have
- * been prepared beforehand by the sender and we just need to drive them through into the dataTree.
+ * been prepared beforehand by the sender and we just need to drive them through into the
+ * dataTree.
*
* @param message the ReadyLocalTransaction message to process
* @param sender the sender of the message
* @param shard the transaction's shard actor
*/
- void handleReadyLocalTransaction(ReadyLocalTransaction message, ActorRef sender, Shard shard) {
+ void handleReadyLocalTransaction(ReadyLocalTransaction message, ActorRef sender, Shard shard,
+ SchemaContext schema) {
final ShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(dataTree, message.getModification(),
message.getTransactionID());
- final CohortEntry cohortEntry = new CohortEntry(message.getTransactionID(), cohort);
+ final CohortEntry cohortEntry = new CohortEntry(message.getTransactionID(), cohort, cohortRegistry, schema,
+ DataStoreVersions.CURRENT_VERSION);
cohortCache.put(message.getTransactionID(), cohortEntry);
cohortEntry.setDoImmediateCommit(message.isDoCommitOnReady());
}
}
+ Collection<BatchedModifications> createForwardedBatchedModifications(final BatchedModifications from,
+ final int maxModificationsPerBatch) {
+ CohortEntry cohortEntry = getAndRemoveCohortEntry(from.getTransactionID());
+ if(cohortEntry == null || cohortEntry.getTransaction() == null) {
+ return Collections.singletonList(from);
+ }
+
+ cohortEntry.applyModifications(from.getModifications());
+
+ final LinkedList<BatchedModifications> newModifications = new LinkedList<>();
+ cohortEntry.getTransaction().getSnapshot().applyToCursor(new AbstractBatchedModificationsCursor() {
+ @Override
+ protected BatchedModifications getModifications() {
+ if(newModifications.isEmpty() ||
+ newModifications.getLast().getModifications().size() >= maxModificationsPerBatch) {
+ newModifications.add(new BatchedModifications(from.getTransactionID(),
+ from.getVersion(), from.getTransactionChainID()));
+ }
+
+ return newModifications.getLast();
+ }
+ });
+
+ BatchedModifications last = newModifications.getLast();
+ last.setDoCommitOnReady(from.isDoCommitOnReady());
+ last.setReady(from.isReady());
+ last.setTotalMessagesSent(newModifications.size());
+ return newModifications;
+ }
+
private void handleCanCommit(CohortEntry cohortEntry) {
String transactionID = cohortEntry.getTransactionID();
doCanCommit(currentCohortEntry);
} else {
if(log.isDebugEnabled()) {
- log.debug("{}: Tx {} is the next pending canCommit - skipping {} for now",
- name, queuedCohortEntries.peek().getTransactionID(), transactionID);
+ log.debug("{}: Tx {} is the next pending canCommit - skipping {} for now", name,
+ queuedCohortEntries.peek() != null ? queuedCohortEntries.peek().getTransactionID() : "???",
+ transactionID);
}
}
}
"Can Commit failed, no detailed cause available.")), cohortEntry.getShard().self());
}
} else {
- // FIXME - use caller's version
cohortEntry.getReplySender().tell(
- canCommit ? CanCommitTransactionReply.yes(DataStoreVersions.CURRENT_VERSION).toSerializable() :
- CanCommitTransactionReply.no(DataStoreVersions.CURRENT_VERSION).toSerializable(), cohortEntry.getShard().self());
+ canCommit ? CanCommitTransactionReply.yes(cohortEntry.getClientVersion()).toSerializable() :
+ CanCommitTransactionReply.no(cohortEntry.getClientVersion()).toSerializable(),
+ cohortEntry.getShard().self());
}
} catch (Exception e) {
log.debug("{}: An exception occurred during canCommit", name, e);
shard.getShardMBean().incrementAbortTransactionsCount();
if(sender != null) {
- sender.tell(new AbortTransactionReply().toSerializable(), self);
+ sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self);
}
} catch (Exception e) {
log.error("{}: An exception happened during abort", name, e);
return;
}
+ List<CohortEntry> cohortEntries = getAndClearPendingCohortEntries();
+
+ log.debug("{}: Aborting {} pending queued transactions", name, cohortEntries.size());
+
+ for(CohortEntry cohortEntry: cohortEntries) {
+ if(cohortEntry.getReplySender() != null) {
+ cohortEntry.getReplySender().tell(new Failure(new RuntimeException(reason)), shard.self());
+ }
+ }
+ }
+
+ private List<CohortEntry> getAndClearPendingCohortEntries() {
List<CohortEntry> cohortEntries = new ArrayList<>();
if(currentCohortEntry != null) {
cohortEntries.add(currentCohortEntry);
+ cohortCache.remove(currentCohortEntry.getTransactionID());
currentCohortEntry = null;
}
- cohortEntries.addAll(queuedCohortEntries);
+ for(CohortEntry cohortEntry: queuedCohortEntries) {
+ cohortEntries.add(cohortEntry);
+ cohortCache.remove(cohortEntry.getTransactionID());
+ }
+
queuedCohortEntries.clear();
+ return cohortEntries;
+ }
+ Collection<Object> convertPendingTransactionsToMessages(final int maxModificationsPerBatch) {
+ if(currentCohortEntry == null && queuedCohortEntries.isEmpty()) {
+ return Collections.emptyList();
+ }
+
+ Collection<Object> messages = new ArrayList<>();
+ List<CohortEntry> cohortEntries = getAndClearPendingCohortEntries();
for(CohortEntry cohortEntry: cohortEntries) {
- if(cohortEntry.getReplySender() != null) {
- cohortEntry.getReplySender().tell(new Failure(new RuntimeException(reason)), shard.self());
+ if(cohortEntry.isExpired(cacheExpiryTimeoutInMillis) || cohortEntry.isAborted()) {
+ continue;
+ }
+
+ final LinkedList<BatchedModifications> newModifications = new LinkedList<>();
+ cohortEntry.getDataTreeModification().applyToCursor(new AbstractBatchedModificationsCursor() {
+ @Override
+ protected BatchedModifications getModifications() {
+ if(newModifications.isEmpty() ||
+ newModifications.getLast().getModifications().size() >= maxModificationsPerBatch) {
+ newModifications.add(new BatchedModifications(cohortEntry.getTransactionID(),
+ cohortEntry.getClientVersion(), ""));
+ }
+
+ return newModifications.getLast();
+ }
+ });
+
+ if(!newModifications.isEmpty()) {
+ BatchedModifications last = newModifications.getLast();
+ last.setDoCommitOnReady(cohortEntry.isDoImmediateCommit());
+ last.setReady(true);
+ last.setTotalMessagesSent(newModifications.size());
+ messages.addAll(newModifications);
+
+ if(!cohortEntry.isDoImmediateCommit() && cohortEntry.getState() == CohortEntry.State.CAN_COMMITTED) {
+ messages.add(new CanCommitTransaction(cohortEntry.getTransactionID(),
+ cohortEntry.getClientVersion()));
+ }
+
+ if(!cohortEntry.isDoImmediateCommit() && cohortEntry.getState() == CohortEntry.State.PRE_COMMITTED) {
+ messages.add(new CommitTransaction(cohortEntry.getTransactionID(),
+ cohortEntry.getClientVersion()));
+ }
}
}
+
+ return messages;
}
/**
private void maybeProcessNextCohortEntry() {
// Check if there's a next cohort entry waiting in the queue and if it is ready to commit. Also
// clean out expired entries.
- Iterator<CohortEntry> iter = queuedCohortEntries.iterator();
+ final Iterator<CohortEntry> iter = queuedCohortEntries.iterator();
while(iter.hasNext()) {
- CohortEntry next = iter.next();
+ final CohortEntry next = iter.next();
if(next.isReadyToCommit()) {
if(currentCohortEntry == null) {
if(log.isDebugEnabled()) {
this.cohortDecorator = cohortDecorator;
}
- static class CohortEntry {
- private final String transactionID;
- private ShardDataTreeCohort cohort;
- private final ReadWriteShardDataTreeTransaction transaction;
- private RuntimeException lastBatchedModificationsException;
- private ActorRef replySender;
- private Shard shard;
- private boolean doImmediateCommit;
- private final Stopwatch lastAccessTimer = Stopwatch.createStarted();
- private int totalBatchedModificationsReceived;
- private boolean aborted;
-
- CohortEntry(String transactionID, ReadWriteShardDataTreeTransaction transaction) {
- this.transaction = Preconditions.checkNotNull(transaction);
- this.transactionID = transactionID;
- }
-
- CohortEntry(String transactionID, ShardDataTreeCohort cohort) {
- this.transactionID = transactionID;
- this.cohort = cohort;
- this.transaction = null;
- }
-
- void updateLastAccessTime() {
- lastAccessTimer.reset();
- lastAccessTimer.start();
- }
-
- String getTransactionID() {
- return transactionID;
- }
-
- DataTreeCandidate getCandidate() {
- return cohort.getCandidate();
- }
-
- int getTotalBatchedModificationsReceived() {
- return totalBatchedModificationsReceived;
- }
-
- RuntimeException getLastBatchedModificationsException() {
- return lastBatchedModificationsException;
- }
-
- void applyModifications(Iterable<Modification> modifications) {
- totalBatchedModificationsReceived++;
- if(lastBatchedModificationsException == null) {
- for (Modification modification : modifications) {
- try {
- modification.apply(transaction.getSnapshot());
- } catch (RuntimeException e) {
- lastBatchedModificationsException = e;
- throw e;
- }
- }
- }
- }
-
- boolean canCommit() throws InterruptedException, ExecutionException {
- // We block on the future here (and also preCommit(), commit(), abort()) so we don't have to worry
- // about possibly accessing our state on a different thread outside of our dispatcher.
- // TODO: the ShardDataTreeCohort returns immediate Futures anyway which begs the question - why
- // bother even returning Futures from ShardDataTreeCohort if we have to treat them synchronously
- // anyway?. The Futures are really a remnant from when we were using the InMemoryDataBroker.
- return cohort.canCommit().get();
- }
-
- void preCommit() throws InterruptedException, ExecutionException {
- cohort.preCommit().get();
- }
-
- void commit() throws InterruptedException, ExecutionException {
- cohort.commit().get();
- }
-
- void abort() throws InterruptedException, ExecutionException {
- aborted = true;
- cohort.abort().get();
- }
-
- void ready(CohortDecorator cohortDecorator, boolean doImmediateCommit) {
- Preconditions.checkState(cohort == null, "cohort was already set");
-
- setDoImmediateCommit(doImmediateCommit);
-
- cohort = transaction.ready();
-
- if(cohortDecorator != null) {
- // Call the hook for unit tests.
- cohort = cohortDecorator.decorate(transactionID, cohort);
- }
- }
-
- boolean isReadyToCommit() {
- return replySender != null;
- }
-
- boolean isExpired(long expireTimeInMillis) {
- return lastAccessTimer.elapsed(TimeUnit.MILLISECONDS) >= expireTimeInMillis;
- }
-
- boolean isDoImmediateCommit() {
- return doImmediateCommit;
- }
-
- void setDoImmediateCommit(boolean doImmediateCommit) {
- this.doImmediateCommit = doImmediateCommit;
- }
-
- ActorRef getReplySender() {
- return replySender;
- }
-
- void setReplySender(ActorRef replySender) {
- this.replySender = replySender;
- }
-
- Shard getShard() {
- return shard;
- }
-
- void setShard(Shard shard) {
- this.shard = shard;
- }
-
-
- boolean isAborted() {
- return aborted;
- }
-
- @Override
- public String toString() {
- StringBuilder builder = new StringBuilder();
- builder.append("CohortEntry [transactionID=").append(transactionID).append(", doImmediateCommit=")
- .append(doImmediateCommit).append("]");
- return builder.toString();
- }
+ void processCohortRegistryCommand(ActorRef sender, CohortRegistryCommand message) {
+ cohortRegistry.process(sender, message);
}
}