import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.slf4j.Logger;
/**
private boolean queueCohortEntry(CohortEntry cohortEntry, ActorRef sender, Shard shard) {
if(queuedCohortEntries.size() < queueCapacity) {
queuedCohortEntries.offer(cohortEntry);
+
+ log.debug("{}: Enqueued transaction {}, queue size {}", name, cohortEntry.getTransactionID(),
+ queuedCohortEntries.size());
+
return true;
} else {
cohortCache.remove(cohortEntry.getTransactionID());
private void doCanCommit(final CohortEntry cohortEntry) {
boolean canCommit = false;
try {
- // We block on the future here so we don't have to worry about possibly accessing our
- // state on a different thread outside of our dispatcher. Also, the data store
- // currently uses a same thread executor anyway.
- canCommit = cohortEntry.getCohort().canCommit().get();
+ canCommit = cohortEntry.canCommit();
log.debug("{}: canCommit for {}: {}", name, cohortEntry.getTransactionID(), canCommit);
// normally fail since we ensure only one concurrent 3-phase commit.
try {
- // We block on the future here so we don't have to worry about possibly accessing our
- // state on a different thread outside of our dispatcher. Also, the data store
- // currently uses a same thread executor anyway.
- cohortEntry.getCohort().preCommit().get();
+ cohortEntry.preCommit();
cohortEntry.getShard().continueCommit(cohortEntry);
return doCommit(cohortEntry);
}
+ /**
+  * Handles an abort request for the given transaction. If the transaction is the one
+  * currently in the commit pipeline, the in-progress state is cleared but the cached
+  * cohort entry is retained (see inline comment); otherwise the entry is removed from
+  * the cache. A reply (or failure) is sent to the sender, if any.
+  */
+ void handleAbort(final String transactionID, final ActorRef sender, final Shard shard) {
+ CohortEntry cohortEntry = getCohortEntryIfCurrent(transactionID);
+ if(cohortEntry != null) {
+ // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
+ // aborted during replication in which case we may still commit locally if replication
+ // succeeds.
+ currentTransactionComplete(transactionID, false);
+ } else {
+ cohortEntry = getAndRemoveCohortEntry(transactionID);
+ }
+
+ if(cohortEntry == null) {
+ return;
+ }
+
+ log.debug("{}: Aborting transaction {}", name, transactionID);
+
+ final ActorRef self = shard.getSelf();
+ try {
+ // Blocks until the underlying cohort's abort future completes.
+ cohortEntry.abort();
+
+ shard.getShardMBean().incrementAbortTransactionsCount();
+
+ if(sender != null) {
+ sender.tell(new AbortTransactionReply().toSerializable(), self);
+ }
+ } catch (Exception e) {
+ log.error("{}: An exception happened during abort", name, e);
+
+ // Propagate the failure back to the requester rather than swallowing it.
+ if(sender != null) {
+ sender.tell(new akka.actor.Status.Failure(e), self);
+ }
+ }
+ }
+
/**
* Returns the cohort entry for the Tx commit currently in progress if the given transaction ID
* matches the current entry.
} else if(next.isExpired(cacheExpiryTimeoutInMillis)) {
log.warn("{}: canCommit for transaction {} was not received within {} ms - entry removed from cache",
name, next.getTransactionID(), cacheExpiryTimeoutInMillis);
-
- iter.remove();
- cohortCache.remove(next.getTransactionID());
- } else {
+ } else if(!next.isAborted()) {
break;
}
+
+ iter.remove();
+ cohortCache.remove(next.getTransactionID());
}
}
private boolean doImmediateCommit;
private final Stopwatch lastAccessTimer = Stopwatch.createStarted();
private int totalBatchedModificationsReceived;
+ private boolean aborted;
CohortEntry(String transactionID, ReadWriteShardDataTreeTransaction transaction) {
this.transaction = Preconditions.checkNotNull(transaction);
return transactionID;
}
- ShardDataTreeCohort getCohort() {
- return cohort;
+ // Expose only the cohort's DataTreeCandidate instead of the cohort itself; callers
+ // now go through CohortEntry for all cohort interactions.
+ DataTreeCandidate getCandidate() {
+ return cohort.getCandidate();
}
int getTotalBatchedModificationsReceived() {
}
}
+ /**
+  * Runs the cohort's canCommit phase synchronously.
+  *
+  * @return the result of the underlying cohort's canCommit future
+  */
+ boolean canCommit() throws InterruptedException, ExecutionException {
+ // We block on the future here (and also preCommit(), commit(), abort()) so we don't have to worry
+ // about possibly accessing our state on a different thread outside of our dispatcher.
+ // TODO: the ShardDataTreeCohort returns immediate Futures anyway which begs the question - why
+ // bother even returning Futures from ShardDataTreeCohort if we have to treat them synchronously
+ // anyway?. The Futures are really a remnant from when we were using the InMemoryDataBroker.
+ return cohort.canCommit().get();
+ }
+
+ /** Runs the cohort's preCommit phase synchronously (see canCommit() for why we block). */
+ void preCommit() throws InterruptedException, ExecutionException {
+ cohort.preCommit().get();
+ }
+
+ /** Runs the cohort's commit phase synchronously (see canCommit() for why we block). */
+ void commit() throws InterruptedException, ExecutionException {
+ cohort.commit().get();
+ }
+
+ /**
+  * Runs the cohort's abort phase synchronously (see canCommit() for why we block).
+  * The aborted flag is set before blocking so the entry is marked even if the
+  * underlying abort future fails; aborted entries are later pruned from the queue.
+  */
+ void abort() throws InterruptedException, ExecutionException {
+ aborted = true;
+ cohort.abort().get();
+ }
+
void ready(CohortDecorator cohortDecorator, boolean doImmediateCommit) {
Preconditions.checkState(cohort == null, "cohort was already set");
this.shard = shard;
}
+
+ /** Returns true if abort() has been invoked on this entry. */
+ boolean isAborted() {
+ return aborted;
+ }
+
@Override
public String toString() {
StringBuilder builder = new StringBuilder();