import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
*/
public class Shard extends RaftActor {
- private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";
+ protected static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";
@VisibleForTesting
static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";
datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
}
- public static Props props(final ShardIdentifier name,
- final Map<String, String> peerAddresses,
- final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
- Preconditions.checkNotNull(name, "name should not be null");
- Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
- Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
- Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
-
+ public static Props props(final ShardIdentifier name, final Map<String, String> peerAddresses,
+ final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
}
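    // Usage sketch (illustrative, not part of this change): creating the shard
    // actor from these Props. The actorSystem, shardId, peerAddresses,
    // datastoreContext and schemaContext names are assumed to be in scope.
    //
    //     ActorRef shardActor = actorSystem.actorOf(
    //             Shard.props(shardId, peerAddresses, datastoreContext, schemaContext),
    //             shardId.toString());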
if(context.error().isPresent()){
LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
- context.error());
+ context.error());
}
try {
} else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
closeTransactionChain(CloseTransactionChain.fromSerializable(message));
} else if (message instanceof RegisterChangeListener) {
- changeSupport.onMessage((RegisterChangeListener) message, isLeader());
+ changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
} else if (message instanceof RegisterDataTreeChangeListener) {
- treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
+ treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
} else if (message instanceof UpdateSchemaContext) {
updateSchemaContext((UpdateSchemaContext) message);
} else if (message instanceof PeerAddressResolved) {
}
}
+ private boolean hasLeader() {
+ return getLeaderId() != null;
+ }
+
public int getPendingTxCommitQueueSize() {
return commitCoordinator.getQueueSize();
}
leaderPayloadVersion);
}
- private void onDatastoreContext(DatastoreContext context) {
+ protected void onDatastoreContext(DatastoreContext context) {
datastoreContext = context;
commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());
}
void continueCommit(final CohortEntry cohortEntry) throws Exception {
- final DataTreeCandidate candidate = cohortEntry.getCohort().getCandidate();
+ final DataTreeCandidate candidate = cohortEntry.getCandidate();
        // If we do not have any followers and we are not using persistence,
        // or if the cohortEntry has no modifications,
        // we can apply the modification to the state immediately.
LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
try {
- // We block on the future here so we don't have to worry about possibly accessing our
- // state on a different thread outside of our dispatcher. Also, the data store
- // currently uses a same thread executor anyway.
- cohortEntry.getCohort().commit().get();
+ cohortEntry.commit();
sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
if(cohortEntry != null) {
try {
- store.applyForeignCandidate(transactionID, cohortEntry.getCohort().getCandidate());
+ store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
} catch (DataValidationFailedException e) {
shardMBean.incrementFailedTransactionsCount();
LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(errMessage, persistenceId())), getSelf());
}
+ protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
+ try {
+ commitCoordinator.handleBatchedModifications(batched, sender, this);
+ } catch (Exception e) {
+ LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
+ batched.getTransactionID(), e);
+ sender.tell(new akka.actor.Status.Failure(e), getSelf());
+ }
+ }
+
    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the sender sets the ready flag to indicate the modifications
        // are complete and the transaction can be readied for commit.
if(isLeader()) {
failIfIsolatedLeader(getSender());
- try {
- commitCoordinator.handleBatchedModifications(batched, getSender(), this);
- } catch (Exception e) {
- LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
- batched.getTransactionID(), e);
- getSender().tell(new akka.actor.Status.Failure(e), getSelf());
- }
+ handleBatchedModificationsLocal(batched, getSender());
} else {
ActorSelection leader = getLeader();
if(leader != null) {
}
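    // Illustrative sketch of the batching protocol described above, as a client
    // might drive it. The txId, version, chainId, path and data values are
    // assumptions for illustration, not part of this change.
    //
    //     BatchedModifications batched = new BatchedModifications(txId, version, chainId);
    //     batched.addModification(new WriteModification(path, data));
    //     batched.setReady(true); // final message - the Shard readies the Tx for commit
    //     shardActor.tell(batched, ActorRef.noSender());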
private boolean failIfIsolatedLeader(ActorRef sender) {
- if(getRaftState() == RaftState.IsolatedLeader) {
+ if(isIsolatedLeader()) {
sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
"Shard %s was the leader but has lost contact with all of its followers. Either all" +
" other follower nodes are down or this node is isolated by a network partition.",
return false;
}
+ protected boolean isIsolatedLeader() {
+ return getRaftState() == RaftState.IsolatedLeader;
+ }
+
private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
if (isLeader()) {
failIfIsolatedLeader(getSender());
}
void doAbortTransaction(final String transactionID, final ActorRef sender) {
- final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
- if(cohortEntry != null) {
- LOG.debug("{}: Aborting transaction {}", persistenceId(), transactionID);
-
- // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
- // aborted during replication in which case we may still commit locally if replication
- // succeeds.
- commitCoordinator.currentTransactionComplete(transactionID, false);
-
- final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
- final ActorRef self = getSelf();
-
- Futures.addCallback(future, new FutureCallback<Void>() {
- @Override
- public void onSuccess(final Void v) {
- shardMBean.incrementAbortTransactionsCount();
-
- if(sender != null) {
- sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), self);
- }
- }
-
- @Override
- public void onFailure(final Throwable t) {
- LOG.error("{}: An exception happened during abort", persistenceId(), t);
-
- if(sender != null) {
- sender.tell(new akka.actor.Status.Failure(t), self);
- }
- }
- });
- }
+ commitCoordinator.handleAbort(transactionID, sender, this);
}
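    // Sketch of where the abort logic deleted above plausibly moved: a
    // coordinator-side handleAbort inferred from the call site and the removed
    // callback code. The method shape and the shard.getShardMBean() accessor
    // are assumptions, not the verbatim implementation.
    //
    //     void handleAbort(final String transactionID, final ActorRef sender, final Shard shard) {
    //         final CohortEntry cohortEntry = getCohortEntryIfCurrent(transactionID);
    //         if (cohortEntry == null) {
    //             return;
    //         }
    //
    //         LOG.debug("{}: Aborting transaction {}", shard.persistenceId(), transactionID);
    //
    //         // Keep the cached cohort entry (pass false) in case the Tx was aborted
    //         // during replication, in which case we may still commit locally if
    //         // replication succeeds.
    //         currentTransactionComplete(transactionID, false);
    //
    //         Futures.addCallback(cohortEntry.getCohort().abort(), new FutureCallback<Void>() {
    //             @Override
    //             public void onSuccess(final Void v) {
    //                 shard.getShardMBean().incrementAbortTransactionsCount();
    //                 if (sender != null) {
    //                     sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), shard.getSelf());
    //                 }
    //             }
    //
    //             @Override
    //             public void onFailure(final Throwable t) {
    //                 LOG.error("{}: An exception happened during abort", shard.persistenceId(), t);
    //                 if (sender != null) {
    //                     sender.tell(new akka.actor.Status.Failure(t), shard.getSelf());
    //                 }
    //             }
    //         });
    //     }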
private void handleCreateTransaction(final Object message) {
@Override
protected void onStateChanged() {
boolean isLeader = isLeader();
- changeSupport.onLeadershipChange(isLeader);
- treeChangeSupport.onLeadershipChange(isLeader);
+ boolean hasLeader = hasLeader();
+ changeSupport.onLeadershipChange(isLeader, hasLeader);
+ treeChangeSupport.onLeadershipChange(isLeader, hasLeader);
// If this actor is no longer the leader close all the transaction chains
if (!isLeader) {
return commitCoordinator;
}
+ public DatastoreContext getDatastoreContext() {
+ return datastoreContext;
+ }
- private static class ShardCreator implements Creator<Shard> {
-
+ protected abstract static class AbstractShardCreator implements Creator<Shard> {
private static final long serialVersionUID = 1L;
- final ShardIdentifier name;
- final Map<String, String> peerAddresses;
- final DatastoreContext datastoreContext;
- final SchemaContext schemaContext;
+ protected final ShardIdentifier name;
+ protected final Map<String, String> peerAddresses;
+ protected final DatastoreContext datastoreContext;
+ protected final SchemaContext schemaContext;
+
+ protected AbstractShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
+ final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
+ this.name = Preconditions.checkNotNull(name, "name should not be null");
+ this.peerAddresses = Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
+            this.datastoreContext = Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
+ this.schemaContext = Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
+ }
+ }
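    // Hypothetical example (not in this change) of the extension point this
    // abstraction opens: a Shard subclass with its own creator reusing the null
    // checks above. ForwardingShard/ForwardingShardCreator are invented names.
    //
    //     private static class ForwardingShardCreator extends AbstractShardCreator {
    //         private static final long serialVersionUID = 1L;
    //
    //         ForwardingShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
    //                 final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
    //             super(name, peerAddresses, datastoreContext, schemaContext);
    //         }
    //
    //         @Override
    //         public Shard create() {
    //             // The subclass can also override protected hooks such as onDatastoreContext(),
    //             // isIsolatedLeader() or handleBatchedModificationsLocal().
    //             return new ForwardingShard(name, peerAddresses, datastoreContext, schemaContext);
    //         }
    //     }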
+
+ private static class ShardCreator extends AbstractShardCreator {
+ private static final long serialVersionUID = 1L;
ShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
- this.name = name;
- this.peerAddresses = peerAddresses;
- this.datastoreContext = datastoreContext;
- this.schemaContext = schemaContext;
+ super(name, peerAddresses, datastoreContext, schemaContext);
}
@Override