import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
-import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
+import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
- store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType());
+ store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
+ new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
+ new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);
shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
datastoreContext.getDataStoreMXBeanType());
}
@Override
- public void onReceiveRecover(final Object message) throws Exception {
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
- message.getClass().toString(), getSender());
- }
-
- if (message instanceof RecoveryFailure){
- LOG.error("{}: Recovery failed because of this cause",
- persistenceId(), ((RecoveryFailure) message).cause());
+ protected void handleRecover(final Object message) {
+ LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
+ getSender());
- // Even though recovery failed, we still need to finish our recovery, eg send the
- // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
- onRecoveryComplete();
- } else {
- super.onReceiveRecover(message);
- if(LOG.isTraceEnabled()) {
- appendEntriesReplyTracker.begin();
- }
+ super.handleRecover(message);
+ if (LOG.isTraceEnabled()) {
+ appendEntriesReplyTracker.begin();
}
}
@Override
- public void onReceiveCommand(final Object message) throws Exception {
+ protected void handleCommand(final Object message) {
MessageTracker.Context context = appendEntriesReplyTracker.received(message);
try {
if (CreateTransaction.isSerializedType(message)) {
handleCreateTransaction(message);
- } else if (BatchedModifications.class.isInstance(message)) {
+ } else if (message instanceof BatchedModifications) {
handleBatchedModifications((BatchedModifications)message);
} else if (message instanceof ForwardedReadyTransaction) {
handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
} else if(ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
messageRetrySupport.onTimerMessage(message);
} else {
- super.onReceiveCommand(message);
+ super.handleCommand(message);
}
} finally {
context.done();
messageRetrySupport.addMessageToRetry(batched, getSender(),
"Could not commit transaction " + batched.getTransactionID());
} else {
- // TODO: what if this is not the first batch and leadership changed in between batched messages?
- // We could check if the commitCoordinator already has a cached entry and forward all the previous
- // batched modifications.
- LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
- leader.forward(batched, getContext());
+ // If this is not the first batch and leadership changed in between batched messages,
+ // we need to reconstruct previous BatchedModifications from the transaction
+ // DataTreeModification, honoring the max batched modification count, and forward all the
+ // previous BatchedModifications to the new leader.
+ Collection<BatchedModifications> newModifications = commitCoordinator.createForwardedBatchedModifications(
+ batched, datastoreContext.getShardBatchedModificationCount());
+
+ LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
+ newModifications.size(), leader);
+
+            for (BatchedModifications bm : newModifications) {
+ leader.forward(bm, getContext());
+ }
}
}
}