import akka.persistence.SaveSnapshotSuccess;
import akka.persistence.SnapshotOffer;
import akka.persistence.SnapshotSelectionCriteria;
-import akka.persistence.UntypedPersistentActor;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
+import com.google.common.base.Stopwatch;
import com.google.protobuf.ByteString;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
-import org.opendaylight.controller.cluster.raft.behaviors.Candidate;
+import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.behaviors.AbstractRaftActorBehavior;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
-import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
-import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
* <li> when a snapshot should be saved </li>
* </ul>
*/
-public abstract class RaftActor extends UntypedPersistentActor {
+public abstract class RaftActor extends AbstractUntypedPersistentActor {
protected final LoggingAdapter LOG =
Logging.getLogger(getContext().system(), this);
* This context should NOT be passed directly to any other actor it is
* only to be consumed by the RaftActorBehaviors
*/
- protected RaftActorContext context;
+ private final RaftActorContext context;
/**
* The in-memory journal
private volatile boolean hasSnapshotCaptureInitiated = false;
+ private Stopwatch recoveryTimer;
+
+ private int currentRecoveryBatchCount;
+
public RaftActor(String id, Map<String, String> peerAddresses) {
this(id, peerAddresses, Optional.<ConfigParams>absent());
}
LOG);
}
- @Override public void onReceiveRecover(Object message) {
- if (message instanceof SnapshotOffer) {
- LOG.info("SnapshotOffer called..");
- SnapshotOffer offer = (SnapshotOffer) message;
- Snapshot snapshot = (Snapshot) offer.snapshot();
+ // Lazily creates and starts the Guava Stopwatch used to report total recovery
+ // time; idempotent, so every recovery step may call it safely.
+ private void initRecoveryTimer() {
+ if(recoveryTimer == null) {
+ recoveryTimer = new Stopwatch();
+ recoveryTimer.start();
+ }
+ }
- // Create a replicated log with the snapshot information
- // The replicated log can be used later on to retrieve this snapshot
- // when we need to install it on a peer
- replicatedLog = new ReplicatedLogImpl(snapshot);
+ // Logs the configured journal-recovery batch size up front, then delegates to
+ // the superclass which kicks off akka-persistence recovery (handleRecover).
+ @Override
+ public void preStart() throws Exception {
+ LOG.info("Starting recovery for {} with journal batch size {}", persistenceId(),
+ context.getConfigParams().getJournalRecoveryLogBatchSize());
- context.setReplicatedLog(replicatedLog);
- context.setLastApplied(snapshot.getLastAppliedIndex());
- context.setCommitIndex(snapshot.getLastAppliedIndex());
+ super.preStart();
+ }
- LOG.info("Applied snapshot to replicatedLog. " +
- "snapshotIndex={}, snapshotTerm={}, journal-size={}",
- replicatedLog.snapshotIndex, replicatedLog.snapshotTerm,
- replicatedLog.size()
- );
+ // Routes akka-persistence recovery messages. When the DataPersistenceProvider
+ // says recovery is applicable, snapshot/journal events rebuild the in-memory
+ // replicated log and election-term state; otherwise all previously stored data
+ // is purged and the actor starts fresh as a Follower.
+ @Override
+ public void handleRecover(Object message) {
+ if(persistence().isRecoveryApplicable()) {
+ if (message instanceof SnapshotOffer) {
+ onRecoveredSnapshot((SnapshotOffer) message);
+ } else if (message instanceof ReplicatedLogEntry) {
+ onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
+ } else if (message instanceof ApplyLogEntries) {
+ onRecoveredApplyLogEntries((ApplyLogEntries) message);
+ } else if (message instanceof DeleteEntries) {
+ // Replay of a prior log truncation (see ReplicatedLogImpl.removeFrom).
+ replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+ } else if (message instanceof UpdateElectionTerm) {
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+ } else if (message instanceof RecoveryCompleted) {
+ onRecoveryCompletedMessage();
+ }
+ } else {
+ if (message instanceof RecoveryCompleted) {
+ // Delete all the messages from the akka journal so that we do not end up with consistency issues
+ // Note I am not using the dataPersistenceProvider and directly using the akka api here
+ deleteMessages(lastSequenceNr());
- // Apply the snapshot to the actors state
- applySnapshot(ByteString.copyFrom(snapshot.getState()));
+ // Delete all the akka snapshots as they will not be needed
+ deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), scala.Long.MaxValue()));
- } else if (message instanceof ReplicatedLogEntry) {
- ReplicatedLogEntry logEntry = (ReplicatedLogEntry) message;
- LOG.info("Received ReplicatedLogEntry for recovery:{}", logEntry.getIndex());
- replicatedLog.append(logEntry);
+ // Nothing was recovered, so start out in the default Follower role.
+ onRecoveryComplete();
+ currentBehavior = new Follower(context);
+ onStateChanged();
+ }
+ }
+ }
- } else if (message instanceof ApplyLogEntries) {
- ApplyLogEntries ale = (ApplyLogEntries) message;
+ // Rebuilds the in-memory replicated log from a recovered snapshot and hands
+ // the snapshot state to the derived class via applyRecoverySnapshot.
+ private void onRecoveredSnapshot(SnapshotOffer offer) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("SnapshotOffer called..");
+ }
- LOG.info("Received ApplyLogEntries for recovery, applying to state:{} to {}",
- context.getLastApplied() + 1, ale.getToIndex());
+ initRecoveryTimer();
- for (long i = context.getLastApplied() + 1; i <= ale.getToIndex(); i++) {
- applyState(null, "recovery", replicatedLog.get(i).getData());
- }
- context.setLastApplied(ale.getToIndex());
- context.setCommitIndex(ale.getToIndex());
+ Snapshot snapshot = (Snapshot) offer.snapshot();
+
+ // Create a replicated log with the snapshot information
+ // The replicated log can be used later on to retrieve this snapshot
+ // when we need to install it on a peer
+ replicatedLog = new ReplicatedLogImpl(snapshot);
+
+ context.setReplicatedLog(replicatedLog);
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
+
+ // Separate timer so the snapshot-apply cost is reported on its own,
+ // independent of the overall recoveryTimer.
+ Stopwatch timer = new Stopwatch();
+ timer.start();
+
+ // Apply the snapshot to the actors state
+ applyRecoverySnapshot(ByteString.copyFrom(snapshot.getState()));
+
+ timer.stop();
+ // NOTE(review): journal-size is string-concatenated rather than a 5th {}
+ // placeholder - presumably because the logging template supports only a
+ // limited number of args; confirm before "cleaning up".
+ LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
+ replicatedLog.size(), persistenceId(), timer.toString(),
+ replicatedLog.snapshotIndex, replicatedLog.snapshotTerm);
+ }
+
+ // Appends a single journal entry to the in-memory log during recovery;
+ // entries are applied to actor state later, when ApplyLogEntries is replayed.
+ private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received ReplicatedLogEntry for recovery: {}", logEntry.getIndex());
+ }
- } else if (message instanceof DeleteEntries) {
- replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+ replicatedLog.append(logEntry);
+ }
- } else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
- ((UpdateElectionTerm) message).getVotedFor());
+ // Replays an ApplyLogEntries marker: batches every log entry from
+ // lastApplied+1 through ale.getToIndex() into the derived class, then
+ // advances both the last-applied and commit indices to match.
+ private void onRecoveredApplyLogEntries(ApplyLogEntries ale) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received ApplyLogEntries for recovery, applying to state: {} to {}",
+ context.getLastApplied() + 1, ale.getToIndex());
+ }
- } else if (message instanceof RecoveryCompleted) {
- LOG.info(
- "RecoveryCompleted - Switching actor to Follower - " +
- "Persistence Id = " + persistenceId() +
- " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
- "journal-size={}",
- replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
- replicatedLog.snapshotTerm, replicatedLog.size());
- currentBehavior = switchBehavior(RaftState.Follower);
- onStateChanged();
+ for (long i = context.getLastApplied() + 1; i <= ale.getToIndex(); i++) {
+ batchRecoveredLogEntry(replicatedLog.get(i));
 }
+
+ context.setLastApplied(ale.getToIndex());
+ context.setCommitIndex(ale.getToIndex());
 }
- @Override public void onReceiveCommand(Object message) {
+ // Accumulates recovered entries into fixed-size batches: opens a batch on the
+ // first entry, hands each payload to the derived class, and flushes once the
+ // configured batch size is reached.
+ private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+ initRecoveryTimer();
+
+ int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
+ if(currentRecoveryBatchCount == 0) {
+ startLogRecoveryBatch(batchSize);
+ }
+
+ appendRecoveredLogEntry(logEntry.getData());
+
+ if(++currentRecoveryBatchCount >= batchSize) {
+ endCurrentLogRecoveryBatch();
+ }
+ }
+
+ // Flushes the current batch to the derived class and resets the counter so
+ // the next entry starts a new batch.
+ private void endCurrentLogRecoveryBatch() {
+ applyCurrentLogRecoveryBatch();
+ currentRecoveryBatchCount = 0;
+ }
+
+ // Finalizes recovery: flushes any partial batch, notifies the derived class,
+ // logs total recovery time and log stats, then switches to Follower.
+ private void onRecoveryCompletedMessage() {
+ if(currentRecoveryBatchCount > 0) {
+ endCurrentLogRecoveryBatch();
+ }
+
+ onRecoveryComplete();
+
+ String recoveryTime = "";
+ if(recoveryTimer != null) {
+ // Null the timer out so a stale elapsed time is never reported twice.
+ recoveryTimer.stop();
+ recoveryTime = " in " + recoveryTimer.toString();
+ recoveryTimer = null;
+ }
+
+ LOG.info(
+ "Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
+ "Persistence Id = " + persistenceId() +
+ " Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
+ "journal-size={}",
+ replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
+ replicatedLog.snapshotTerm, replicatedLog.size());
+
+ // Every RaftActor starts out as a Follower per the raft protocol.
+ currentBehavior = new Follower(context);
+ onStateChanged();
+ }
+
+ @Override public void handleCommand(Object message) {
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
} else if (message instanceof ApplyLogEntries){
ApplyLogEntries ale = (ApplyLogEntries) message;
- LOG.info("Persisting ApplyLogEntries with index={}", ale.getToIndex());
- persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Persisting ApplyLogEntries with index={}", ale.getToIndex());
+ }
+ persistence().persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
@Override
public void apply(ApplyLogEntries param) throws Exception {
}
SaveSnapshotSuccess success = (SaveSnapshotSuccess) message;
LOG.info("SaveSnapshotSuccess received for snapshot");
- context.getReplicatedLog().snapshotCommit();
+ long sequenceNumber = success.metadata().sequenceNr();
- // TODO: Not sure if we want to be this aggressive with trimming stuff
- trimPersistentData(success.metadata().sequenceNr());
+ commitSnapshot(sequenceNumber);
} else if (message instanceof SaveSnapshotFailure) {
SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
context.getReplicatedLog().getSnapshotTerm(),
context.getReplicatedLog().size());
- } else if (message instanceof AddRaftPeer){
-
- // FIXME : Do not add raft peers like this.
- // When adding a new Peer we have to ensure that the a majority of
- // the peers know about the new Peer. Doing it this way may cause
- // a situation where multiple Leaders may emerge
- AddRaftPeer arp = (AddRaftPeer)message;
- context.addToPeers(arp.getName(), arp.getAddress());
-
- } else if (message instanceof RemoveRaftPeer){
-
- RemoveRaftPeer rrp = (RemoveRaftPeer)message;
- context.removePeer(rrp.getName());
-
} else if (message instanceof CaptureSnapshot) {
LOG.info("CaptureSnapshot received by actor");
CaptureSnapshot cs = (CaptureSnapshot)message;
if (!(message instanceof AppendEntriesMessages.AppendEntries)
&& !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
if(LOG.isDebugEnabled()) {
- LOG.debug("onReceiveCommand: message:" + message.getClass());
+ LOG.debug("onReceiveCommand: message: {}", message.getClass());
}
}
- RaftState state =
- currentBehavior.handleMessage(getSender(), message);
RaftActorBehavior oldBehavior = currentBehavior;
- currentBehavior = switchBehavior(state);
+ currentBehavior = currentBehavior.handleMessage(getSender(), message);
+
if(oldBehavior != currentBehavior){
onStateChanged();
}
}
+ // Returns the ids of all known peers (a view backed by the context's map).
 public java.util.Set<String> getPeers() {
+
 return context.getPeerAddresses().keySet();
 }
return context.getLastApplied();
}
+ // Exposes the raft context to subclasses; per the class docs it must only be
+ // consumed by RaftActorBehaviors, never handed to other actors.
+ protected RaftActorContext getRaftActorContext() {
+ return context;
+ }
+
/**
* setPeerAddress sets the address of a known peer at a later time.
* <p>
context.setPeerAddress(peerId, peerAddress);
}
+ // Commits the pending in-memory snapshot and trims persisted data up to the
+ // given akka sequence number. Overridable so non-persistent providers can
+ // reuse it (see NonPersistentRaftDataProvider.saveSnapshot).
+ protected void commitSnapshot(long sequenceNumber) {
+ context.getReplicatedLog().snapshotCommit();
+ // TODO: Not sure if we want to be this aggressive with trimming stuff
+ trimPersistentData(sequenceNumber);
+ }
/**
* The applyState method will be called by the RaftActor when some data
protected abstract void applyState(ActorRef clientActor, String identifier,
Object data);
+ /**
+ * This method is called during recovery at the start of a batch of state entries. Derived
+ * classes should perform any initialization needed to start a batch.
+ *
+ * @param maxBatchSize the maximum number of entries that will be appended to this batch
+ */
+ protected abstract void startLogRecoveryBatch(int maxBatchSize);
+
+ /**
+ * This method is called during recovery to append state data to the current batch. This method
+ * is called 1 or more times after {@link #startLogRecoveryBatch}.
+ *
+ * @param data the state data
+ */
+ protected abstract void appendRecoveredLogEntry(Payload data);
+
+ /**
+ * This method is called during recovery to reconstruct the state of the actor.
+ *
+ * @param snapshot A snapshot of the state of the actor
+ */
+ protected abstract void applyRecoverySnapshot(ByteString snapshot);
+
+ /**
+ * This method is called during recovery at the end of a batch to apply the current batched
+ * log entries. This method is called after {@link #appendRecoveredLogEntry}.
+ */
+ protected abstract void applyCurrentLogRecoveryBatch();
+
+ /**
+ * This method is called when recovery is complete.
+ */
+ protected abstract void onRecoveryComplete();
+
+
/**
* This method will be called by the RaftActor when a snapshot needs to be
* created. The derived actor should respond with its current state.
protected abstract void createSnapshot();
/**
- * This method will be called by the RaftActor during recovery to
- * reconstruct the state of the actor.
- * <p/>
- * This method may also be called at any other point during normal
+ * This method can be called at any other point during normal
* operations when the derived actor is out of sync with it's peers
* and the only way to bring it in sync is by applying a snapshot
*
*/
protected abstract void onStateChanged();
- protected void onLeaderChanged(String oldLeader, String newLeader){};
-
- private RaftActorBehavior switchBehavior(RaftState state) {
- if (currentBehavior != null) {
- if (currentBehavior.state() == state) {
- return currentBehavior;
- }
- LOG.info("Switching from state " + currentBehavior.state() + " to "
- + state);
-
- try {
- currentBehavior.close();
- } catch (Exception e) {
- LOG.error(e,
- "Failed to close behavior : " + currentBehavior.state());
- }
-
- } else {
- LOG.info("Switching behavior to " + state);
- }
- RaftActorBehavior behavior = null;
- if (state == RaftState.Candidate) {
- behavior = new Candidate(context);
- } else if (state == RaftState.Follower) {
- behavior = new Follower(context);
- } else {
- behavior = new Leader(context);
- }
-
-
+ // Supplies the DataPersistenceProvider used for all persist/snapshot/delete
+ // calls, letting subclasses choose persistent or non-persistent storage.
+ protected abstract DataPersistenceProvider persistence();
- return behavior;
- }
+ // Hook invoked when the raft leader changes; default implementation is a no-op.
+ protected void onLeaderChanged(String oldLeader, String newLeader){};
+ // Trims both akka snapshots and journal entries that precede the given
+ // sequence number, going through the pluggable persistence provider.
 private void trimPersistentData(long sequenceNumber) {
 // Trim akka snapshots
 // FIXME : Not sure how exactly the SnapshotSelectionCriteria is applied
 // For now guessing that it is ANDed.
- deleteSnapshots(new SnapshotSelectionCriteria(
+ persistence().deleteSnapshots(new SnapshotSelectionCriteria(
 sequenceNumber - context.getConfigParams().getSnapshotBatchCount(), 43200000));
 // Trim akka journal
- deleteMessages(sequenceNumber);
+ persistence().deleteMessages(sequenceNumber);
 }
private String getLeaderAddress(){
}
String peerAddress = context.getPeerAddress(leaderId);
if(LOG.isDebugEnabled()) {
- LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
- + peerAddress);
+ LOG.debug("getLeaderAddress leaderId = {} peerAddress = {}",
+ leaderId, peerAddress);
}
return peerAddress;
captureSnapshot.getLastIndex(), captureSnapshot.getLastTerm(),
captureSnapshot.getLastAppliedIndex(), captureSnapshot.getLastAppliedTerm());
- saveSnapshot(sn);
+ persistence().saveSnapshot(sn);
LOG.info("Persisting of snapshot done:{}", sn.getLogMessage());
//be greedy and remove entries from in-mem journal which are in the snapshot
// and update snapshotIndex and snapshotTerm without waiting for the success,
- context.getReplicatedLog().snapshotPreCommit(stateInBytes,
+ context.getReplicatedLog().snapshotPreCommit(
captureSnapshot.getLastAppliedIndex(),
captureSnapshot.getLastAppliedTerm());
"and term:{}", captureSnapshot.getLastAppliedIndex(),
captureSnapshot.getLastAppliedTerm());
+ if (isLeader() && captureSnapshot.isInstallSnapshotInitiated()) {
+ // this would be call straight to the leader and won't initiate in serialization
+ currentBehavior.handleMessage(getSelf(), new SendInstallSnapshot(stateInBytes));
+ }
+
captureSnapshot = null;
hasSnapshotCaptureInitiated = false;
}
-
private class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
+ // Seeds the in-memory log from a snapshot: snapshot index/term plus any
+ // entries that were journaled after the snapshot was taken.
 public ReplicatedLogImpl(Snapshot snapshot) {
- super(ByteString.copyFrom(snapshot.getState()),
- snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
+ super(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
 snapshot.getUnAppliedEntries());
 }
// FIXME: Maybe this should be done after the command is saved
journal.subList(adjustedIndex , journal.size()).clear();
- persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
+ persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
@Override public void apply(DeleteEntries param)
throws Exception {
public void appendAndPersist(final ActorRef clientActor,
final String identifier,
final ReplicatedLogEntry replicatedLogEntry) {
- context.getLogger().debug(
- "Append log entry and persist {} ", replicatedLogEntry);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Append log entry and persist {} ", replicatedLogEntry);
+ }
+
// FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
journal.add(replicatedLogEntry);
// persist call and the execution(s) of the associated event
// handler. This also holds for multiple persist calls in context
// of a single command.
- persist(replicatedLogEntry,
+ persistence().persist(replicatedLogEntry,
new Procedure<ReplicatedLogEntry>() {
+ @Override
public void apply(ReplicatedLogEntry evt) throws Exception {
// when a snaphsot is being taken, captureSnapshot != null
if (hasSnapshotCaptureInitiated == false &&
}
- private static class DeleteEntries implements Serializable {
+ static class DeleteEntries implements Serializable {
+ private static final long serialVersionUID = 1L;
private final int fromIndex;
-
public DeleteEntries(int fromIndex) {
this.fromIndex = fromIndex;
}
private long currentTerm = 0;
private String votedFor = null;
+ @Override
public long getCurrentTerm() {
return currentTerm;
}
+ @Override
public String getVotedFor() {
return votedFor;
}
public void updateAndPersist(long currentTerm, String votedFor){
update(currentTerm, votedFor);
// FIXME : Maybe first persist then update the state
- persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
+ persistence().persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
@Override public void apply(UpdateElectionTerm param)
throws Exception {
}
}
- private static class UpdateElectionTerm implements Serializable {
+ static class UpdateElectionTerm implements Serializable {
+ private static final long serialVersionUID = 1L;
private final long currentTerm;
private final String votedFor;
}
}
+ // Persistence provider for RaftActors running without durable storage: it
+ // short-circuits saveSnapshot so the in-memory journal is still trimmed.
+ protected class NonPersistentRaftDataProvider extends NonPersistentDataProvider {
+
+ public NonPersistentRaftDataProvider(){
+
+ }
+
+ /**
+ * The way snapshotting works is,
+ * <ol>
+ * <li> RaftActor calls createSnapshot on the Shard
+ * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
+ * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save the snapshot.
+ * The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the RaftActor gets SaveSnapshot
+ * success it commits the snapshot to the in-memory journal. This commitSnapshot is mimicking what is done
+ * in SaveSnapshotSuccess.
+ * </ol>
+ * @param o
+ */
+ @Override
+ public void saveSnapshot(Object o) {
+ // Make saving Snapshot successful
+ // -1 indicates there is no akka snapshot sequence number to trim against.
+ commitSnapshot(-1L);
+ }
+ }
+
+ // Test-only seam for injecting a behavior without going through an election.
+ @VisibleForTesting
+ void setCurrentBehavior(AbstractRaftActorBehavior behavior) {
+ currentBehavior = behavior;
+ }
+
+ // Exposes the active behavior (Follower/Candidate/Leader) to subclasses.
+ protected RaftActorBehavior getCurrentBehavior() {
+ return currentBehavior;
+ }
}