Bug 6587: Retain state when transitioning between Leader and IsolatedLeader
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
index 0927b0a5313dd4fc32393cd1b51f44c5007dcb7b..b241e0a67a4f81118b1592e25bc1011e336318dc 100644
--- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
+++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
@@ -14,10 +14,9 @@ import akka.actor.Cancellable;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
 import com.google.protobuf.ByteString;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -26,16 +25,18 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
+import java.util.Queue;
+import javax.annotation.Nullable;
 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
 import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
 import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
 import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
+import org.opendaylight.controller.cluster.raft.PeerInfo;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import org.opendaylight.controller.cluster.raft.VotingState;
 import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
 import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
@@ -45,6 +46,8 @@ import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
+import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply;
+import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -80,56 +83,54 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
     // This would be passed as the hash code of the last chunk when sending the first chunk
     public static final int INITIAL_LAST_CHUNK_HASH_CODE = -1;
 
-    private final Map<String, FollowerLogInformation> followerToLog;
+    private final Map<String, FollowerLogInformation> followerToLog = new HashMap<>();
     private final Map<String, FollowerToSnapshot> mapFollowerToSnapshot = new HashMap<>();
 
-    private Cancellable heartbeatSchedule = null;
-
-    private final Collection<ClientRequestTracker> trackerList = new LinkedList<>();
-
-    protected final int minReplicationCount;
-
-    protected final int minIsolatedLeaderPeerCount;
-
-    private Optional<ByteString> snapshot;
-
-    private long replicatedToAllIndex = -1;
-
-    public AbstractLeader(RaftActorContext context) {
-        super(context);
-
-        final Builder<String, FollowerLogInformation> ftlBuilder = ImmutableMap.builder();
-        for (String followerId : context.getPeerAddresses().keySet()) {
-            FollowerLogInformation followerLogInformation =
-                new FollowerLogInformationImpl(followerId,
-                    context.getCommitIndex(), -1,
-                    context.getConfigParams().getElectionTimeOutInterval());
+    /**
+     * Lookup table for request contexts based on journal index. We could use a {@link Map} here, but we really
+     * expect the entries to be modified in sequence, hence we open-code the lookup.
+     *
+     * TODO: Evaluate the use of ArrayDeque(), as that has lower memory overhead. Non-head removals are more costly,
+     *       but we already expect those to be far from frequent.
+     */
+    private final Queue<ClientRequestTracker> trackers = new LinkedList<>();
 
-            ftlBuilder.put(followerId, followerLogInformation);
+    private Cancellable heartbeatSchedule = null;
+    private Optional<SnapshotHolder> snapshot = Optional.absent();
+    private int minReplicationCount;
+
+    protected AbstractLeader(RaftActorContext context, RaftState state,
+            @Nullable AbstractLeader initializeFromLeader) {
+        super(context, state);
+
+        if(initializeFromLeader != null) {
+            followerToLog.putAll(initializeFromLeader.followerToLog);
+            mapFollowerToSnapshot.putAll(initializeFromLeader.mapFollowerToSnapshot);
+            snapshot = initializeFromLeader.snapshot;
+            trackers.addAll(initializeFromLeader.trackers);
+        } else {
+            for(PeerInfo peerInfo: context.getPeers()) {
+                FollowerLogInformation followerLogInformation = new FollowerLogInformationImpl(peerInfo, -1, context);
+                followerToLog.put(peerInfo.getId(), followerLogInformation);
+            }
         }
-        followerToLog = ftlBuilder.build();
-
-        leaderId = context.getId();
 
-        LOG.debug("{}: Election: Leader has following peers: {}", context.getId(), getFollowerIds());
+        LOG.debug("{}: Election: Leader has following peers: {}", logName(), getFollowerIds());
 
-        minReplicationCount = getMajorityVoteCount(getFollowerIds().size());
-
-        // the isolated Leader peer count will be 1 less than the majority vote count.
-        // this is because the vote count has the self vote counted in it
-        // for e.g
-        // 0 peers = 1 votesRequired , minIsolatedLeaderPeerCount = 0
-        // 2 peers = 2 votesRequired , minIsolatedLeaderPeerCount = 1
-        // 4 peers = 3 votesRequired, minIsolatedLeaderPeerCount = 2
-        minIsolatedLeaderPeerCount = minReplicationCount > 0 ? (minReplicationCount - 1) : 0;
-
-        snapshot = Optional.absent();
+        updateMinReplicaCount();
 
         // Immediately schedule a heartbeat
         // Upon election: send initial empty AppendEntries RPCs
         // (heartbeat) to each server; repeat during idle periods to
         // prevent election timeouts (§5.2)
-        scheduleHeartBeat(new FiniteDuration(0, TimeUnit.SECONDS));
+        sendAppendEntries(0, false);
+
+        // It is important to schedule this heartbeat here
+        scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
+    }
+
+    protected AbstractLeader(RaftActorContext context, RaftState state) {
+        this(context, state, null);
     }
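
The three-argument constructor above is the core of this fix: when a new leader behavior is created from an existing one, follower progress, in-flight client request trackers and any pending install snapshot are copied across rather than rebuilt. A minimal sketch of how a leader variant might use it (illustrative only; the ExampleIsolatedLeader class is hypothetical and the real Leader/IsolatedLeader classes may differ):

    // Hedged sketch: hand the previous behavior to the successor so that followerToLog,
    // mapFollowerToSnapshot, trackers and snapshot survive the Leader <-> IsolatedLeader switch.
    class ExampleIsolatedLeader extends AbstractLeader {
        ExampleIsolatedLeader(RaftActorContext context, AbstractLeader previous) {
            super(context, RaftState.IsolatedLeader, previous);
        }
    }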
 
     /**
@@ -137,26 +138,61 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
      *
      * @return Collection of follower IDs
      */
-    protected final Collection<String> getFollowerIds() {
+    public final Collection<String> getFollowerIds() {
         return followerToLog.keySet();
     }
 
-    private Optional<ByteString> getSnapshot() {
-        return snapshot;
+    public void addFollower(String followerId) {
+        FollowerLogInformation followerLogInformation = new FollowerLogInformationImpl(
+                context.getPeerInfo(followerId), -1, context);
+        followerToLog.put(followerId, followerLogInformation);
+
+        if(heartbeatSchedule == null) {
+            scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
+        }
+    }
+
+    public void removeFollower(String followerId) {
+        followerToLog.remove(followerId);
+        mapFollowerToSnapshot.remove(followerId);
+    }
+
+    public void updateMinReplicaCount() {
+        int numVoting = 0;
+        for(PeerInfo peer: context.getPeers()) {
+            if(peer.isVoting()) {
+                numVoting++;
+            }
+        }
+
+        minReplicationCount = getMajorityVoteCount(numVoting);
+    }
+
+    protected int getMinIsolatedLeaderPeerCount(){
+        // The isolated Leader peer count will be 1 less than the majority vote count.
+        // This is because the vote count has the self vote counted in it,
+        // for example:
+        //   0 peers = 1 votesRequired, minIsolatedLeaderPeerCount = 0
+        //   2 peers = 2 votesRequired, minIsolatedLeaderPeerCount = 1
+        //   4 peers = 3 votesRequired, minIsolatedLeaderPeerCount = 2
+
+        return minReplicationCount > 0 ? (minReplicationCount - 1) : 0;
     }
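
To make the comment's table concrete, here is a small worked sketch of the two counts (assuming, as the examples above imply, that getMajorityVoteCount() returns a majority of the voting peers plus the leader itself; the numbers are hypothetical):

    int numVotingPeers = 4;                       // voting followers, leader not included
    int clusterSize = numVotingPeers + 1;         // 5 nodes including the leader
    int votesRequired = clusterSize / 2 + 1;      // 3, matching "4 peers = 3 votesRequired"
    int minIsolatedPeers = votesRequired > 0 ? votesRequired - 1 : 0;   // 2
    // The leader considers itself isolated once fewer than 2 voting followers are active.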
 
     @VisibleForTesting
-    void setSnapshot(Optional<ByteString> snapshot) {
-        this.snapshot = snapshot;
+    void setSnapshot(@Nullable Snapshot snapshot) {
+        if(snapshot != null) {
+            this.snapshot = Optional.of(new SnapshotHolder(snapshot));
+        } else {
+            this.snapshot = Optional.absent();
+        }
     }
 
     @Override
     protected RaftActorBehavior handleAppendEntries(ActorRef sender,
         AppendEntries appendEntries) {
 
-        if(LOG.isDebugEnabled()) {
-            LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
-        }
+        LOG.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
 
         return this;
     }
@@ -165,10 +201,8 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
     protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
         AppendEntriesReply appendEntriesReply) {
 
-        if(! appendEntriesReply.isSuccess()) {
-            if(LOG.isDebugEnabled()) {
-                LOG.debug("{}: handleAppendEntriesReply: {}", context.getId(), appendEntriesReply);
-            }
+        if(LOG.isTraceEnabled()) {
+            LOG.trace("{}: handleAppendEntriesReply: {}", logName(), appendEntriesReply);
         }
 
         // Update the FollowerLogInformation
@@ -177,81 +211,182 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             followerToLog.get(followerId);
 
         if(followerLogInformation == null){
-            LOG.error("{}: handleAppendEntriesReply - unknown follower {}", context.getId(), followerId);
+            LOG.error("{}: handleAppendEntriesReply - unknown follower {}", logName(), followerId);
             return this;
         }
 
-        followerLogInformation.markFollowerActive();
+        if(followerLogInformation.timeSinceLastActivity() >
+                context.getConfigParams().getElectionTimeOutInterval().toMillis()) {
+            LOG.warn("{} : handleAppendEntriesReply delayed beyond election timeout, " +
+                            "appendEntriesReply : {}, timeSinceLastActivity : {}, lastApplied : {}, commitIndex : {}",
+                    logName(), appendEntriesReply, followerLogInformation.timeSinceLastActivity(),
+                    context.getLastApplied(), context.getCommitIndex());
+        }
 
-        if (appendEntriesReply.isSuccess()) {
-            followerLogInformation
-                .setMatchIndex(appendEntriesReply.getLogLastIndex());
-            followerLogInformation
-                .setNextIndex(appendEntriesReply.getLogLastIndex() + 1);
+        followerLogInformation.markFollowerActive();
+        followerLogInformation.setPayloadVersion(appendEntriesReply.getPayloadVersion());
+        followerLogInformation.setRaftVersion(appendEntriesReply.getRaftVersion());
+
+        boolean updated = false;
+        if(appendEntriesReply.getLogLastIndex() > context.getReplicatedLog().lastIndex()) {
+            // The follower's log is actually ahead of the leader's log. Normally this doesn't happen
+            // in raft as a node cannot become leader if its log is behind another's. However, the
+            // non-voting semantics deviate a bit from raft. Only voting members participate in
+            // elections and can become leader so it's possible for a non-voting follower to be ahead
+            // of the leader. This can happen if persistence is disabled and all voting members are
+            // restarted. In this case, the voting leader will start out with an empty log however
+            // the non-voting followers still retain the previous data in memory. On the first
+            // AppendEntries, the non-voting follower returns a successful reply b/c the prevLogIndex
+            // sent by the leader is -1 and thus the integrity checks pass. However the follower's returned
+            // lastLogIndex may be higher in which case we want to reset the follower by installing a
+            // snapshot. It's also possible that the follower's last log index is behind the leader's.
+            // However in this case the log terms won't match and the logs will conflict - this is handled
+            // elsewhere.
+            LOG.debug("{}: handleAppendEntriesReply: follower {} lastIndex {} is ahead of our lastIndex {} - forcing install snaphot",
+                    logName(), followerLogInformation.getId(), appendEntriesReply.getLogLastIndex(),
+                    context.getReplicatedLog().lastIndex());
+
+            followerLogInformation.setMatchIndex(-1);
+            followerLogInformation.setNextIndex(-1);
+
+            initiateCaptureSnapshot(followerId);
+            updated = true;
+        } else if (appendEntriesReply.isSuccess()) {
+            updated = updateFollowerLogInformation(followerLogInformation, appendEntriesReply);
         } else {
+            LOG.debug("{}: handleAppendEntriesReply: received unsuccessful reply: {}", logName(), appendEntriesReply);
+
+            long followerLastLogIndex = appendEntriesReply.getLogLastIndex();
+            long followersLastLogTerm = getLogEntryTerm(followerLastLogIndex);
+            if(appendEntriesReply.isForceInstallSnapshot()) {
+                // Reset the followers match and next index. This is to signal that this follower has nothing
+                // in common with this Leader and so would require a snapshot to be installed
+                followerLogInformation.setMatchIndex(-1);
+                followerLogInformation.setNextIndex(-1);
+
+                // Force initiate a snapshot capture
+                initiateCaptureSnapshot(followerId);
+            } else if(followerLastLogIndex < 0 || (followersLastLogTerm >= 0 &&
+                    followersLastLogTerm == appendEntriesReply.getLogLastTerm())) {
+                // The follower's log is empty or the last entry is present in the leader's journal
+                // and the terms match so the follower is just behind the leader's journal from
+                // the last snapshot, if any. We'll catch up the follower quickly by starting at the
+                // follower's last log index.
+
+                updated = updateFollowerLogInformation(followerLogInformation, appendEntriesReply);
+            } else {
+                // The follower's log conflicts with leader's log so decrement follower's next index by 1
+                // in an attempt to find where the logs match.
 
-            // TODO: When we find that the follower is out of sync with the
-            // Leader we simply decrement that followers next index by 1.
-            // Would it be possible to do better than this? The RAFT spec
-            // does not explicitly deal with it but may be something for us to
-            // think about
+                LOG.debug("{}: follower's last log term {} conflicts with the leader's {} - dec next index",
+                        logName(), appendEntriesReply.getLogLastTerm(), followersLastLogTerm);
 
-            followerLogInformation.decrNextIndex();
+                followerLogInformation.decrNextIndex();
+            }
         }
 
         // Now figure out if this reply warrants a change in the commitIndex
         // If there exists an N such that N > commitIndex, a majority
         // of matchIndex[i] ≥ N, and log[N].term == currentTerm:
         // set commitIndex = N (§5.3, §5.4).
+        if(LOG.isTraceEnabled()) {
+            LOG.trace("{}: handleAppendEntriesReply from {}: commitIndex: {}, lastAppliedIndex: {}, currentTerm: {}",
+                    logName(), followerId, context.getCommitIndex(), context.getLastApplied(), currentTerm());
+        }
+
         for (long N = context.getCommitIndex() + 1; ; N++) {
             int replicatedCount = 1;
 
+            LOG.trace("{}: checking Nth index {}", logName(), N);
             for (FollowerLogInformation info : followerToLog.values()) {
-                if (info.getMatchIndex() >= N) {
+                final PeerInfo peerInfo = context.getPeerInfo(info.getId());
+                if(info.getMatchIndex() >= N && peerInfo != null && peerInfo.isVoting()) {
                     replicatedCount++;
+                } else if(LOG.isTraceEnabled()) {
+                    LOG.trace("{}: Not counting follower {} - matchIndex: {}, {}", logName(), info.getId(),
+                            info.getMatchIndex(), peerInfo);
                 }
             }
 
+            if(LOG.isTraceEnabled()) {
+                LOG.trace("{}: replicatedCount {}, minReplicationCount: {}", logName(), replicatedCount, minReplicationCount);
+            }
+
             if (replicatedCount >= minReplicationCount) {
                 ReplicatedLogEntry replicatedLogEntry = context.getReplicatedLog().get(N);
-                if (replicatedLogEntry != null &&
-                    replicatedLogEntry.getTerm() == currentTerm()) {
+                if (replicatedLogEntry == null) {
+                    LOG.debug("{}: ReplicatedLogEntry not found for index {} - snapshotIndex: {}, journal size: {}",
+                            logName(), N, context.getReplicatedLog().getSnapshotIndex(), context.getReplicatedLog().size());
+                    break;
+                }
+
+                // Don't update the commit index if the log entry is from a previous term, as per §5.4.1:
+                // "Raft never commits log entries from previous terms by counting replicas".
+                // However we keep looping so we can make progress when new entries in the current term
+                // reach consensus, as per §5.4.1: "once an entry from the current term is committed by
+                // counting replicas, then all prior entries are committed indirectly".
+                if (replicatedLogEntry.getTerm() == currentTerm()) {
+                    LOG.trace("{}: Setting commit index to {}", logName(), N);
                     context.setCommitIndex(N);
+                } else {
+                    LOG.debug("{}: Not updating commit index to {} - retrieved log entry with index {}, term {} does not match the current term {}",
+                            logName(), N, replicatedLogEntry.getIndex(), replicatedLogEntry.getTerm(), currentTerm());
                 }
             } else {
+                LOG.trace("{}: minReplicationCount not reached, actual {} - breaking", logName(), replicatedCount);
                 break;
             }
         }
 
         // Apply the change to the state machine
         if (context.getCommitIndex() > context.getLastApplied()) {
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("{}: handleAppendEntriesReply from {}: applying to log - commitIndex: {}, lastAppliedIndex: {}",
+                        logName(), followerId, context.getCommitIndex(), context.getLastApplied());
+            }
+
             applyLogToStateMachine(context.getCommitIndex());
         }
 
-        if (!context.isSnapshotCaptureInitiated()) {
+        if (!context.getSnapshotManager().isCapturing()) {
             purgeInMemoryLog();
         }
 
         //Send the next log entry immediately, if possible, no need to wait for heartbeat to trigger that event
-        sendUpdatesToFollower(followerId, followerLogInformation, false);
+        sendUpdatesToFollower(followerId, followerLogInformation, false, !updated);
+
         return this;
     }
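
The commit-index loop in handleAppendEntriesReply() implements Raft's standard advancement rule. A worked sketch with hypothetical indices (three-node cluster, so minReplicationCount is 2 and the leader counts itself as one replica):

    long commitIndex = 4;
    long[] followerMatchIndex = {6, 4};            // f1 has replicated up to 6, f2 up to 4
    int minReplicationCount = 2;
    for (long N = commitIndex + 1; N <= 6; N++) {  // 6 = leader's last log index in this example
        int replicatedCount = 1;                   // the leader itself
        for (long matchIndex : followerMatchIndex) {
            if (matchIndex >= N) {
                replicatedCount++;
            }
        }
        if (replicatedCount >= minReplicationCount) {
            commitIndex = N;                       // assumes log[N].term == currentTerm
        } else {
            break;
        }
    }
    // commitIndex advances from 4 to 6; index 7 would stop the loop since only the leader has it.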
 
+    private boolean updateFollowerLogInformation(FollowerLogInformation followerLogInformation,
+            AppendEntriesReply appendEntriesReply) {
+        boolean updated = followerLogInformation.setMatchIndex(appendEntriesReply.getLogLastIndex());
+        updated = followerLogInformation.setNextIndex(appendEntriesReply.getLogLastIndex() + 1) || updated;
+
+        if(updated && LOG.isDebugEnabled()) {
+            LOG.debug("{}: handleAppendEntriesReply - FollowerLogInformation for {} updated: matchIndex: {}, nextIndex: {}",
+                    logName(), followerLogInformation.getId(), followerLogInformation.getMatchIndex(),
+                    followerLogInformation.getNextIndex());
+        }
+        return updated;
+    }
+
     private void purgeInMemoryLog() {
-        //find the lowest index across followers which has been replicated to all. -1 if there are no followers.
+        // find the lowest index across followers which has been replicated to all, or
+        // lastApplied if there are no followers, so that we keep clearing the log for a single node.
         // we would delete the in-mem log from that index on, in-order to minimize mem usage
         // we would also share this info thru AE with the followers so that they can delete their log entries as well.
-        long minReplicatedToAllIndex = followerToLog.isEmpty() ? -1 : Long.MAX_VALUE;
+        long minReplicatedToAllIndex = followerToLog.isEmpty() ? context.getLastApplied() : Long.MAX_VALUE;
         for (FollowerLogInformation info : followerToLog.values()) {
             minReplicatedToAllIndex = Math.min(minReplicatedToAllIndex, info.getMatchIndex());
         }
 
-        replicatedToAllIndex = fakeSnapshot(minReplicatedToAllIndex, replicatedToAllIndex);
+        super.performSnapshotWithoutCapture(minReplicatedToAllIndex);
     }
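
A brief worked example of the trimming index computed above (hypothetical values):

    long lastApplied = 15;
    long[] matchIndexes = {10, 7, 12};             // one entry per follower
    long minReplicatedToAllIndex = matchIndexes.length == 0 ? lastApplied : Long.MAX_VALUE;
    for (long matchIndex : matchIndexes) {
        minReplicatedToAllIndex = Math.min(minReplicatedToAllIndex, matchIndex);
    }
    // 7: entries up to index 7 can be dropped from memory; with no followers at all,
    // lastApplied (15) is used so a single-node deployment still trims its log.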
 
     @Override
     protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
-        final Iterator<ClientRequestTracker> it = trackerList.iterator();
+        final Iterator<ClientRequestTracker> it = trackers.iterator();
         while (it.hasNext()) {
             final ClientRequestTracker t = it.next();
             if (t.getIndex() == logIndex) {
@@ -263,86 +398,73 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         return null;
     }
 
-    @Override
-    protected ClientRequestTracker findClientRequestTracker(long logIndex) {
-        for (ClientRequestTracker tracker : trackerList) {
-            if (tracker.getIndex() == logIndex) {
-                return tracker;
-            }
-        }
-        return null;
-    }
-
     @Override
     protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
         RequestVoteReply requestVoteReply) {
         return this;
     }
 
-    @Override
-    public RaftState state() {
-        return RaftState.Leader;
-    }
+    protected void beforeSendHeartbeat(){}
 
     @Override
-    public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
+    public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
         Preconditions.checkNotNull(sender, "sender should not be null");
 
-        Object message = fromSerializableMessage(originalMessage);
-
         if (message instanceof RaftRPC) {
             RaftRPC rpc = (RaftRPC) message;
             // If RPC request or response contains term T > currentTerm:
             // set currentTerm = T, convert to follower (§5.1)
             // This applies to all RPC messages and responses
             if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
-                LOG.debug("{}: Term {} in \"{}\" message is greater than leader's term {}", context.getId(),
-                        rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+                LOG.debug("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
+                        logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
 
                 context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
 
-                return switchBehavior(new Follower(context));
+                return internalSwitchBehavior(RaftState.Follower);
             }
         }
 
-        try {
-            if (message instanceof SendHeartBeat) {
-                sendHeartBeat();
-                return this;
-
-            } else if(message instanceof InitiateInstallSnapshot) {
-                installSnapshotIfNeeded();
-
-            } else if(message instanceof SendInstallSnapshot) {
-                // received from RaftActor
-                setSnapshot(Optional.of(((SendInstallSnapshot) message).getSnapshot()));
-                sendInstallSnapshot();
-
-            } else if (message instanceof Replicate) {
-                replicate((Replicate) message);
-
-            } else if (message instanceof InstallSnapshotReply){
-                handleInstallSnapshotReply((InstallSnapshotReply) message);
-
-            }
-        } finally {
+        if (message instanceof SendHeartBeat) {
+            beforeSendHeartbeat();
+            sendHeartBeat();
             scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
+        } else if(message instanceof SendInstallSnapshot) {
+            // received from RaftActor
+            setSnapshot(((SendInstallSnapshot) message).getSnapshot());
+            sendInstallSnapshot();
+        } else if (message instanceof Replicate) {
+            replicate((Replicate) message);
+        } else if (message instanceof InstallSnapshotReply) {
+            handleInstallSnapshotReply((InstallSnapshotReply) message);
+        } else {
+            return super.handleMessage(sender, message);
         }
 
-        return super.handleMessage(sender, message);
+        return this;
     }
 
     private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
+        LOG.debug("{}: handleInstallSnapshotReply: {}", logName(), reply);
+
         String followerId = reply.getFollowerId();
         FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
 
         if (followerToSnapshot == null) {
-            LOG.error("{}: FollowerId {} in InstallSnapshotReply not known to Leader",
-                    context.getId(), followerId);
+            LOG.error("{}: FollowerToSnapshot not found for follower {} in InstallSnapshotReply",
+                    logName(), followerId);
             return;
         }
 
         FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
+        if(followerLogInformation == null) {
+            // This can happen during AddServer if it times out.
+            LOG.error("{}: FollowerLogInformation not found for follower {} in InstallSnapshotReply",
+                    logName(), followerId);
+            mapFollowerToSnapshot.remove(followerId);
+            return;
+        }
+
         followerLogInformation.markFollowerActive();
 
         if (followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
@@ -352,41 +474,47 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                     //this was the last chunk reply
                     if(LOG.isDebugEnabled()) {
                         LOG.debug("{}: InstallSnapshotReply received, " +
-                                "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
-                                context.getId(), reply.getChunkIndex(), followerId,
+                                "last chunk received, Chunk: {}. Follower: {} Setting nextIndex: {}",
+                                logName(), reply.getChunkIndex(), followerId,
                             context.getReplicatedLog().getSnapshotIndex() + 1
                         );
                     }
 
-                    followerLogInformation.setMatchIndex(
-                        context.getReplicatedLog().getSnapshotIndex());
-                    followerLogInformation.setNextIndex(
-                        context.getReplicatedLog().getSnapshotIndex() + 1);
+                    long followerMatchIndex = snapshot.get().getLastIncludedIndex();
+                    followerLogInformation.setMatchIndex(followerMatchIndex);
+                    followerLogInformation.setNextIndex(followerMatchIndex + 1);
                     mapFollowerToSnapshot.remove(followerId);
 
-                    if(LOG.isDebugEnabled()) {
-                        LOG.debug("{}: followerToLog.get(followerId).getNextIndex()=" +
-                                context.getId(), followerToLog.get(followerId).getNextIndex());
-                    }
+                    LOG.debug("{}: follower: {}, matchIndex set to {}, nextIndex set to {}",
+                        logName(), followerId, followerLogInformation.getMatchIndex(),
+                        followerLogInformation.getNextIndex());
 
                     if (mapFollowerToSnapshot.isEmpty()) {
                         // once there are no pending followers receiving snapshots
                         // we can remove snapshot from the memory
-                        setSnapshot(Optional.<ByteString>absent());
+                        setSnapshot(null);
                     }
                     wasLastChunk = true;
-
+                    if(context.getPeerInfo(followerId).getVotingState() == VotingState.VOTING_NOT_INITIALIZED){
+                        UnInitializedFollowerSnapshotReply unInitFollowerSnapshotSuccess =
+                                             new UnInitializedFollowerSnapshotReply(followerId);
+                        context.getActor().tell(unInitFollowerSnapshotSuccess, context.getActor());
+                        LOG.debug("Sent message UnInitializedFollowerSnapshotReply to self");
+                    }
                 } else {
                     followerToSnapshot.markSendStatus(true);
                 }
             } else {
                 LOG.info("{}: InstallSnapshotReply received sending snapshot chunk failed, Will retry, Chunk: {}",
-                        context.getId(), reply.getChunkIndex());
+                        logName(), reply.getChunkIndex());
 
                 followerToSnapshot.markSendStatus(false);
             }
 
-            if (!wasLastChunk && followerToSnapshot.canSendNextChunk()) {
+            if (wasLastChunk && !context.getSnapshotManager().isCapturing()) {
+                // Since the follower is now caught up try to purge the log.
+                purgeInMemoryLog();
+            } else if (!wasLastChunk && followerToSnapshot.canSendNextChunk()) {
                 ActorSelection followerActor = context.getPeerActorSelection(followerId);
                 if(followerActor != null) {
                     sendSnapshotChunk(followerActor, followerId);
@@ -395,7 +523,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
 
         } else {
             LOG.error("{}: Chunk index {} in InstallSnapshotReply from follower {} does not match expected index {}",
-                    context.getId(), reply.getChunkIndex(), followerId,
+                    logName(), reply.getChunkIndex(), followerId,
                     followerToSnapshot.getChunkIndex());
 
             if(reply.getChunkIndex() == INVALID_CHUNK_INDEX){
@@ -409,35 +537,38 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
     private void replicate(Replicate replicate) {
         long logIndex = replicate.getReplicatedLogEntry().getIndex();
 
-        if(LOG.isDebugEnabled()) {
-            LOG.debug("{}: Replicate message {}", context.getId(), logIndex);
-        }
+        LOG.debug("{}: Replicate message: identifier: {}, logIndex: {}, payload: {}", logName(),
+                replicate.getIdentifier(), logIndex, replicate.getReplicatedLogEntry().getData().getClass());
 
         // Create a tracker entry we will use this later to notify the
         // client actor
-        trackerList.add(
-            new ClientRequestTrackerImpl(replicate.getClientActor(),
-                replicate.getIdentifier(),
-                logIndex)
-        );
+        if(replicate.getClientActor() != null) {
+            trackers.add(new ClientRequestTrackerImpl(replicate.getClientActor(), replicate.getIdentifier(),
+                    logIndex));
+        }
 
-        if (followerToLog.isEmpty()) {
+        boolean applyModificationToState = !context.anyVotingPeers()
+                || context.getRaftPolicy().applyModificationToStateBeforeConsensus();
+
+        if(applyModificationToState){
             context.setCommitIndex(logIndex);
             applyLogToStateMachine(logIndex);
-        } else {
-            sendAppendEntries();
+        }
+
+        if (!followerToLog.isEmpty()) {
+            sendAppendEntries(0, false);
         }
     }
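
The pre-consensus apply in replicate() covers the single-node and custom-policy cases; a compact illustration of the condition (hypothetical values, not tied to any particular deployment):

    boolean anyVotingPeers = false;                 // e.g. a single-node deployment
    boolean policyAppliesBeforeConsensus = false;   // default RaftPolicy behaviour
    boolean applyModificationToState = !anyVotingPeers || policyAppliesBeforeConsensus;
    // true here: the entry is committed and applied as soon as it is appended, without
    // waiting for any AppendEntriesReply; followers, if present, are still sent the entry.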
 
-    private void sendAppendEntries() {
+    protected void sendAppendEntries(long timeSinceLastActivityInterval, boolean isHeartbeat) {
         // Send an AppendEntries to all followers
-        long heartbeatInterval = context.getConfigParams().getHeartBeatInterval().toMillis();
         for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
             final String followerId = e.getKey();
             final FollowerLogInformation followerLogInformation = e.getValue();
-            // This checks helps not to send a repeat message to the follower
+            // This check helps avoid sending a repeat message to the follower
-            if(followerLogInformation.timeSinceLastActivity() >= heartbeatInterval) {
-                sendUpdatesToFollower(followerId, followerLogInformation, true);
+            if(!followerLogInformation.isFollowerActive() ||
+                    followerLogInformation.timeSinceLastActivity() >= timeSinceLastActivityInterval) {
+                sendUpdatesToFollower(followerId, followerLogInformation, true, isHeartbeat);
             }
         }
     }
@@ -450,12 +581,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
      */
 
     private void sendUpdatesToFollower(String followerId, FollowerLogInformation followerLogInformation,
-                                          boolean sendHeartbeat) {
+                                       boolean sendHeartbeat, boolean isHeartbeat) {
 
         ActorSelection followerActor = context.getPeerActorSelection(followerId);
         if (followerActor != null) {
             long followerNextIndex = followerLogInformation.getNextIndex();
             boolean isFollowerActive = followerLogInformation.isFollowerActive();
+            boolean sendAppendEntries = false;
+            List<ReplicatedLogEntry> entries = Collections.emptyList();
 
             if (mapFollowerToSnapshot.get(followerId) != null) {
                 // if install snapshot is in process , then sent next chunk if possible
@@ -463,145 +596,129 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                     sendSnapshotChunk(followerActor, followerId);
                 } else if(sendHeartbeat) {
                     // we send a heartbeat even if we have not received a reply for the last chunk
-                    sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
-                        Collections.<ReplicatedLogEntry>emptyList(), followerId);
+                    sendAppendEntries = true;
                 }
             } else {
                 long leaderLastIndex = context.getReplicatedLog().lastIndex();
                 long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
-                if (isFollowerActive &&
-                    context.getReplicatedLog().isPresent(followerNextIndex)) {
-                    // FIXME : Sending one entry at a time
-                    final List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
 
-                    sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);
+                if((!isHeartbeat && LOG.isDebugEnabled()) || LOG.isTraceEnabled()) {
+                    LOG.debug("{}: Checking sendAppendEntries for follower {}: active: {}, followerNextIndex: {}, leaderLastIndex: {}, leaderSnapShotIndex: {}",
+                            logName(), followerId, isFollowerActive, followerNextIndex, leaderLastIndex, leaderSnapShotIndex);
+                }
+
+                if (isFollowerActive && context.getReplicatedLog().isPresent(followerNextIndex)) {
+
+                    LOG.debug("{}: sendAppendEntries: {} is present for follower {}", logName(),
+                            followerNextIndex, followerId);
 
+                    if(followerLogInformation.okToReplicate()) {
+                        // Try to send all the entries in the journal but not exceeding the max data size
+                        // for a single AppendEntries message.
+                        int maxEntries = (int) context.getReplicatedLog().size();
+                        entries = context.getReplicatedLog().getFrom(followerNextIndex, maxEntries,
+                                context.getConfigParams().getSnapshotChunkSize());
+                        sendAppendEntries = true;
+                    }
                 } else if (isFollowerActive && followerNextIndex >= 0 &&
-                    leaderLastIndex >= followerNextIndex) {
+                    leaderLastIndex > followerNextIndex && !context.getSnapshotManager().isCapturing()) {
                     // if the followers next index is not present in the leaders log, and
                     // if the follower is just not starting and if leader's index is more than followers index
                     // then snapshot should be sent
 
                     if (LOG.isDebugEnabled()) {
-                        LOG.debug("InitiateInstallSnapshot to follower:{}," +
-                                "follower-nextIndex:{}, leader-snapshot-index:{},  " +
-                                "leader-last-index:{}", followerId,
-                            followerNextIndex, leaderSnapShotIndex, leaderLastIndex
-                        );
+                        LOG.debug(String.format("%s: InitiateInstallSnapshot to follower: %s, " +
+                                    "follower-nextIndex: %d, leader-snapshot-index: %d,  " +
+                                    "leader-last-index: %d", logName(), followerId,
+                                    followerNextIndex, leaderSnapShotIndex, leaderLastIndex));
                     }
-                    actor().tell(new InitiateInstallSnapshot(), actor());
 
                     // Send heartbeat to follower whenever install snapshot is initiated.
-                    sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
-                            Collections.<ReplicatedLogEntry>emptyList(), followerId);
+                    sendAppendEntries = true;
+                    if (canInstallSnapshot(followerNextIndex)) {
+                        initiateCaptureSnapshot(followerId);
+                    }
 
                 } else if(sendHeartbeat) {
-                    //we send an AppendEntries, even if the follower is inactive
+                    // we send an AppendEntries, even if the follower is inactive
                     // in-order to update the followers timestamp, in case it becomes active again
-                    sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
-                        Collections.<ReplicatedLogEntry>emptyList(), followerId);
+                    sendAppendEntries = true;
                 }
 
             }
+
+            if(sendAppendEntries) {
+                sendAppendEntriesToFollower(followerActor, followerNextIndex,
+                        entries, followerId);
+            }
         }
     }
 
     private void sendAppendEntriesToFollower(ActorSelection followerActor, long followerNextIndex,
         List<ReplicatedLogEntry> entries, String followerId) {
         AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
-            prevLogIndex(followerNextIndex),
-            prevLogTerm(followerNextIndex), entries,
-            context.getCommitIndex(), replicatedToAllIndex);
+            getLogEntryIndex(followerNextIndex - 1),
+            getLogEntryTerm(followerNextIndex - 1), entries,
+            context.getCommitIndex(), super.getReplicatedToAllIndex(), context.getPayloadVersion());
 
-        if(!entries.isEmpty()) {
-            LOG.debug("{}: Sending AppendEntries to follower {}: {}", context.getId(), followerId,
+        if(!entries.isEmpty() || LOG.isTraceEnabled()) {
+            LOG.debug("{}: Sending AppendEntries to follower {}: {}", logName(), followerId,
                     appendEntries);
         }
 
-        followerActor.tell(appendEntries.toSerializable(), actor());
+        followerActor.tell(appendEntries, actor());
     }
 
     /**
-     * An installSnapshot is scheduled at a interval that is a multiple of
-     * a HEARTBEAT_INTERVAL. This is to avoid the need to check for installing
-     * snapshots at every heartbeat.
-     *
      * Install Snapshot works as follows
-     * 1. Leader sends a InitiateInstallSnapshot message to self
-     * 2. Leader then initiates the capture snapshot by sending a CaptureSnapshot message to actor
-     * 3. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
+     * 1. Leader initiates the capture snapshot by sending a CaptureSnapshot message to actor
+     * 2. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
      * and makes a call to Leader's handleMessage , with SendInstallSnapshot message.
-     * 4. Leader , picks the snapshot from im-mem ReplicatedLog and sends it in chunks to the Follower
-     * 5. On complete, Follower sends back a InstallSnapshotReply.
-     * 6. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
+     * 3. Leader picks the snapshot from the in-memory ReplicatedLog and sends it in chunks to the Follower
+     * 4. On complete, Follower sends back an InstallSnapshotReply.
+     * 5. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
      * and replenishes the memory by deleting the snapshot in Replicated log.
-     *
+     * 6. If another follower requires a snapshot and a snapshot has been collected (via CaptureSnapshotReply)
+     * then send the existing snapshot in chunks to the follower.
+     * @param followerId the id of the follower to install the snapshot on
      */
-    private void installSnapshotIfNeeded() {
-        if(LOG.isDebugEnabled()) {
-            LOG.debug("{}: installSnapshotIfNeeded, followers {}", context.getId(), followerToLog.keySet());
-        }
-
-        for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
-            final ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
-
-            if (followerActor != null) {
-                long nextIndex = e.getValue().getNextIndex();
-
-                if (!context.getReplicatedLog().isPresent(nextIndex) &&
-                        context.getReplicatedLog().isInSnapshot(nextIndex)) {
-                    LOG.info("{}: {} follower needs a snapshot install", context.getId(), e.getKey());
-                    if (snapshot.isPresent()) {
-                        // if a snapshot is present in the memory, most likely another install is in progress
-                        // no need to capture snapshot
-                        sendSnapshotChunk(followerActor, e.getKey());
-
-                    } else {
-                        initiateCaptureSnapshot();
-                        //we just need 1 follower who would need snapshot to be installed.
-                        // when we have the snapshot captured, we would again check (in SendInstallSnapshot)
-                        // who needs an install and send to all who need
-                        break;
-                    }
-
-                }
-            }
+    public boolean initiateCaptureSnapshot(String followerId) {
+        if (snapshot.isPresent()) {
+            // if a snapshot is present in the memory, most likely another install is in progress
+            // no need to capture snapshot.
+            // This could happen if another follower needs an install when one is going on.
+            final ActorSelection followerActor = context.getPeerActorSelection(followerId);
+            sendSnapshotChunk(followerActor, followerId);
+            return true;
+        } else {
+            return context.getSnapshotManager().captureToInstall(context.getReplicatedLog().last(),
+                    this.getReplicatedToAllIndex(), followerId);
         }
     }
 
-    // on every install snapshot, we try to capture the snapshot.
-    // Once a capture is going on, another one issued will get ignored by RaftActor.
-    private void initiateCaptureSnapshot() {
-        LOG.info("{}: Initiating Snapshot Capture to Install Snapshot, Leader:{}", context.getId(), getLeaderId());
-        ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
-        long lastAppliedIndex = -1;
-        long lastAppliedTerm = -1;
+    private boolean canInstallSnapshot(long nextIndex){
+        // If the follower's nextIndex is -1 then we might as well send it a snapshot
+        // Otherwise send it a snapshot only if the nextIndex is not present in the log but is present
+        // in the snapshot
+        return nextIndex == -1 ||
+                (!context.getReplicatedLog().isPresent(nextIndex)
+                        && context.getReplicatedLog().isInSnapshot(nextIndex));
 
-        if (lastAppliedEntry != null) {
-            lastAppliedIndex = lastAppliedEntry.getIndex();
-            lastAppliedTerm = lastAppliedEntry.getTerm();
-        } else if (context.getReplicatedLog().getSnapshotIndex() > -1)  {
-            lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
-            lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
-        }
-
-        boolean isInstallSnapshotInitiated = true;
-        actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(),
-                lastAppliedIndex, lastAppliedTerm, isInstallSnapshotInitiated),
-            actor());
     }
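
A worked example of the canInstallSnapshot() decision, with a hypothetical journal whose snapshot covers indices 0..9 and whose in-memory log covers 10..20:

    long snapshotIndex = 9, firstLogIndex = 10, lastLogIndex = 20;
    java.util.function.LongPredicate inLog = i -> i >= firstLogIndex && i <= lastLogIndex;
    java.util.function.LongPredicate inSnapshot = i -> i >= 0 && i <= snapshotIndex;
    java.util.function.LongPredicate canInstall =
            next -> next == -1 || (!inLog.test(next) && inSnapshot.test(next));
    // canInstall.test(-1) == true   (reset/new follower, send a snapshot)
    // canInstall.test(5)  == true   (only available from the snapshot)
    // canInstall.test(12) == false  (still in the in-memory log, AppendEntries suffices)
    // canInstall.test(25) == false  (ahead of the leader, handled by the conflict path)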
 
 
     private void sendInstallSnapshot() {
+        LOG.debug("{}: sendInstallSnapshot", logName());
         for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
-            ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
+            String followerId = e.getKey();
+            ActorSelection followerActor = context.getPeerActorSelection(followerId);
+            FollowerLogInformation followerLogInfo = e.getValue();
 
             if (followerActor != null) {
-                long nextIndex = e.getValue().getNextIndex();
-
-                if (!context.getReplicatedLog().isPresent(nextIndex) &&
-                    context.getReplicatedLog().isInSnapshot(nextIndex)) {
-                    sendSnapshotChunk(followerActor, e.getKey());
+                long nextIndex = followerLogInfo.getNextIndex();
+                if (context.getPeerInfo(followerId).getVotingState() == VotingState.VOTING_NOT_INITIALIZED ||
+                        canInstallSnapshot(nextIndex)) {
+                    sendSnapshotChunk(followerActor, followerId);
                 }
             }
         }
@@ -614,30 +731,39 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
     private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {
         try {
             if (snapshot.isPresent()) {
-                ByteString nextSnapshotChunk = getNextSnapshotChunk(followerId,snapshot.get());
+                byte[] nextSnapshotChunk = getNextSnapshotChunk(followerId, snapshot.get().getSnapshotBytes());
 
                 // Note: the previous call to getNextSnapshotChunk has the side-effect of adding
                 // followerId to the followerToSnapshot map.
                 FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
 
+                int nextChunkIndex = followerToSnapshot.incrementChunkIndex();
+                Optional<ServerConfigurationPayload> serverConfig = Optional.absent();
+                if(followerToSnapshot.isLastChunk(nextChunkIndex)) {
+                    serverConfig = Optional.fromNullable(context.getPeerServerInfo(true));
+                }
+
                 followerActor.tell(
                     new InstallSnapshot(currentTerm(), context.getId(),
-                        context.getReplicatedLog().getSnapshotIndex(),
-                        context.getReplicatedLog().getSnapshotTerm(),
+                        snapshot.get().getLastIncludedIndex(),
+                        snapshot.get().getLastIncludedTerm(),
                         nextSnapshotChunk,
-                        followerToSnapshot.incrementChunkIndex(),
+                        nextChunkIndex,
                         followerToSnapshot.getTotalChunks(),
-                        Optional.of(followerToSnapshot.getLastChunkHashCode())
-                    ).toSerializable(),
+                        Optional.of(followerToSnapshot.getLastChunkHashCode()),
+                        serverConfig
+                    ).toSerializable(followerToLog.get(followerId).getRaftVersion()),
                     actor()
                 );
-                LOG.info("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}",
-                        context.getId(), followerActor.path(),
-                        followerToSnapshot.getChunkIndex(),
-                        followerToSnapshot.getTotalChunks());
+
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}",
+                            logName(), followerActor.path(), followerToSnapshot.getChunkIndex(),
+                            followerToSnapshot.getTotalChunks());
+                }
             }
         } catch (IOException e) {
-            LOG.error("{}: InstallSnapshot failed for Leader.", context.getId(), e);
+            LOG.error("{}: InstallSnapshot failed for Leader.", logName(), e);
         }
     }
 
@@ -645,22 +771,23 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
-     * Acccepts snaphot as ByteString, enters into map for future chunks
-     * creates and return a ByteString chunk
+     * Accepts the snapshot as a ByteString, enters it into the map for future chunks,
+     * creates and returns a chunk
      */
-    private ByteString getNextSnapshotChunk(String followerId, ByteString snapshotBytes) throws IOException {
+    private byte[] getNextSnapshotChunk(String followerId, ByteString snapshotBytes) throws IOException {
         FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
         if (followerToSnapshot == null) {
             followerToSnapshot = new FollowerToSnapshot(snapshotBytes);
             mapFollowerToSnapshot.put(followerId, followerToSnapshot);
         }
-        ByteString nextChunk = followerToSnapshot.getNextChunk();
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("{}: Leader's snapshot nextChunk size:{}", context.getId(), nextChunk.size());
-        }
+        byte[] nextChunk = followerToSnapshot.getNextChunk();
+
+        LOG.debug("{}: next snapshot chunk size for follower {}: {}", logName(), followerId, nextChunk.length);
+
         return nextChunk;
     }
 
     private void sendHeartBeat() {
         if (!followerToLog.isEmpty()) {
-            sendAppendEntries();
+            LOG.trace("{}: Sending heartbeat", logName());
+            sendAppendEntries(context.getConfigParams().getHeartBeatInterval().toMillis(), true);
         }
     }
 
@@ -685,31 +812,37 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         // need to be sent if there are other messages being sent to the remote
         // actor.
         heartbeatSchedule = context.getActorSystem().scheduler().scheduleOnce(
-            interval, context.getActor(), new SendHeartBeat(),
+            interval, context.getActor(), SendHeartBeat.INSTANCE,
             context.getActorSystem().dispatcher(), context.getActor());
     }
 
     @Override
-    public void close() throws Exception {
+    public void close() {
         stopHeartBeat();
     }
 
     @Override
-    public String getLeaderId() {
+    public final String getLeaderId() {
         return context.getId();
     }
 
+    @Override
+    public final short getLeaderPayloadVersion() {
+        return context.getPayloadVersion();
+    }
+
     protected boolean isLeaderIsolated() {
-        int minPresent = minIsolatedLeaderPeerCount;
+        int minPresent = getMinIsolatedLeaderPeerCount();
         for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
-            if (followerLogInformation.isFollowerActive()) {
+            final PeerInfo peerInfo = context.getPeerInfo(followerLogInformation.getId());
+            if(peerInfo != null && peerInfo.isVoting() && followerLogInformation.isFollowerActive()) {
                 --minPresent;
                 if (minPresent == 0) {
-                    break;
+                    return false;
                 }
             }
         }
-        return (minPresent != 0);
+        return minPresent != 0;
     }
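
A worked example of the isolation check above, for a hypothetical five-node cluster (four voting followers, so getMinIsolatedLeaderPeerCount() yields 2):

    int minPresent = 2;
    boolean[] activeVotingFollowers = {true, false, false, false};  // only f1 heard from recently
    boolean isolated = true;
    for (boolean active : activeVotingFollowers) {
        if (active && --minPresent == 0) {
            isolated = false;
            break;
        }
    }
    // isolated == true: the leader no longer sees a majority of voting followers and
    // should transition to IsolatedLeader (retaining its state via the new constructor).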
 
     /**
@@ -731,11 +864,11 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         public FollowerToSnapshot(ByteString snapshotBytes) {
             this.snapshotBytes = snapshotBytes;
             int size = snapshotBytes.size();
-            totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
-                ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
+            totalChunks = (size / context.getConfigParams().getSnapshotChunkSize()) +
+                (size % context.getConfigParams().getSnapshotChunkSize() > 0 ? 1 : 0);
             if(LOG.isDebugEnabled()) {
                 LOG.debug("{}: Snapshot {} bytes, total chunks to send:{}",
-                        context.getId(), size, totalChunks);
+                        logName(), size, totalChunks);
             }
             replyReceivedForOffset = -1;
             chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;
@@ -791,25 +924,23 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             }
         }
 
-        public ByteString getNextChunk() {
+        public byte[] getNextChunk() {
             int snapshotLength = getSnapshotBytes().size();
             int start = incrementOffset();
             int size = context.getConfigParams().getSnapshotChunkSize();
             if (context.getConfigParams().getSnapshotChunkSize() > snapshotLength) {
                 size = snapshotLength;
-            } else {
-                if ((start + context.getConfigParams().getSnapshotChunkSize()) > snapshotLength) {
-                    size = snapshotLength - start;
-                }
+            } else if ((start + context.getConfigParams().getSnapshotChunkSize()) > snapshotLength) {
+                size = snapshotLength - start;
             }
 
-            if(LOG.isDebugEnabled()) {
-                LOG.debug("{}: Next chunk: length={}, offset={},size={}", context.getId(),
-                    snapshotLength, start, size);
-            }
-            ByteString substring = getSnapshotBytes().substring(start, start + size);
-            nextChunkHashCode = substring.hashCode();
-            return substring;
+            byte[] nextChunk = new byte[size];
+            getSnapshotBytes().copyTo(nextChunk, start, 0, size);
+            nextChunkHashCode = Arrays.hashCode(nextChunk);
+
+            LOG.debug("{}: Next chunk: total length={}, offset={}, size={}, hashCode={}", logName(),
+                    snapshotLength, start, size, nextChunkHashCode);
+            return nextChunk;
         }
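
A short worked example of the chunking arithmetic used by FollowerToSnapshot (the sizes are hypothetical, not the configured defaults):

    int snapshotLength = 2_500_000;
    int chunkSize = 1_000_000;
    int totalChunks = snapshotLength / chunkSize + (snapshotLength % chunkSize > 0 ? 1 : 0);  // 3
    int start = 2 * chunkSize;                      // offset of the third (last) chunk
    int size = start + chunkSize > snapshotLength ? snapshotLength - start : chunkSize;       // 500_000
    // Chunks of 1_000_000, 1_000_000 and 500_000 bytes are sent; per sendSnapshotChunk() above,
    // the last chunk also carries the ServerConfigurationPayload.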
 
         /**
@@ -864,4 +995,28 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
     public int followerLogSize() {
         return followerToLog.size();
     }
+
+    private static class SnapshotHolder {
+        private final long lastIncludedTerm;
+        private final long lastIncludedIndex;
+        private final ByteString snapshotBytes;
+
+        SnapshotHolder(Snapshot snapshot) {
+            this.lastIncludedTerm = snapshot.getLastAppliedTerm();
+            this.lastIncludedIndex = snapshot.getLastAppliedIndex();
+            this.snapshotBytes = ByteString.copyFrom(snapshot.getState());
+        }
+
+        long getLastIncludedTerm() {
+            return lastIncludedTerm;
+        }
+
+        long getLastIncludedIndex() {
+            return lastIncludedIndex;
+        }
+
+        ByteString getSnapshotBytes() {
+            return snapshotBytes;
+        }
+    }
 }