/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
9 package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.protobuf.ByteString;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import scala.concurrent.duration.FiniteDuration;
/**
 * The behavior of a RaftActor when it is in the Leader state.
 * <p/>
 * Leaders:
 * <ul>
 * <li> Upon election: send initial empty AppendEntries RPCs
 * (heartbeat) to each server; repeat during idle periods to
 * prevent election timeouts (§5.2)
 * <li> If command received from client: append entry to local log,
 * respond after entry applied to state machine (§5.3)
 * <li> If last log index ≥ nextIndex for a follower: send
 * AppendEntries RPC with log entries starting at nextIndex
 * <li> If successful: update nextIndex and matchIndex for
 * the follower (§5.3)
 * <li> If AppendEntries fails because of log inconsistency:
 * decrement nextIndex and retry (§5.3)
 * <li> If there exists an N such that N > commitIndex, a majority
 * of matchIndex[i] ≥ N, and log[N].term == currentTerm:
 * set commitIndex = N (§5.3, §5.4).
 * </ul>
 */
72 public abstract class AbstractLeader extends AbstractRaftActorBehavior {
74 // The index of the first chunk that is sent when installing a snapshot
75 public static final int FIRST_CHUNK_INDEX = 1;
77 // The index that the follower should respond with if it needs the install snapshot to be reset
78 public static final int INVALID_CHUNK_INDEX = -1;
80 // This would be passed as the hash code of the last chunk when sending the first chunk
81 public static final int INITIAL_LAST_CHUNK_HASH_CODE = -1;
83 private final Map<String, FollowerLogInformation> followerToLog;
84 private final Map<String, FollowerToSnapshot> mapFollowerToSnapshot = new HashMap<>();
86 private Cancellable heartbeatSchedule = null;
88 private final Collection<ClientRequestTracker> trackerList = new LinkedList<>();
90 protected final int minReplicationCount;
92 protected final int minIsolatedLeaderPeerCount;
94 private Optional<ByteString> snapshot;
96 private long replicatedToAllIndex = -1;
98 public AbstractLeader(RaftActorContext context) {
101 final Builder<String, FollowerLogInformation> ftlBuilder = ImmutableMap.builder();
102 for (String followerId : context.getPeerAddresses().keySet()) {
103 FollowerLogInformation followerLogInformation =
104 new FollowerLogInformationImpl(followerId,
105 context.getCommitIndex(), -1,
106 context.getConfigParams().getElectionTimeOutInterval());
108 ftlBuilder.put(followerId, followerLogInformation);
110 followerToLog = ftlBuilder.build();
112 leaderId = context.getId();
114 LOG.debug("{}: Election: Leader has following peers: {}", context.getId(), getFollowerIds());
116 minReplicationCount = getMajorityVoteCount(getFollowerIds().size());
118 // the isolated Leader peer count will be 1 less than the majority vote count.
119 // this is because the vote count has the self vote counted in it
121 // 0 peers = 1 votesRequired , minIsolatedLeaderPeerCount = 0
122 // 2 peers = 2 votesRequired , minIsolatedLeaderPeerCount = 1
123 // 4 peers = 3 votesRequired, minIsolatedLeaderPeerCount = 2
124 minIsolatedLeaderPeerCount = minReplicationCount > 0 ? (minReplicationCount - 1) : 0;
126 snapshot = Optional.absent();
128 // Immediately schedule a heartbeat
129 // Upon election: send initial empty AppendEntries RPCs
130 // (heartbeat) to each server; repeat during idle periods to
131 // prevent election timeouts (§5.2)
132 scheduleHeartBeat(new FiniteDuration(0, TimeUnit.SECONDS));
136 * Return an immutable collection of follower identifiers.
138 * @return Collection of follower IDs
140 protected final Collection<String> getFollowerIds() {
141 return followerToLog.keySet();
144 private Optional<ByteString> getSnapshot() {
149 void setSnapshot(Optional<ByteString> snapshot) {
150 this.snapshot = snapshot;
154 protected RaftActorBehavior handleAppendEntries(ActorRef sender,
155 AppendEntries appendEntries) {
157 if(LOG.isDebugEnabled()) {
158 LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
165 protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
166 AppendEntriesReply appendEntriesReply) {
168 if(! appendEntriesReply.isSuccess()) {
169 if(LOG.isDebugEnabled()) {
170 LOG.debug("{}: handleAppendEntriesReply: {}", context.getId(), appendEntriesReply);
174 // Update the FollowerLogInformation
175 String followerId = appendEntriesReply.getFollowerId();
176 FollowerLogInformation followerLogInformation =
177 followerToLog.get(followerId);
179 if(followerLogInformation == null){
180 LOG.error("{}: handleAppendEntriesReply - unknown follower {}", context.getId(), followerId);
184 followerLogInformation.markFollowerActive();
186 if (appendEntriesReply.isSuccess()) {
187 followerLogInformation
188 .setMatchIndex(appendEntriesReply.getLogLastIndex());
189 followerLogInformation
190 .setNextIndex(appendEntriesReply.getLogLastIndex() + 1);
193 // TODO: When we find that the follower is out of sync with the
194 // Leader we simply decrement that followers next index by 1.
195 // Would it be possible to do better than this? The RAFT spec
196 // does not explicitly deal with it but may be something for us to
199 followerLogInformation.decrNextIndex();
202 // Now figure out if this reply warrants a change in the commitIndex
203 // If there exists an N such that N > commitIndex, a majority
204 // of matchIndex[i] ≥ N, and log[N].term == currentTerm:
205 // set commitIndex = N (§5.3, §5.4).
206 for (long N = context.getCommitIndex() + 1; ; N++) {
207 int replicatedCount = 1;
209 for (FollowerLogInformation info : followerToLog.values()) {
210 if (info.getMatchIndex() >= N) {
215 if (replicatedCount >= minReplicationCount) {
216 ReplicatedLogEntry replicatedLogEntry = context.getReplicatedLog().get(N);
217 if (replicatedLogEntry != null &&
218 replicatedLogEntry.getTerm() == currentTerm()) {
219 context.setCommitIndex(N);
226 // Apply the change to the state machine
227 if (context.getCommitIndex() > context.getLastApplied()) {
228 applyLogToStateMachine(context.getCommitIndex());
231 if (!context.isSnapshotCaptureInitiated()) {
238 private void purgeInMemoryLog() {
239 //find the lowest index across followers which has been replicated to all. -1 if there are no followers.
240 // we would delete the in-mem log from that index on, in-order to minimize mem usage
241 // we would also share this info thru AE with the followers so that they can delete their log entries as well.
242 long minReplicatedToAllIndex = followerToLog.isEmpty() ? -1 : Long.MAX_VALUE;
243 for (FollowerLogInformation info : followerToLog.values()) {
244 minReplicatedToAllIndex = Math.min(minReplicatedToAllIndex, info.getMatchIndex());
247 replicatedToAllIndex = fakeSnapshot(minReplicatedToAllIndex, replicatedToAllIndex);
251 protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
252 final Iterator<ClientRequestTracker> it = trackerList.iterator();
253 while (it.hasNext()) {
254 final ClientRequestTracker t = it.next();
255 if (t.getIndex() == logIndex) {
265 protected ClientRequestTracker findClientRequestTracker(long logIndex) {
266 for (ClientRequestTracker tracker : trackerList) {
267 if (tracker.getIndex() == logIndex) {
275 protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
276 RequestVoteReply requestVoteReply) {
281 public RaftState state() {
282 return RaftState.Leader;
286 public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
287 Preconditions.checkNotNull(sender, "sender should not be null");
289 Object message = fromSerializableMessage(originalMessage);
291 if (message instanceof RaftRPC) {
292 RaftRPC rpc = (RaftRPC) message;
293 // If RPC request or response contains term T > currentTerm:
294 // set currentTerm = T, convert to follower (§5.1)
295 // This applies to all RPC messages and responses
296 if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
297 context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
299 return switchBehavior(new Follower(context));
304 if (message instanceof SendHeartBeat) {
308 } else if(message instanceof InitiateInstallSnapshot) {
309 installSnapshotIfNeeded();
311 } else if(message instanceof SendInstallSnapshot) {
312 // received from RaftActor
313 setSnapshot(Optional.of(((SendInstallSnapshot) message).getSnapshot()));
314 sendInstallSnapshot();
316 } else if (message instanceof Replicate) {
317 replicate((Replicate) message);
319 } else if (message instanceof InstallSnapshotReply){
320 handleInstallSnapshotReply((InstallSnapshotReply) message);
324 scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
327 return super.handleMessage(sender, message);
330 private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
331 String followerId = reply.getFollowerId();
332 FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
333 FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
334 followerLogInformation.markFollowerActive();
336 if (followerToSnapshot != null &&
337 followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
339 if (reply.isSuccess()) {
340 if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
341 //this was the last chunk reply
342 if(LOG.isDebugEnabled()) {
343 LOG.debug("{}: InstallSnapshotReply received, " +
344 "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
345 context.getId(), reply.getChunkIndex(), followerId,
346 context.getReplicatedLog().getSnapshotIndex() + 1
350 followerLogInformation.setMatchIndex(
351 context.getReplicatedLog().getSnapshotIndex());
352 followerLogInformation.setNextIndex(
353 context.getReplicatedLog().getSnapshotIndex() + 1);
354 mapFollowerToSnapshot.remove(followerId);
356 if(LOG.isDebugEnabled()) {
357 LOG.debug("{}: followerToLog.get(followerId).getNextIndex()=" +
358 context.getId(), followerToLog.get(followerId).getNextIndex());
361 if (mapFollowerToSnapshot.isEmpty()) {
362 // once there are no pending followers receiving snapshots
363 // we can remove snapshot from the memory
364 setSnapshot(Optional.<ByteString>absent());
368 followerToSnapshot.markSendStatus(true);
371 LOG.info("{}: InstallSnapshotReply received sending snapshot chunk failed, Will retry, Chunk: {}",
372 context.getId(), reply.getChunkIndex());
374 followerToSnapshot.markSendStatus(false);
378 LOG.error("{}: FollowerId in InstallSnapshotReply not known to Leader" +
379 " or Chunk Index in InstallSnapshotReply not matching {} != {}",
380 context.getId(), followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
383 if(reply.getChunkIndex() == INVALID_CHUNK_INDEX){
384 // Since the Follower did not find this index to be valid we should reset the follower snapshot
385 // so that Installing the snapshot can resume from the beginning
386 followerToSnapshot.reset();
391 private void replicate(Replicate replicate) {
392 long logIndex = replicate.getReplicatedLogEntry().getIndex();
394 if(LOG.isDebugEnabled()) {
395 LOG.debug("{}: Replicate message {}", context.getId(), logIndex);
398 // Create a tracker entry we will use this later to notify the
401 new ClientRequestTrackerImpl(replicate.getClientActor(),
402 replicate.getIdentifier(),
406 if (followerToLog.isEmpty()) {
407 context.setCommitIndex(logIndex);
408 applyLogToStateMachine(logIndex);
414 private void sendAppendEntries() {
415 // Send an AppendEntries to all followers
416 for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
417 final String followerId = e.getKey();
418 ActorSelection followerActor = context.getPeerActorSelection(followerId);
420 if (followerActor != null) {
421 FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
422 long followerNextIndex = followerLogInformation.getNextIndex();
423 boolean isFollowerActive = followerLogInformation.isFollowerActive();
425 if (mapFollowerToSnapshot.get(followerId) != null) {
426 // if install snapshot is in process , then sent next chunk if possible
427 if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
428 sendSnapshotChunk(followerActor, followerId);
430 // we send a heartbeat even if we have not received a reply for the last chunk
431 sendAppendEntriesToFollower(followerActor, followerNextIndex,
432 Collections.<ReplicatedLogEntry>emptyList());
436 long leaderLastIndex = context.getReplicatedLog().lastIndex();
437 long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
438 final List<ReplicatedLogEntry> entries;
440 if (isFollowerActive &&
441 context.getReplicatedLog().isPresent(followerNextIndex)) {
442 // FIXME : Sending one entry at a time
443 entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
445 } else if (isFollowerActive && followerNextIndex >= 0 &&
446 leaderLastIndex >= followerNextIndex ) {
447 // if the followers next index is not present in the leaders log, and
448 // if the follower is just not starting and if leader's index is more than followers index
449 // then snapshot should be sent
451 if(LOG.isDebugEnabled()) {
452 LOG.debug(String.format("%s: InitiateInstallSnapshot to follower: %s," +
453 "follower-nextIndex: %s, leader-snapshot-index: %s, " +
454 "leader-last-index: %s", context.getId(), followerId,
455 followerNextIndex, leaderSnapShotIndex, leaderLastIndex));
457 actor().tell(new InitiateInstallSnapshot(), actor());
459 // we would want to sent AE as the capture snapshot might take time
460 entries = Collections.<ReplicatedLogEntry>emptyList();
463 //we send an AppendEntries, even if the follower is inactive
464 // in-order to update the followers timestamp, in case it becomes active again
465 entries = Collections.<ReplicatedLogEntry>emptyList();
468 sendAppendEntriesToFollower(followerActor, followerNextIndex, entries);
475 private void sendAppendEntriesToFollower(ActorSelection followerActor, long followerNextIndex,
476 List<ReplicatedLogEntry> entries) {
478 new AppendEntries(currentTerm(), context.getId(),
479 prevLogIndex(followerNextIndex),
480 prevLogTerm(followerNextIndex), entries,
481 context.getCommitIndex(),
482 replicatedToAllIndex).toSerializable(),
488 * An installSnapshot is scheduled at a interval that is a multiple of
489 * a HEARTBEAT_INTERVAL. This is to avoid the need to check for installing
490 * snapshots at every heartbeat.
492 * Install Snapshot works as follows
493 * 1. Leader sends a InitiateInstallSnapshot message to self
494 * 2. Leader then initiates the capture snapshot by sending a CaptureSnapshot message to actor
495 * 3. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
496 * and makes a call to Leader's handleMessage , with SendInstallSnapshot message.
497 * 4. Leader , picks the snapshot from im-mem ReplicatedLog and sends it in chunks to the Follower
498 * 5. On complete, Follower sends back a InstallSnapshotReply.
499 * 6. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
500 * and replenishes the memory by deleting the snapshot in Replicated log.
503 private void installSnapshotIfNeeded() {
504 for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
505 final ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
507 if (followerActor != null) {
508 long nextIndex = e.getValue().getNextIndex();
510 if (!context.getReplicatedLog().isPresent(nextIndex) &&
511 context.getReplicatedLog().isInSnapshot(nextIndex)) {
512 LOG.info("{}: {} follower needs a snapshot install", context.getId(), e.getKey());
513 if (snapshot.isPresent()) {
514 // if a snapshot is present in the memory, most likely another install is in progress
515 // no need to capture snapshot
516 sendSnapshotChunk(followerActor, e.getKey());
519 initiateCaptureSnapshot();
520 //we just need 1 follower who would need snapshot to be installed.
521 // when we have the snapshot captured, we would again check (in SendInstallSnapshot)
522 // who needs an install and send to all who need
531 // on every install snapshot, we try to capture the snapshot.
532 // Once a capture is going on, another one issued will get ignored by RaftActor.
533 private void initiateCaptureSnapshot() {
534 LOG.info("{}: Initiating Snapshot Capture to Install Snapshot, Leader:{}", context.getId(), getLeaderId());
535 ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
536 long lastAppliedIndex = -1;
537 long lastAppliedTerm = -1;
539 if (lastAppliedEntry != null) {
540 lastAppliedIndex = lastAppliedEntry.getIndex();
541 lastAppliedTerm = lastAppliedEntry.getTerm();
542 } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
543 lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
544 lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
547 boolean isInstallSnapshotInitiated = true;
548 actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(),
549 lastAppliedIndex, lastAppliedTerm, isInstallSnapshotInitiated),
554 private void sendInstallSnapshot() {
555 for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
556 ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
558 if (followerActor != null) {
559 long nextIndex = e.getValue().getNextIndex();
561 if (!context.getReplicatedLog().isPresent(nextIndex) &&
562 context.getReplicatedLog().isInSnapshot(nextIndex)) {
563 sendSnapshotChunk(followerActor, e.getKey());
570 * Sends a snapshot chunk to a given follower
571 * InstallSnapshot should qualify as a heartbeat too.
573 private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {
575 if (snapshot.isPresent()) {
577 new InstallSnapshot(currentTerm(), context.getId(),
578 context.getReplicatedLog().getSnapshotIndex(),
579 context.getReplicatedLog().getSnapshotTerm(),
580 getNextSnapshotChunk(followerId,snapshot.get()),
581 mapFollowerToSnapshot.get(followerId).incrementChunkIndex(),
582 mapFollowerToSnapshot.get(followerId).getTotalChunks(),
583 Optional.of(mapFollowerToSnapshot.get(followerId).getLastChunkHashCode())
587 LOG.info("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}",
588 context.getId(), followerActor.path(),
589 mapFollowerToSnapshot.get(followerId).getChunkIndex(),
590 mapFollowerToSnapshot.get(followerId).getTotalChunks());
592 } catch (IOException e) {
593 LOG.error(e, "{}: InstallSnapshot failed for Leader.", context.getId());
598 * Acccepts snaphot as ByteString, enters into map for future chunks
599 * creates and return a ByteString chunk
601 private ByteString getNextSnapshotChunk(String followerId, ByteString snapshotBytes) throws IOException {
602 FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
603 if (followerToSnapshot == null) {
604 followerToSnapshot = new FollowerToSnapshot(snapshotBytes);
605 mapFollowerToSnapshot.put(followerId, followerToSnapshot);
607 ByteString nextChunk = followerToSnapshot.getNextChunk();
608 if (LOG.isDebugEnabled()) {
609 LOG.debug("{}: Leader's snapshot nextChunk size:{}", context.getId(), nextChunk.size());
614 private void sendHeartBeat() {
615 if (!followerToLog.isEmpty()) {
620 private void stopHeartBeat() {
621 if (heartbeatSchedule != null && !heartbeatSchedule.isCancelled()) {
622 heartbeatSchedule.cancel();
626 private void scheduleHeartBeat(FiniteDuration interval) {
627 if (followerToLog.isEmpty()) {
628 // Optimization - do not bother scheduling a heartbeat as there are
635 // Schedule a heartbeat. When the scheduler triggers a SendHeartbeat
636 // message is sent to itself.
637 // Scheduling the heartbeat only once here because heartbeats do not
638 // need to be sent if there are other messages being sent to the remote
640 heartbeatSchedule = context.getActorSystem().scheduler().scheduleOnce(
641 interval, context.getActor(), new SendHeartBeat(),
642 context.getActorSystem().dispatcher(), context.getActor());
646 public void close() throws Exception {
651 public String getLeaderId() {
652 return context.getId();
655 protected boolean isLeaderIsolated() {
656 int minPresent = minIsolatedLeaderPeerCount;
657 for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
658 if (followerLogInformation.isFollowerActive()) {
660 if (minPresent == 0) {
665 return (minPresent != 0);
669 * Encapsulates the snapshot bytestring and handles the logic of sending
672 protected class FollowerToSnapshot {
673 private final ByteString snapshotBytes;
674 private int offset = 0;
675 // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
676 private int replyReceivedForOffset;
677 // if replyStatus is false, the previous chunk is attempted
678 private boolean replyStatus = false;
679 private int chunkIndex;
680 private final int totalChunks;
681 private int lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
682 private int nextChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
684 public FollowerToSnapshot(ByteString snapshotBytes) {
685 this.snapshotBytes = snapshotBytes;
686 int size = snapshotBytes.size();
687 totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
688 ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
689 if(LOG.isDebugEnabled()) {
690 LOG.debug("{}: Snapshot {} bytes, total chunks to send:{}",
691 context.getId(), size, totalChunks);
693 replyReceivedForOffset = -1;
694 chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;
697 public ByteString getSnapshotBytes() {
698 return snapshotBytes;
701 public int incrementOffset() {
703 // if prev chunk failed, we would want to sent the same chunk again
704 offset = offset + context.getConfigParams().getSnapshotChunkSize();
709 public int incrementChunkIndex() {
711 // if prev chunk failed, we would want to sent the same chunk again
712 chunkIndex = chunkIndex + 1;
717 public int getChunkIndex() {
721 public int getTotalChunks() {
725 public boolean canSendNextChunk() {
726 // we only send a false if a chunk is sent but we have not received a reply yet
727 return replyReceivedForOffset == offset;
730 public boolean isLastChunk(int chunkIndex) {
731 return totalChunks == chunkIndex;
734 public void markSendStatus(boolean success) {
736 // if the chunk sent was successful
737 replyReceivedForOffset = offset;
739 lastChunkHashCode = nextChunkHashCode;
741 // if the chunk sent was failure
742 replyReceivedForOffset = offset;
747 public ByteString getNextChunk() {
748 int snapshotLength = getSnapshotBytes().size();
749 int start = incrementOffset();
750 int size = context.getConfigParams().getSnapshotChunkSize();
751 if (context.getConfigParams().getSnapshotChunkSize() > snapshotLength) {
752 size = snapshotLength;
754 if ((start + context.getConfigParams().getSnapshotChunkSize()) > snapshotLength) {
755 size = snapshotLength - start;
759 if(LOG.isDebugEnabled()) {
760 LOG.debug("{}: Next chunk: length={}, offset={},size={}", context.getId(),
761 snapshotLength, start, size);
763 ByteString substring = getSnapshotBytes().substring(start, start + size);
764 nextChunkHashCode = substring.hashCode();
769 * reset should be called when the Follower needs to be sent the snapshot from the beginning
774 replyReceivedForOffset = offset;
775 chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;
776 lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
779 public int getLastChunkHashCode() {
780 return lastChunkHashCode;
784 // called from example-actor for printing the follower-states
785 public String printFollowerStates() {
786 final StringBuilder sb = new StringBuilder();
789 for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
791 sb.append(followerLogInformation.getId());
792 sb.append(" state:");
793 sb.append(followerLogInformation.isFollowerActive());
798 return sb.toString();
802 public FollowerLogInformation getFollower(String followerId) {
803 return followerToLog.get(followerId);
807 protected void setFollowerSnapshot(String followerId, FollowerToSnapshot snapshot) {
808 mapFollowerToSnapshot.put(followerId, snapshot);
812 public int followerSnapshotSize() {
813 return mapFollowerToSnapshot.size();
817 public int followerLogSize() {
818 return followerToLog.size();