2 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
4 * This program and the accompanying materials are made available under the
5 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6 * and is available at http://www.eclipse.org/legal/epl-v10.html
9 package org.opendaylight.controller.cluster.raft.behaviors;
11 import akka.actor.ActorRef;
12 import akka.actor.ActorSelection;
13 import akka.actor.Cancellable;
14 import com.google.common.annotations.VisibleForTesting;
15 import com.google.common.base.Optional;
16 import com.google.common.base.Preconditions;
17 import com.google.common.collect.ImmutableMap;
18 import com.google.common.collect.ImmutableMap.Builder;
19 import com.google.protobuf.ByteString;
20 import java.io.IOException;
21 import java.util.Collection;
22 import java.util.Collections;
23 import java.util.HashMap;
24 import java.util.Iterator;
25 import java.util.LinkedList;
26 import java.util.List;
28 import java.util.Map.Entry;
29 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
30 import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
31 import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
32 import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
33 import org.opendaylight.controller.cluster.raft.RaftActorContext;
34 import org.opendaylight.controller.cluster.raft.RaftState;
35 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
36 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
37 import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
38 import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
39 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
40 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
41 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
42 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
43 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
44 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
45 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
46 import scala.concurrent.duration.FiniteDuration;
49 * The behavior of a RaftActor when it is in the Leader state
53 * <li> Upon election: send initial empty AppendEntries RPCs
54 * (heartbeat) to each server; repeat during idle periods to
55 * prevent election timeouts (§5.2)
56 * <li> If command received from client: append entry to local log,
57 * respond after entry applied to state machine (§5.3)
58 * <li> If last log index ≥ nextIndex for a follower: send
59 * AppendEntries RPC with log entries starting at nextIndex
 * <li> If successful: update nextIndex and matchIndex for the follower (§5.3)
63 * <li> If AppendEntries fails because of log inconsistency:
64 * decrement nextIndex and retry (§5.3)
66 * <li> If there exists an N such that N > commitIndex, a majority
67 * of matchIndex[i] ≥ N, and log[N].term == currentTerm:
68 * set commitIndex = N (§5.3, §5.4).
public abstract class AbstractLeader extends AbstractRaftActorBehavior {

    // The index of the first chunk that is sent when installing a snapshot
    public static final int FIRST_CHUNK_INDEX = 1;

    // The index that the follower should respond with if it needs the install snapshot to be reset
    public static final int INVALID_CHUNK_INDEX = -1;

    // This would be passed as the hash code of the last chunk when sending the first chunk
    public static final int INITIAL_LAST_CHUNK_HASH_CODE = -1;

    // Per-follower replication state (matchIndex/nextIndex), keyed by follower id.
    // Populated once in the constructor from the peer addresses.
    private final Map<String, FollowerLogInformation> followerToLog;

    // In-flight InstallSnapshot transfers, keyed by follower id; entries are
    // added when chunking starts and removed when the last chunk is acked.
    private final Map<String, FollowerToSnapshot> mapFollowerToSnapshot = new HashMap<>();

    // Handle for the scheduled SendHeartBeat message; null until first scheduled.
    private Cancellable heartbeatSchedule = null;

    // Trackers for client requests awaiting replication/commit.
    private final Collection<ClientRequestTracker> trackerList = new LinkedList<>();

    // Majority vote count (includes the leader's own vote) needed to commit an entry.
    protected final int minReplicationCount;

    // Minimum number of reachable peers below which the leader considers itself isolated.
    protected final int minIsolatedLeaderPeerCount;

    // Snapshot bytes held in memory while an install-snapshot transfer is in progress;
    // absent when no transfer is active.
    private Optional<ByteString> snapshot;
    /**
     * Initializes leader state: builds the per-follower log-tracking map from the
     * configured peers, computes replication/isolation thresholds, and immediately
     * sends the initial (empty) AppendEntries heartbeat required by the Raft spec.
     *
     * @param context the RaftActorContext supplying peers, config and term information
     */
    public AbstractLeader(RaftActorContext context) {
        super(context, RaftState.Leader);

        // One FollowerLogInformation per known peer; nextIndex/matchIndex start at -1.
        final Builder<String, FollowerLogInformation> ftlBuilder = ImmutableMap.builder();
        for (String followerId : context.getPeerAddresses().keySet()) {
            FollowerLogInformation followerLogInformation =
                new FollowerLogInformationImpl(followerId, -1, context);

            ftlBuilder.put(followerId, followerLogInformation);

        followerToLog = ftlBuilder.build();

        leaderId = context.getId();

        LOG.debug("{}: Election: Leader has following peers: {}", logName(), getFollowerIds());

        minReplicationCount = getMajorityVoteCount(getFollowerIds().size());

        // the isolated Leader peer count will be 1 less than the majority vote count.
        // this is because the vote count has the self vote counted in it

        // 0 peers = 1 votesRequired , minIsolatedLeaderPeerCount = 0
        // 2 peers = 2 votesRequired , minIsolatedLeaderPeerCount = 1
        // 4 peers = 3 votesRequired, minIsolatedLeaderPeerCount = 2
        minIsolatedLeaderPeerCount = minReplicationCount > 0 ? (minReplicationCount - 1) : 0;

        // No install-snapshot transfer is active at election time.
        snapshot = Optional.absent();

        // Immediately schedule a heartbeat
        // Upon election: send initial empty AppendEntries RPCs
        // (heartbeat) to each server; repeat during idle periods to
        // prevent election timeouts (§5.2)
        sendAppendEntries(0, false);

        // It is important to schedule this heartbeat here
        scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
    /**
     * Return an immutable collection of follower identifiers.
     *
     * @return Collection of follower IDs (key set of the immutable followerToLog map)
     */
    protected final Collection<String> getFollowerIds() {
        return followerToLog.keySet();
    // Replaces the in-memory snapshot used for install-snapshot chunking.
    // Passing Optional.absent() releases the snapshot bytes once no follower needs them.
    void setSnapshot(Optional<ByteString> snapshot) {
        this.snapshot = snapshot;
    /**
     * Callback for an AppendEntries message received while in the Leader state.
     * The visible handling only logs the message; a leader does not append
     * entries on behalf of another leader at the same or lower term.
     */
    protected RaftActorBehavior handleAppendEntries(ActorRef sender,
        AppendEntries appendEntries) {

        LOG.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
    /**
     * Processes an AppendEntriesReply from a follower: updates that follower's
     * matchIndex/nextIndex, advances the commit index when a majority of followers
     * have replicated an entry from the current term (§5.3, §5.4), applies newly
     * committed entries, and pushes the next update to the replying follower.
     */
    protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
        AppendEntriesReply appendEntriesReply) {

        if(LOG.isTraceEnabled()) {
            LOG.trace("{}: handleAppendEntriesReply: {}", logName(), appendEntriesReply);

        // Update the FollowerLogInformation
        String followerId = appendEntriesReply.getFollowerId();
        FollowerLogInformation followerLogInformation =
            followerToLog.get(followerId);

        // A reply from a follower we do not track is logged and otherwise ignored.
        if(followerLogInformation == null){
            LOG.error("{}: handleAppendEntriesReply - unknown follower {}", logName(), followerId);

        // Warn when the reply arrives later than the election timeout - a sign of
        // messaging delays that could trigger spurious elections on the follower side.
        if(followerLogInformation.timeSinceLastActivity() >
            context.getConfigParams().getElectionTimeOutInterval().toMillis()) {
            LOG.warn("{} : handleAppendEntriesReply delayed beyond election timeout, " +
                "appendEntriesReply : {}, timeSinceLastActivity : {}, lastApplied : {}, commitIndex : {}",
                logName(), appendEntriesReply, followerLogInformation.timeSinceLastActivity(),
                context.getLastApplied(), context.getCommitIndex());

        followerLogInformation.markFollowerActive();

        boolean updated = false;
        if (appendEntriesReply.isSuccess()) {
            // Follower accepted: record its last replicated index and advance nextIndex.
            updated = followerLogInformation.setMatchIndex(appendEntriesReply.getLogLastIndex());
            updated = followerLogInformation.setNextIndex(appendEntriesReply.getLogLastIndex() + 1) || updated;

            if(updated && LOG.isDebugEnabled()) {
                LOG.debug("{}: handleAppendEntriesReply - FollowerLogInformation for {} updated: matchIndex: {}, nextIndex: {}", logName(),
                    followerId, followerLogInformation.getMatchIndex(), followerLogInformation.getNextIndex());
            LOG.debug("{}: handleAppendEntriesReply: received unsuccessful reply: {}", logName(), appendEntriesReply);

            // TODO: When we find that the follower is out of sync with the
            // Leader we simply decrement that followers next index by 1.
            // Would it be possible to do better than this? The RAFT spec
            // does not explicitly deal with it but may be something for us to
            followerLogInformation.decrNextIndex();

        // Now figure out if this reply warrants a change in the commitIndex
        // If there exists an N such that N > commitIndex, a majority
        // of matchIndex[i] ≥ N, and log[N].term == currentTerm:
        // set commitIndex = N (§5.3, §5.4).
        for (long N = context.getCommitIndex() + 1; ; N++) {
            // The leader itself counts as one replica.
            int replicatedCount = 1;

            for (FollowerLogInformation info : followerToLog.values()) {
                if (info.getMatchIndex() >= N) {

            if (replicatedCount >= minReplicationCount) {
                // Only commit entries from the leader's current term (§5.4.2).
                ReplicatedLogEntry replicatedLogEntry = context.getReplicatedLog().get(N);
                if (replicatedLogEntry != null &&
                    replicatedLogEntry.getTerm() == currentTerm()) {
                    context.setCommitIndex(N);

        // Apply the change to the state machine
        if (context.getCommitIndex() > context.getLastApplied()) {
            if(LOG.isDebugEnabled()) {
                LOG.debug("{}: handleAppendEntriesReply from {}: applying to log - commitIndex: {}, lastAppliedIndex: {}",
                    logName(), followerId, context.getCommitIndex(), context.getLastApplied());

            applyLogToStateMachine(context.getCommitIndex());

        if (!context.isSnapshotCaptureInitiated()) {

        //Send the next log entry immediately, if possible, no need to wait for heartbeat to trigger that event
        sendUpdatesToFollower(followerId, followerLogInformation, false, !updated);
    /**
     * Trims the in-memory replicated log up to the lowest index replicated to ALL
     * followers (or lastApplied when there are no followers), to bound memory usage.
     */
    private void purgeInMemoryLog() {
        //find the lowest index across followers which has been replicated to all.
        // lastApplied if there are no followers, so that we keep clearing the log for single-node
        // we would delete the in-mem log from that index on, in-order to minimize mem usage
        // we would also share this info thru AE with the followers so that they can delete their log entries as well.
        long minReplicatedToAllIndex = followerToLog.isEmpty() ? context.getLastApplied() : Long.MAX_VALUE;
        for (FollowerLogInformation info : followerToLog.values()) {
            minReplicatedToAllIndex = Math.min(minReplicatedToAllIndex, info.getMatchIndex());

        super.performSnapshotWithoutCapture(minReplicatedToAllIndex);
    /**
     * Removes and returns the client request tracker registered for the given
     * log index, if one exists.
     *
     * @param logIndex the replicated-log index whose tracker should be removed
     */
    protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
        final Iterator<ClientRequestTracker> it = trackerList.iterator();
        while (it.hasNext()) {
            final ClientRequestTracker t = it.next();
            if (t.getIndex() == logIndex) {
    /**
     * Looks up (without removing) the client request tracker for the given log index.
     *
     * @param logIndex the replicated-log index to search for
     */
    protected ClientRequestTracker findClientRequestTracker(long logIndex) {
        for (ClientRequestTracker tracker : trackerList) {
            if (tracker.getIndex() == logIndex) {
    /**
     * Callback for a RequestVoteReply received while in the Leader state; the
     * election this leader won has already concluded, so no state is changed here.
     */
    protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
        RequestVoteReply requestVoteReply) {
290 protected void beforeSendHeartbeat(){}
    /**
     * Main message dispatch for the Leader state. Steps down to Follower on any
     * RaftRPC carrying a higher term (§5.1); otherwise handles heartbeat ticks,
     * snapshot installation triggers, replication requests and snapshot-chunk
     * replies, delegating everything else to the superclass.
     */
    public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
        Preconditions.checkNotNull(sender, "sender should not be null");

        Object message = fromSerializableMessage(originalMessage);

        if (message instanceof RaftRPC) {
            RaftRPC rpc = (RaftRPC) message;
            // If RPC request or response contains term T > currentTerm:
            // set currentTerm = T, convert to follower (§5.1)
            // This applies to all RPC messages and responses
            if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
                LOG.debug("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
                    logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());

                // Persist the higher term (with no voted-for) before stepping down.
                context.getTermInformation().updateAndPersist(rpc.getTerm(), null);

                return switchBehavior(new Follower(context));

        if (message instanceof SendHeartBeat) {
            beforeSendHeartbeat();
            // Heartbeats are one-shot: re-arm the timer after each tick.
            scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
        } else if(message instanceof SendInstallSnapshot) {
            // received from RaftActor
            setSnapshot(Optional.of(((SendInstallSnapshot) message).getSnapshot()));
            sendInstallSnapshot();
        } else if (message instanceof Replicate) {
            replicate((Replicate) message);
        } else if (message instanceof InstallSnapshotReply){
            handleInstallSnapshotReply((InstallSnapshotReply) message);

        return super.handleMessage(sender, message);
    /**
     * Handles a follower's acknowledgement of an InstallSnapshot chunk. On success
     * either sends the next chunk or, on the last chunk, updates the follower's
     * match/next indexes to the snapshot boundary and releases the in-memory
     * snapshot once no follower still needs it. On failure the same chunk is retried,
     * and an INVALID_CHUNK_INDEX reply resets the whole transfer.
     */
    private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
        LOG.debug("{}: handleInstallSnapshotReply: {}", logName(), reply);

        String followerId = reply.getFollowerId();
        FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);

        if (followerToSnapshot == null) {
            LOG.error("{}: FollowerId {} in InstallSnapshotReply not known to Leader",
                logName(), followerId);

        // NOTE(review): unlike handleAppendEntriesReply, the followerToLog lookup
        // below is not null-checked - a reply from a follower missing from
        // followerToLog would NPE here. Confirm whether that is reachable.
        FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
        followerLogInformation.markFollowerActive();

        if (followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
            boolean wasLastChunk = false;
            if (reply.isSuccess()) {
                if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
                    //this was the last chunk reply
                    if(LOG.isDebugEnabled()) {
                        LOG.debug("{}: InstallSnapshotReply received, " +
                            "last chunk received, Chunk: {}. Follower: {} Setting nextIndex: {}",
                            logName(), reply.getChunkIndex(), followerId,
                            context.getReplicatedLog().getSnapshotIndex() + 1

                    // The follower is now caught up to the snapshot boundary.
                    followerLogInformation.setMatchIndex(
                        context.getReplicatedLog().getSnapshotIndex());
                    followerLogInformation.setNextIndex(
                        context.getReplicatedLog().getSnapshotIndex() + 1);
                    mapFollowerToSnapshot.remove(followerId);

                    LOG.debug("{}: follower: {}, matchIndex set to {}, nextIndex set to {}",
                        logName(), followerId, followerLogInformation.getMatchIndex(),
                        followerLogInformation.getNextIndex());

                    if (mapFollowerToSnapshot.isEmpty()) {
                        // once there are no pending followers receiving snapshots
                        // we can remove snapshot from the memory
                        setSnapshot(Optional.<ByteString>absent());

                    followerToSnapshot.markSendStatus(true);

                LOG.info("{}: InstallSnapshotReply received sending snapshot chunk failed, Will retry, Chunk: {}",
                    logName(), reply.getChunkIndex());

                followerToSnapshot.markSendStatus(false);

            if (wasLastChunk && !context.isSnapshotCaptureInitiated()) {
                // Since the follower is now caught up try to purge the log.
            } else if (!wasLastChunk && followerToSnapshot.canSendNextChunk()) {
                ActorSelection followerActor = context.getPeerActorSelection(followerId);
                if(followerActor != null) {
                    sendSnapshotChunk(followerActor, followerId);

            LOG.error("{}: Chunk index {} in InstallSnapshotReply from follower {} does not match expected index {}",
                logName(), reply.getChunkIndex(), followerId,
                followerToSnapshot.getChunkIndex());

            if(reply.getChunkIndex() == INVALID_CHUNK_INDEX){
                // Since the Follower did not find this index to be valid we should reset the follower snapshot
                // so that Installing the snapshot can resume from the beginning
                followerToSnapshot.reset();
    /**
     * Handles a Replicate request for a newly appended log entry: registers a
     * client-request tracker so the caller can be notified once the entry is
     * applied, then either commits immediately (single-node, no followers) or
     * fans the entry out to followers via AppendEntries.
     */
    private void replicate(Replicate replicate) {
        long logIndex = replicate.getReplicatedLogEntry().getIndex();

        LOG.debug("{}: Replicate message: identifier: {}, logIndex: {}", logName(),
            replicate.getIdentifier(), logIndex);

        // Create a tracker entry we will use this later to notify the
            new ClientRequestTrackerImpl(replicate.getClientActor(),
                replicate.getIdentifier(),

        if (followerToLog.isEmpty()) {
            // No followers: the entry is trivially replicated to a majority of one.
            context.setCommitIndex(logIndex);
            applyLogToStateMachine(logIndex);

            sendAppendEntries(0, false);
    /**
     * Sends an AppendEntries update to every follower that is either inactive or
     * has been quiet for at least the given interval - avoiding redundant repeats
     * to followers we heard from recently.
     *
     * @param timeSinceLastActivityInterval minimum follower inactivity (ms) before re-sending
     * @param isHeartbeat true when invoked from the heartbeat path
     */
    private void sendAppendEntries(long timeSinceLastActivityInterval, boolean isHeartbeat) {
        // Send an AppendEntries to all followers
        for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
            final String followerId = e.getKey();
            final FollowerLogInformation followerLogInformation = e.getValue();
            // This checks helps not to send a repeat message to the follower
            if(!followerLogInformation.isFollowerActive() ||
                followerLogInformation.timeSinceLastActivity() >= timeSinceLastActivityInterval) {
                sendUpdatesToFollower(followerId, followerLogInformation, true, isHeartbeat);
    /**
     * This method checks if any update needs to be sent to the given follower. This includes append log entries,
     * sending next snapshot chunk, and initiating a snapshot. When no other update applies and
     * {@code sendHeartbeat} is set, an empty AppendEntries is sent to keep the follower's timer fresh.
     * (Note: the method returns void; {@code isHeartbeat} only influences debug-log verbosity.)
     */
    private void sendUpdatesToFollower(String followerId, FollowerLogInformation followerLogInformation,
        boolean sendHeartbeat, boolean isHeartbeat) {

        ActorSelection followerActor = context.getPeerActorSelection(followerId);
        if (followerActor != null) {
            long followerNextIndex = followerLogInformation.getNextIndex();
            boolean isFollowerActive = followerLogInformation.isFollowerActive();

            if (mapFollowerToSnapshot.get(followerId) != null) {
                // if install snapshot is in process , then sent next chunk if possible
                if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
                    sendSnapshotChunk(followerActor, followerId);
                } else if(sendHeartbeat) {
                    // we send a heartbeat even if we have not received a reply for the last chunk
                    sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
                        Collections.<ReplicatedLogEntry>emptyList(), followerId);

                long leaderLastIndex = context.getReplicatedLog().lastIndex();
                long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();

                // Debug-log only for non-heartbeat updates unless tracing is on.
                if((!isHeartbeat && LOG.isDebugEnabled()) || LOG.isTraceEnabled()) {
                    LOG.debug("{}: Checking sendAppendEntries for follower {}, followerNextIndex {}, leaderLastIndex: {}, leaderSnapShotIndex: {}",
                        logName(), followerId, followerNextIndex, leaderLastIndex, leaderSnapShotIndex);

                if (isFollowerActive && context.getReplicatedLog().isPresent(followerNextIndex)) {

                    LOG.debug("{}: sendAppendEntries: {} is present for follower {}", logName(),
                        followerNextIndex, followerId);

                    // FIXME : Sending one entry at a time
                    final List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);

                    sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);

                } else if (isFollowerActive && followerNextIndex >= 0 &&
                    leaderLastIndex > followerNextIndex && !context.isSnapshotCaptureInitiated()) {
                    // if the followers next index is not present in the leaders log, and
                    // if the follower is just not starting and if leader's index is more than followers index
                    // then snapshot should be sent

                    if (LOG.isDebugEnabled()) {
                        LOG.debug(String.format("%s: InitiateInstallSnapshot to follower: %s," +
                            "follower-nextIndex: %d, leader-snapshot-index: %d, " +
                            "leader-last-index: %d", logName(), followerId,
                            followerNextIndex, leaderSnapShotIndex, leaderLastIndex));

                    // Send heartbeat to follower whenever install snapshot is initiated.
                    sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
                        Collections.<ReplicatedLogEntry>emptyList(), followerId);

                    initiateCaptureSnapshot(followerId, followerNextIndex);

                } else if(sendHeartbeat) {
                    //we send an AppendEntries, even if the follower is inactive
                    // in-order to update the followers timestamp, in case it becomes active again
                    sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
                        Collections.<ReplicatedLogEntry>emptyList(), followerId);
    /**
     * Builds and sends a single AppendEntries message (possibly empty, i.e. a
     * heartbeat) to the given follower, carrying the leader's commit index and
     * the replicated-to-all index so followers can trim their own logs.
     */
    private void sendAppendEntriesToFollower(ActorSelection followerActor, long followerNextIndex,
        List<ReplicatedLogEntry> entries, String followerId) {
        AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
            prevLogIndex(followerNextIndex),
            prevLogTerm(followerNextIndex), entries,
            context.getCommitIndex(), super.getReplicatedToAllIndex());

        // Log non-empty sends at debug; empty heartbeats only when tracing.
        if(!entries.isEmpty() || LOG.isTraceEnabled()) {
            LOG.debug("{}: Sending AppendEntries to follower {}: {}", logName(), followerId,

        followerActor.tell(appendEntries.toSerializable(), actor());
    /**
     * Install Snapshot works as follows
     * 1. Leader initiates the capture snapshot by sending a CaptureSnapshot message to actor
     * 2. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
     * and makes a call to Leader's handleMessage , with SendInstallSnapshot message.
     * 3. Leader , picks the snapshot from im-mem ReplicatedLog and sends it in chunks to the Follower
     * 4. On complete, Follower sends back a InstallSnapshotReply.
     * 5. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
     * and replenishes the memory by deleting the snapshot in Replicated log.
     * 6. If another follower requires a snapshot and a snapshot has been collected (via CaptureSnapshotReply)
     * then send the existing snapshot in chunks to the follower.
     *
     * @param followerId id of the follower that needs the snapshot
     * @param followerNextIndex the follower's next log index, used to decide if a snapshot is required
     */
    private void initiateCaptureSnapshot(String followerId, long followerNextIndex) {
        // A snapshot is needed only when the follower's next index has already been
        // compacted out of the in-memory log into the snapshot.
        if (!context.getReplicatedLog().isPresent(followerNextIndex) &&
            context.getReplicatedLog().isInSnapshot(followerNextIndex)) {

            if (snapshot.isPresent()) {
                // if a snapshot is present in the memory, most likely another install is in progress
                // no need to capture snapshot.
                // This could happen if another follower needs an install when one is going on.
                final ActorSelection followerActor = context.getPeerActorSelection(followerId);
                sendSnapshotChunk(followerActor, followerId);

            } else if (!context.isSnapshotCaptureInitiated()) {

                // Determine the last applied index/term, falling back to the existing
                // snapshot's boundary when the entry is no longer in the log.
                ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
                long lastAppliedIndex = -1;
                long lastAppliedTerm = -1;

                if (lastAppliedEntry != null) {
                    lastAppliedIndex = lastAppliedEntry.getIndex();
                    lastAppliedTerm = lastAppliedEntry.getTerm();
                } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
                    lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
                    lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();

                boolean isInstallSnapshotInitiated = true;
                long replicatedToAllIndex = super.getReplicatedToAllIndex();
                ReplicatedLogEntry replicatedToAllEntry = context.getReplicatedLog().get(replicatedToAllIndex);

                CaptureSnapshot captureSnapshot = new CaptureSnapshot(
                    lastIndex(), lastTerm(), lastAppliedIndex, lastAppliedTerm,
                    (replicatedToAllEntry != null ? replicatedToAllEntry.getIndex() : -1),
                    (replicatedToAllEntry != null ? replicatedToAllEntry.getTerm() : -1),
                    isInstallSnapshotInitiated);

                if(LOG.isDebugEnabled()) {
                    LOG.debug("{}: Initiating install snapshot to follower {}: {}", logName(), followerId,

                // Ask the RaftActor itself to capture the snapshot (step 1 above).
                actor().tell(captureSnapshot, actor());
                context.setSnapshotCaptureInitiated(true);
    /**
     * Starts snapshot chunking for every follower whose next index is no longer
     * in the in-memory log but is covered by the snapshot. Invoked on receipt of
     * SendInstallSnapshot from the RaftActor.
     */
    private void sendInstallSnapshot() {
        LOG.debug("{}: sendInstallSnapshot", logName());
        for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
            ActorSelection followerActor = context.getPeerActorSelection(e.getKey());

            if (followerActor != null) {
                long nextIndex = e.getValue().getNextIndex();

                if (!context.getReplicatedLog().isPresent(nextIndex) &&
                    context.getReplicatedLog().isInSnapshot(nextIndex)) {
                    sendSnapshotChunk(followerActor, e.getKey());
    /**
     * Sends a snapshot chunk to a given follower
     * InstallSnapshot should qualify as a heartbeat too.
     * Only acts when a snapshot is currently held in memory; the first call for a
     * follower implicitly creates its FollowerToSnapshot tracking state.
     */
    private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {

        if (snapshot.isPresent()) {
            ByteString nextSnapshotChunk = getNextSnapshotChunk(followerId,snapshot.get());

            // Note: the previous call to getNextSnapshotChunk has the side-effect of adding
            // followerId to the followerToSnapshot map.
            FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);

                new InstallSnapshot(currentTerm(), context.getId(),
                    context.getReplicatedLog().getSnapshotIndex(),
                    context.getReplicatedLog().getSnapshotTerm(),
                    followerToSnapshot.incrementChunkIndex(),
                    followerToSnapshot.getTotalChunks(),
                    Optional.of(followerToSnapshot.getLastChunkHashCode())

            if(LOG.isDebugEnabled()) {
                LOG.debug("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}",
                    logName(), followerActor.path(), followerToSnapshot.getChunkIndex(),
                    followerToSnapshot.getTotalChunks());

        } catch (IOException e) {
            LOG.error("{}: InstallSnapshot failed for Leader.", logName(), e);
    /**
     * Accepts the snapshot as a ByteString, enters it into the map for future chunks,
     * and creates and returns the next ByteString chunk for the given follower.
     */
    private ByteString getNextSnapshotChunk(String followerId, ByteString snapshotBytes) throws IOException {
        FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
        if (followerToSnapshot == null) {
            // First chunk for this follower: set up its chunking state.
            followerToSnapshot = new FollowerToSnapshot(snapshotBytes);
            mapFollowerToSnapshot.put(followerId, followerToSnapshot);

        ByteString nextChunk = followerToSnapshot.getNextChunk();

        LOG.debug("{}: next snapshot chunk size for follower {}: {}", logName(), followerId, nextChunk.size());
667 private void sendHeartBeat() {
668 if (!followerToLog.isEmpty()) {
669 LOG.trace("{}: Sending heartbeat", logName());
670 sendAppendEntries(context.getConfigParams().getHeartBeatInterval().toMillis(), true);
674 private void stopHeartBeat() {
675 if (heartbeatSchedule != null && !heartbeatSchedule.isCancelled()) {
676 heartbeatSchedule.cancel();
    /**
     * Schedules a single SendHeartBeat message to this actor after the given
     * interval. One-shot by design: the handler re-arms it after each tick,
     * so heartbeats pause naturally while other traffic is flowing.
     *
     * @param interval delay before the SendHeartBeat message is delivered
     */
    private void scheduleHeartBeat(FiniteDuration interval) {
        if (followerToLog.isEmpty()) {
            // Optimization - do not bother scheduling a heartbeat as there are

        // Schedule a heartbeat. When the scheduler triggers a SendHeartbeat
        // message is sent to itself.
        // Scheduling the heartbeat only once here because heartbeats do not
        // need to be sent if there are other messages being sent to the remote
        heartbeatSchedule = context.getActorSystem().scheduler().scheduleOnce(
            interval, context.getActor(), new SendHeartBeat(),
            context.getActorSystem().dispatcher(), context.getActor());
    // Behavior teardown when leaving the Leader state (body not visible in this view).
    public void close() throws Exception {
    // While in the Leader state, this actor itself is the leader.
    public String getLeaderId() {
        return context.getId();
    /**
     * Determines whether this leader appears isolated from the cluster: it counts
     * down from minIsolatedLeaderPeerCount over the active followers and reports
     * isolation if the count never reaches zero (i.e. too few followers are active).
     * NOTE(review): the per-active-follower decrement is not visible in this view -
     * confirm the countdown logic against the full source.
     */
    protected boolean isLeaderIsolated() {
        int minPresent = minIsolatedLeaderPeerCount;
        for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
            if (followerLogInformation.isFollowerActive()) {
                if (minPresent == 0) {

        return (minPresent != 0);
    /**
     * Encapsulates the snapshot bytestring and handles the logic of sending
     * it to a single follower in fixed-size chunks, tracking the current offset,
     * chunk index, per-chunk hash codes and whether the last send succeeded.
     */
    protected class FollowerToSnapshot {
        // The complete snapshot being transferred.
        private final ByteString snapshotBytes;
        // Byte offset of the chunk most recently handed out.
        private int offset = 0;
        // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
        private int replyReceivedForOffset;
        // if replyStatus is false, the previous chunk is attempted
        private boolean replyStatus = false;
        // 1-based index of the current chunk.
        private int chunkIndex;
        // Total number of chunks, fixed at construction from the snapshot size.
        private final int totalChunks;
        // Hash of the last successfully acknowledged chunk; sent with the next one.
        private int lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
        // Hash of the chunk currently in flight; promoted on success.
        private int nextChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;

        public FollowerToSnapshot(ByteString snapshotBytes) {
            this.snapshotBytes = snapshotBytes;
            int size = snapshotBytes.size();
            // Ceiling division: one extra chunk for any trailing partial block.
            totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
                ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
            if(LOG.isDebugEnabled()) {
                LOG.debug("{}: Snapshot {} bytes, total chunks to send:{}",
                    logName(), size, totalChunks);

            replyReceivedForOffset = -1;
            chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;

        public ByteString getSnapshotBytes() {
            return snapshotBytes;

        // Advances the byte offset by one chunk size (unless the previous send failed).
        public int incrementOffset() {
            // if prev chunk failed, we would want to sent the same chunk again
            offset = offset + context.getConfigParams().getSnapshotChunkSize();

        // Advances the chunk index (unless the previous send failed).
        public int incrementChunkIndex() {
            // if prev chunk failed, we would want to sent the same chunk again
            chunkIndex = chunkIndex + 1;

        public int getChunkIndex() {

        public int getTotalChunks() {

        public boolean canSendNextChunk() {
            // we only send a false if a chunk is sent but we have not received a reply yet
            return replyReceivedForOffset == offset;

        public boolean isLastChunk(int chunkIndex) {
            return totalChunks == chunkIndex;

        // Records the follower's reply for the in-flight chunk; on success the
        // in-flight hash becomes the "last chunk" hash for the next send.
        public void markSendStatus(boolean success) {
            // if the chunk sent was successful
            replyReceivedForOffset = offset;

            lastChunkHashCode = nextChunkHashCode;

            // if the chunk sent was failure
            replyReceivedForOffset = offset;

        // Slices the next chunk out of the snapshot, clamping the size at the
        // snapshot end, and remembers its hash for the follower to verify.
        public ByteString getNextChunk() {
            int snapshotLength = getSnapshotBytes().size();
            int start = incrementOffset();
            int size = context.getConfigParams().getSnapshotChunkSize();
            if (context.getConfigParams().getSnapshotChunkSize() > snapshotLength) {
                size = snapshotLength;
            if ((start + context.getConfigParams().getSnapshotChunkSize()) > snapshotLength) {
                size = snapshotLength - start;

            LOG.debug("{}: Next chunk: length={}, offset={},size={}", logName(),
                snapshotLength, start, size);

            ByteString substring = getSnapshotBytes().substring(start, start + size);
            nextChunkHashCode = substring.hashCode();

        /**
         * reset should be called when the Follower needs to be sent the snapshot from the beginning
         */
            replyReceivedForOffset = offset;
            chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;
            lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;

        public int getLastChunkHashCode() {
            return lastChunkHashCode;
    // called from example-actor for printing the follower-states
    // Renders each follower's id and active flag into a single string.
    public String printFollowerStates() {
        final StringBuilder sb = new StringBuilder();

        for (FollowerLogInformation followerLogInformation : followerToLog.values()) {

            sb.append(followerLogInformation.getId());
            sb.append(" state:");
            sb.append(followerLogInformation.isFollowerActive());

        return sb.toString();
    // Returns the replication-tracking state for the given follower, or null if unknown.
    public FollowerLogInformation getFollower(String followerId) {
        return followerToLog.get(followerId);
    // Registers (or replaces) the install-snapshot chunking state for a follower.
    protected void setFollowerSnapshot(String followerId, FollowerToSnapshot snapshot) {
        mapFollowerToSnapshot.put(followerId, snapshot);
    // Number of followers with an install-snapshot transfer currently in progress.
    public int followerSnapshotSize() {
        return mapFollowerToSnapshot.size();
    // Number of followers this leader is tracking.
    public int followerLogSize() {
        return followerToLog.size();