2 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
4 * This program and the accompanying materials are made available under the
5 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6 * and is available at http://www.eclipse.org/legal/epl-v10.html
9 package org.opendaylight.controller.cluster.raft.behaviors;
11 import akka.actor.ActorRef;
12 import akka.actor.ActorSelection;
13 import akka.actor.Cancellable;
14 import com.google.common.annotations.VisibleForTesting;
15 import com.google.common.base.Optional;
16 import com.google.common.base.Preconditions;
17 import com.google.common.collect.ImmutableMap;
18 import com.google.common.collect.ImmutableMap.Builder;
19 import com.google.protobuf.ByteString;
20 import java.io.IOException;
21 import java.util.Collection;
22 import java.util.Collections;
23 import java.util.HashMap;
24 import java.util.Iterator;
25 import java.util.LinkedList;
26 import java.util.List;
28 import java.util.Map.Entry;
29 import java.util.concurrent.TimeUnit;
30 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
31 import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
32 import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
33 import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
34 import org.opendaylight.controller.cluster.raft.RaftActorContext;
35 import org.opendaylight.controller.cluster.raft.RaftState;
36 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
37 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
38 import org.opendaylight.controller.cluster.raft.base.messages.InitiateInstallSnapshot;
39 import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
40 import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
41 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
42 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
43 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
44 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
45 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
46 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
47 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
48 import scala.concurrent.duration.FiniteDuration;
51 * The behavior of a RaftActor when it is in the Leader state
55 * <li> Upon election: send initial empty AppendEntries RPCs
56 * (heartbeat) to each server; repeat during idle periods to
57 * prevent election timeouts (§5.2)
58 * <li> If command received from client: append entry to local log,
59 * respond after entry applied to state machine (§5.3)
60 * <li> If last log index ≥ nextIndex for a follower: send
61 * AppendEntries RPC with log entries starting at nextIndex
63 * <li> If successful: update nextIndex and matchIndex for
65 * <li> If AppendEntries fails because of log inconsistency:
66 * decrement nextIndex and retry (§5.3)
68 * <li> If there exists an N such that N > commitIndex, a majority
69 * of matchIndex[i] ≥ N, and log[N].term == currentTerm:
70 * set commitIndex = N (§5.3, §5.4).
72 public abstract class AbstractLeader extends AbstractRaftActorBehavior {
// NOTE(review): this file is a partial extraction — each line carries a stray leading
// number and many original lines are missing. Code is kept byte-identical; only
// comments are added.
74 // The index of the first chunk that is sent when installing a snapshot
75 public static final int FIRST_CHUNK_INDEX = 1;
77 // The index that the follower should respond with if it needs the install snapshot to be reset
78 public static final int INVALID_CHUNK_INDEX = -1;
80 // This would be passed as the hash code of the last chunk when sending the first chunk
81 public static final int INITIAL_LAST_CHUNK_HASH_CODE = -1;
// Per-follower replication state (nextIndex / matchIndex), keyed by follower id.
// Built once in the constructor as an immutable map.
83 private final Map<String, FollowerLogInformation> followerToLog;
// In-flight chunked snapshot installations, one entry per follower currently receiving chunks.
84 private final Map<String, FollowerToSnapshot> mapFollowerToSnapshot = new HashMap<>();
// Handle for the currently scheduled heartbeat message; null when none is scheduled.
86 private Cancellable heartbeatSchedule = null;
// Client request trackers awaiting commit, matched back to callers by log index.
88 private final Collection<ClientRequestTracker> trackerList = new LinkedList<>();
// Number of replicas (self vote included) needed to consider a log entry committed.
90 protected final int minReplicationCount;
// Minimum count of active peers below which the leader treats itself as isolated.
92 protected final int minIsolatedLeaderPeerCount;
// Snapshot bytes held in memory only while an install-snapshot is in progress; absent otherwise.
94 private Optional<ByteString> snapshot;
// Highest log index believed replicated to all followers; -1 when unknown / no followers.
96 private long replicatedToAllIndex = -1;
// Constructor: initializes per-follower replication state, derives quorum sizes,
// and immediately schedules the first (empty) heartbeat as required on election.
98 public AbstractLeader(RaftActorContext context) {
// Each follower starts with nextIndex at the leader's commit index and
// matchIndex at -1 (no confirmed replication yet).
101 final Builder<String, FollowerLogInformation> ftlBuilder = ImmutableMap.builder();
102 for (String followerId : context.getPeerAddresses().keySet()) {
103 FollowerLogInformation followerLogInformation =
104 new FollowerLogInformationImpl(followerId,
105 context.getCommitIndex(), -1,
106 context.getConfigParams().getElectionTimeOutInterval());
108 ftlBuilder.put(followerId, followerLogInformation)
110 followerToLog = ftlBuilder.build();
112 leaderId = context.getId();
114 LOG.debug("{}: Election: Leader has following peers: {}", context.getId(), getFollowerIds());
// Quorum for commit decisions is a majority over the follower set (self included).
116 minReplicationCount = getMajorityVoteCount(getFollowerIds().size());
118 // the isolated Leader peer count will be 1 less than the majority vote count.
119 // this is because the vote count has the self vote counted in it
121 // 0 peers = 1 votesRequired , minIsolatedLeaderPeerCount = 0
122 // 2 peers = 2 votesRequired , minIsolatedLeaderPeerCount = 1
123 // 4 peers = 3 votesRequired, minIsolatedLeaderPeerCount = 2
124 minIsolatedLeaderPeerCount = minReplicationCount > 0 ? (minReplicationCount - 1) : 0;
// No snapshot is held in memory until an install-snapshot is started.
126 snapshot = Optional.absent();
128 // Immediately schedule a heartbeat
129 // Upon election: send initial empty AppendEntries RPCs
130 // (heartbeat) to each server; repeat during idle periods to
131 // prevent election timeouts (§5.2)
132 scheduleHeartBeat(new FiniteDuration(0, TimeUnit.SECONDS));
136 * Return an immutable collection of follower identifiers.
138 * @return Collection of follower IDs
140 protected final Collection<String> getFollowerIds() {
// Key view of the ImmutableMap — callers get an unmodifiable set.
141 return followerToLog.keySet();
// Accessor/mutator for the in-memory snapshot used during chunked install-snapshot.
// setSnapshot is package-private (visible for testing via the caller in handleMessage).
144 private Optional<ByteString> getSnapshot() {
149 void setSnapshot(Optional<ByteString> snapshot) {
150 this.snapshot = snapshot;
// A leader does not expect AppendEntries from peers; the visible portion only logs
// the message at debug level. (Higher-term RPCs are handled generically in
// handleMessage, which converts the leader to a follower.)
154 protected RaftActorBehavior handleAppendEntries(ActorRef sender,
155 AppendEntries appendEntries) {
157 if(LOG.isDebugEnabled()) {
158 LOG.debug("{}: handleAppendEntries: {}", context.getId(), appendEntries);
// Processes a follower's AppendEntriesReply: updates that follower's match/next
// indices, advances the commit index when a quorum has replicated an entry from the
// current term, applies newly committed entries, and pushes the next update to the
// follower without waiting for the heartbeat.
165 protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
166 AppendEntriesReply appendEntriesReply) {
168 if(! appendEntriesReply.isSuccess()) {
169 if(LOG.isDebugEnabled()) {
170 LOG.debug("{}: handleAppendEntriesReply: {}", context.getId(), appendEntriesReply);
174 // Update the FollowerLogInformation
175 String followerId = appendEntriesReply.getFollowerId();
176 FollowerLogInformation followerLogInformation =
177 followerToLog.get(followerId);
// A reply from a peer we do not track is logged and (presumably) ignored —
// the early-return path is not visible in this extraction.
179 if(followerLogInformation == null){
180 LOG.error("{}: handleAppendEntriesReply - unknown follower {}", context.getId(), followerId);
// Any reply counts as activity for isolation-detection purposes.
184 followerLogInformation.markFollowerActive();
186 if (appendEntriesReply.isSuccess()) {
// Follower confirmed entries up to logLastIndex; next send starts after it.
187 followerLogInformation
188 .setMatchIndex(appendEntriesReply.getLogLastIndex())
189 followerLogInformation
190 .setNextIndex(appendEntriesReply.getLogLastIndex() + 1)
193 // TODO: When we find that the follower is out of sync with the
194 // Leader we simply decrement that followers next index by 1.
195 // Would it be possible to do better than this? The RAFT spec
196 // does not explicitly deal with it but may be something for us to
199 followerLogInformation.decrNextIndex();
202 // Now figure out if this reply warrants a change in the commitIndex
203 // If there exists an N such that N > commitIndex, a majority
204 // of matchIndex[i] ≥ N, and log[N].term == currentTerm:
205 // set commitIndex = N (§5.3, §5.4).
// replicatedCount starts at 1 to count the leader's own copy.
206 for (long N = context.getCommitIndex() + 1; ; N++) {
207 int replicatedCount = 1;
209 for (FollowerLogInformation info : followerToLog.values()) {
210 if (info.getMatchIndex() >= N) {
// Raft safety rule: only entries from the current term are committed by counting.
215 if (replicatedCount >= minReplicationCount) {
216 ReplicatedLogEntry replicatedLogEntry = context.getReplicatedLog().get(N);
217 if (replicatedLogEntry != null &&
218 replicatedLogEntry.getTerm() == currentTerm()) {
219 context.setCommitIndex(N);
226 // Apply the change to the state machine
227 if (context.getCommitIndex() > context.getLastApplied()) {
228 applyLogToStateMachine(context.getCommitIndex());
// NOTE(review): body of this branch is not visible — presumably purges the
// in-memory log when no snapshot capture is in progress; confirm against original.
231 if (!context.isSnapshotCaptureInitiated()) {
235 //Send the next log entry immediately, if possible, no need to wait for heartbeat to trigger that event
236 sendUpdatesToFollower(followerId, followerLogInformation, false);
// Computes the minimum matchIndex across all followers (the index replicated
// everywhere) and trims the in-memory log up to it via fakeSnapshot.
240 private void purgeInMemoryLog() {
241 //find the lowest index across followers which has been replicated to all. -1 if there are no followers.
242 // we would delete the in-mem log from that index on, in-order to minimize mem usage
243 // we would also share this info thru AE with the followers so that they can delete their log entries as well.
244 long minReplicatedToAllIndex = followerToLog.isEmpty() ? -1 : Long.MAX_VALUE;
245 for (FollowerLogInformation info : followerToLog.values()) {
246 minReplicatedToAllIndex = Math.min(minReplicatedToAllIndex, info.getMatchIndex());
// fakeSnapshot (defined outside this view) trims the log and returns the new
// replicated-to-all index, which is also advertised to followers in AppendEntries.
249 replicatedToAllIndex = fakeSnapshot(minReplicatedToAllIndex, replicatedToAllIndex);
// Removes and returns the tracker registered for the given log index, if any.
// Uses an explicit Iterator so removal during iteration is safe.
253 protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
254 final Iterator<ClientRequestTracker> it = trackerList.iterator();
255 while (it.hasNext()) {
256 final ClientRequestTracker t = it.next();
257 if (t.getIndex() == logIndex) {
// Looks up (without removing) the tracker for the given log index.
// The return statements are not visible in this extraction.
267 protected ClientRequestTracker findClientRequestTracker(long logIndex) {
268 for (ClientRequestTracker tracker : trackerList) {
269 if (tracker.getIndex() == logIndex) {
// A leader has already won its election; late RequestVoteReply messages need no
// processing (the visible portion shows no handling logic).
277 protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
278 RequestVoteReply requestVoteReply) {
// This behavior always reports the Leader state.
283 public RaftState state() {
284 return RaftState.Leader;
// Central message dispatch for the Leader behavior. Performs the Raft term check
// first (step down to Follower on a higher term), then handles leader-specific
// messages, rescheduling the heartbeat after each one.
288 public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
289 Preconditions.checkNotNull(sender, "sender should not be null");
// Incoming messages may arrive in serialized form; normalize before dispatch.
291 Object message = fromSerializableMessage(originalMessage);
293 if (message instanceof RaftRPC) {
294 RaftRPC rpc = (RaftRPC) message;
295 // If RPC request or response contains term T > currentTerm:
296 // set currentTerm = T, convert to follower (§5.1)
297 // This applies to all RPC messages and responses
298 if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
299 LOG.debug("{}: Term {} in \"{}\" message is greater than leader's term {}", context.getId(),
300 rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
// Persist the new term with no voted-for before stepping down.
302 context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
304 return switchBehavior(new Follower(context));
// Leader-specific message handling; each branch is followed by rescheduling the
// heartbeat (L329) so idle periods still produce AppendEntries.
309 if (message instanceof SendHeartBeat) {
313 } else if(message instanceof InitiateInstallSnapshot) {
314 installSnapshotIfNeeded();
316 } else if(message instanceof SendInstallSnapshot) {
317 // received from RaftActor
318 setSnapshot(Optional.of(((SendInstallSnapshot) message).getSnapshot()));
319 sendInstallSnapshot();
321 } else if (message instanceof Replicate) {
322 replicate((Replicate) message);
324 } else if (message instanceof InstallSnapshotReply){
325 handleInstallSnapshotReply((InstallSnapshotReply) message);
329 scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
// Anything not handled above falls through to the common RaftActorBehavior handling.
332 return super.handleMessage(sender, message);
// Handles a follower's acknowledgement of a snapshot chunk: on the last chunk,
// finalizes the follower's indices from the snapshot; on intermediate chunks,
// marks the chunk delivered so the next one can be sent; on failure or chunk-index
// mismatch, retries or resets the chunked transfer.
335 private void handleInstallSnapshotReply(InstallSnapshotReply reply) {
336 String followerId = reply.getFollowerId();
337 FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
// A reply for a follower with no transfer in progress is an error; the early
// return path is not visible in this extraction.
339 if (followerToSnapshot == null) {
340 LOG.error("{}: FollowerId {} in InstallSnapshotReply not known to Leader",
341 context.getId(), followerId);
345 FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
346 followerLogInformation.markFollowerActive();
// Only act on the reply if it matches the chunk we most recently sent.
348 if (followerToSnapshot.getChunkIndex() == reply.getChunkIndex()) {
349 if (reply.isSuccess()) {
350 if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
351 //this was the last chunk reply
352 if(LOG.isDebugEnabled()) {
353 LOG.debug("{}: InstallSnapshotReply received, " +
354 "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
355 context.getId(), reply.getChunkIndex(), followerId,
356 context.getReplicatedLog().getSnapshotIndex() + 1
// Snapshot fully installed: follower is caught up to the snapshot boundary.
360 followerLogInformation.setMatchIndex(
361 context.getReplicatedLog().getSnapshotIndex())
362 followerLogInformation.setNextIndex(
363 context.getReplicatedLog().getSnapshotIndex() + 1)
364 mapFollowerToSnapshot.remove(followerId);
366 if(LOG.isDebugEnabled()) {
// NOTE(review): the format string below is concatenated with context.getId() using
// '+' instead of passing it as a "{}" argument — the id becomes part of the pattern
// and getNextIndex() fills the (missing) placeholder. Likely should be
// "{}: followerToLog...getNextIndex()={}". Flagged only; not fixed here since
// surrounding lines are missing from this extraction.
367 LOG.debug("{}: followerToLog.get(followerId).getNextIndex()=" +
368 context.getId(), followerToLog.get(followerId).getNextIndex());
371 if (mapFollowerToSnapshot.isEmpty()) {
372 // once there are no pending followers receiving snapshots
373 // we can remove snapshot from the memory
374 setSnapshot(Optional.<ByteString>absent());
// Intermediate chunk acknowledged — allow the next chunk to be sent.
378 followerToSnapshot.markSendStatus(true);
381 LOG.info("{}: InstallSnapshotReply received sending snapshot chunk failed, Will retry, Chunk: {}",
382 context.getId(), reply.getChunkIndex());
// Failure: markSendStatus(false) arranges for the same chunk to be re-sent.
384 followerToSnapshot.markSendStatus(false);
387 LOG.error("{}: Chunk index {} in InstallSnapshotReply from follower {} does not match expected index {}",
388 context.getId(), reply.getChunkIndex(), followerId,
389 followerToSnapshot.getChunkIndex());
391 if(reply.getChunkIndex() == INVALID_CHUNK_INDEX){
392 // Since the Follower did not find this index to be valid we should reset the follower snapshot
393 // so that Installing the snapshot can resume from the beginning
394 followerToSnapshot.reset();
// Handles a Replicate request from the RaftActor: registers a tracker so the client
// can be notified on commit, and — in the single-node case (no followers) — commits
// and applies the entry immediately since the leader alone forms a majority.
399 private void replicate(Replicate replicate) {
400 long logIndex = replicate.getReplicatedLogEntry().getIndex();
402 if(LOG.isDebugEnabled()) {
403 LOG.debug("{}: Replicate message {}", context.getId(), logIndex);
406 // Create a tracker entry we will use this later to notify the
// NOTE(review): the surrounding add-to-trackerList statement is partially missing
// from this extraction; the tracker is presumably added to trackerList here.
409 new ClientRequestTrackerImpl(replicate.getClientActor(),
410 replicate.getIdentifier(),
// With no followers the leader is the entire cluster: commit and apply directly.
414 if (followerToLog.isEmpty()) {
415 context.setCommitIndex(logIndex);
416 applyLogToStateMachine(logIndex);
// Sends AppendEntries (or other pending updates) to every follower that has been
// quiet for at least one heartbeat interval, avoiding redundant sends to followers
// that replied recently.
422 private void sendAppendEntries() {
423 // Send an AppendEntries to all followers
424 long heartbeatInterval = context.getConfigParams().getHeartBeatInterval().toMillis();
425 for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
426 final String followerId = e.getKey();
427 final FollowerLogInformation followerLogInformation = e.getValue();
428 // This checks helps not to send a repeat message to the follower
429 if(followerLogInformation.timeSinceLastActivity() >= heartbeatInterval) {
// true => send a heartbeat even when there is nothing new to replicate.
430 sendUpdatesToFollower(followerId, followerLogInformation, true);
437 * This method checks if any update needs to be sent to the given follower. This includes append log entries,
438 * sending next snapshot chunk, and initiating a snapshot.
439 * @return true if any update is sent, false otherwise
442 private void sendUpdatesToFollower(String followerId, FollowerLogInformation followerLogInformation,
443 boolean sendHeartbeat) {
// No actor selection means the peer address is unknown — nothing can be sent.
445 ActorSelection followerActor = context.getPeerActorSelection(followerId);
446 if (followerActor != null) {
447 long followerNextIndex = followerLogInformation.getNextIndex();
448 boolean isFollowerActive = followerLogInformation.isFollowerActive();
// Case 1: a chunked snapshot install is in progress for this follower.
450 if (mapFollowerToSnapshot.get(followerId) != null) {
451 // if install snapshot is in process , then sent next chunk if possible
452 if (isFollowerActive && mapFollowerToSnapshot.get(followerId).canSendNextChunk()) {
453 sendSnapshotChunk(followerActor, followerId);
454 } else if(sendHeartbeat) {
455 // we send a heartbeat even if we have not received a reply for the last chunk
456 sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
457 Collections.<ReplicatedLogEntry>emptyList(), followerId);
// Case 2: no snapshot in progress — decide between log entries, snapshot, or heartbeat.
460 long leaderLastIndex = context.getReplicatedLog().lastIndex();
461 long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();
// 2a: the follower's next index is still in the in-memory log — send entries.
462 if (isFollowerActive &&
463 context.getReplicatedLog().isPresent(followerNextIndex)) {
464 // FIXME : Sending one entry at a time
465 final List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex, 1);
467 sendAppendEntriesToFollower(followerActor, followerNextIndex, entries, followerId);
// 2b: the needed entries were already purged from the in-memory log — the follower
// can only be caught up via an install-snapshot.
469 } else if (isFollowerActive && followerNextIndex >= 0 &&
470 leaderLastIndex >= followerNextIndex) {
471 // if the followers next index is not present in the leaders log, and
472 // if the follower is just not starting and if leader's index is more than followers index
473 // then snapshot should be sent
475 if (LOG.isDebugEnabled()) {
476 LOG.debug("InitiateInstallSnapshot to follower:{}," +
477 "follower-nextIndex:{}, leader-snapshot-index:{}, " +
478 "leader-last-index:{}", followerId,
479 followerNextIndex, leaderSnapShotIndex, leaderLastIndex
// Self-message: snapshot capture/installation proceeds asynchronously.
482 actor().tell(new InitiateInstallSnapshot(), actor());
484 // Send heartbeat to follower whenever install snapshot is initiated.
485 sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
486 Collections.<ReplicatedLogEntry>emptyList(), followerId);
// 2c: nothing to send, but heartbeats still go out to keep/detect liveness.
488 } else if(sendHeartbeat) {
489 //we send an AppendEntries, even if the follower is inactive
490 // in-order to update the followers timestamp, in case it becomes active again
491 sendAppendEntriesToFollower(followerActor, followerLogInformation.getNextIndex(),
492 Collections.<ReplicatedLogEntry>emptyList(), followerId);
// Builds and sends a single AppendEntries message to one follower. prevLogIndex/
// prevLogTerm anchor the follower's consistency check; replicatedToAllIndex lets
// followers trim their own logs. An empty entries list acts as a heartbeat.
499 private void sendAppendEntriesToFollower(ActorSelection followerActor, long followerNextIndex,
500 List<ReplicatedLogEntry> entries, String followerId) {
501 AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
502 prevLogIndex(followerNextIndex),
503 prevLogTerm(followerNextIndex), entries,
504 context.getCommitIndex(), replicatedToAllIndex);
// Only log non-heartbeat sends to keep the debug log readable.
506 if(!entries.isEmpty()) {
507 LOG.debug("{}: Sending AppendEntries to follower {}: {}", context.getId(), followerId,
// Messages are serialized before crossing the actor boundary.
511 followerActor.tell(appendEntries.toSerializable(), actor());
515 * An installSnapshot is scheduled at a interval that is a multiple of
516 * a HEARTBEAT_INTERVAL. This is to avoid the need to check for installing
517 * snapshots at every heartbeat.
519 * Install Snapshot works as follows
520 * 1. Leader sends a InitiateInstallSnapshot message to self
521 * 2. Leader then initiates the capture snapshot by sending a CaptureSnapshot message to actor
522 * 3. RaftActor on receipt of the CaptureSnapshotReply (from Shard), stores the received snapshot in the replicated log
523 * and makes a call to Leader's handleMessage , with SendInstallSnapshot message.
524 * 4. Leader , picks the snapshot from im-mem ReplicatedLog and sends it in chunks to the Follower
525 * 5. On complete, Follower sends back a InstallSnapshotReply.
526 * 6. On receipt of the InstallSnapshotReply for the last chunk, Leader marks the install complete for that follower
527 * and replenishes the memory by deleting the snapshot in Replicated log.
530 private void installSnapshotIfNeeded() {
531 if(LOG.isDebugEnabled()) {
532 LOG.debug("{}: installSnapshotIfNeeded, followers {}", context.getId(), followerToLog.keySet());
535 for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
536 final ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
538 if (followerActor != null) {
539 long nextIndex = e.getValue().getNextIndex();
// A follower needs a snapshot when its next index is no longer in the in-memory
// log but is covered by the leader's snapshot.
541 if (!context.getReplicatedLog().isPresent(nextIndex) &&
542 context.getReplicatedLog().isInSnapshot(nextIndex)) {
543 LOG.info("{}: {} follower needs a snapshot install", context.getId(), e.getKey());
544 if (snapshot.isPresent()) {
545 // if a snapshot is present in the memory, most likely another install is in progress
546 // no need to capture snapshot
547 sendSnapshotChunk(followerActor, e.getKey());
// Otherwise trigger a fresh capture; one is enough for all lagging followers.
550 initiateCaptureSnapshot();
551 //we just need 1 follower who would need snapshot to be installed.
552 // when we have the snapshot captured, we would again check (in SendInstallSnapshot)
553 // who needs an install and send to all who need
562 // on every install snapshot, we try to capture the snapshot.
563 // Once a capture is going on, another one issued will get ignored by RaftActor.
564 private void initiateCaptureSnapshot() {
565 LOG.info("{}: Initiating Snapshot Capture to Install Snapshot, Leader:{}", context.getId(), getLeaderId());
566 ReplicatedLogEntry lastAppliedEntry = context.getReplicatedLog().get(context.getLastApplied());
567 long lastAppliedIndex = -1;
568 long lastAppliedTerm = -1;
// Prefer the actual last-applied entry; fall back to the snapshot boundary if the
// entry has already been trimmed from the in-memory log.
570 if (lastAppliedEntry != null) {
571 lastAppliedIndex = lastAppliedEntry.getIndex();
572 lastAppliedTerm = lastAppliedEntry.getTerm();
573 } else if (context.getReplicatedLog().getSnapshotIndex() > -1) {
574 lastAppliedIndex = context.getReplicatedLog().getSnapshotIndex();
575 lastAppliedTerm = context.getReplicatedLog().getSnapshotTerm();
// The flag tells the RaftActor this capture is for an install-snapshot, so it will
// call back with SendInstallSnapshot when done.
578 boolean isInstallSnapshotInitiated = true;
579 actor().tell(new CaptureSnapshot(lastIndex(), lastTerm(),
580 lastAppliedIndex, lastAppliedTerm, isInstallSnapshotInitiated),
// After a snapshot capture completes, sends the first chunk to every follower whose
// next index has fallen into the snapshot (i.e. every follower that needs it).
585 private void sendInstallSnapshot() {
586 for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
587 ActorSelection followerActor = context.getPeerActorSelection(e.getKey());
589 if (followerActor != null) {
590 long nextIndex = e.getValue().getNextIndex();
// Same lag criterion as installSnapshotIfNeeded: entries gone from the in-memory
// log but present in the snapshot.
592 if (!context.getReplicatedLog().isPresent(nextIndex) &&
593 context.getReplicatedLog().isInSnapshot(nextIndex)) {
594 sendSnapshotChunk(followerActor, e.getKey());
601 * Sends a snapshot chunk to a given follower
602 * InstallSnapshot should qualify as a heartbeat too.
604 private void sendSnapshotChunk(ActorSelection followerActor, String followerId) {
// No-op when no snapshot is held in memory.
606 if (snapshot.isPresent()) {
607 ByteString nextSnapshotChunk = getNextSnapshotChunk(followerId,snapshot.get());
609 // Note: the previous call to getNextSnapshotChunk has the side-effect of adding
610 // followerId to the followerToSnapshot map.
611 FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
// Build the InstallSnapshot message; the last-chunk hash lets the follower verify
// chunk ordering/integrity across retries.
614 new InstallSnapshot(currentTerm(), context.getId(),
615 context.getReplicatedLog().getSnapshotIndex(),
616 context.getReplicatedLog().getSnapshotTerm(),
618 followerToSnapshot.incrementChunkIndex(),
619 followerToSnapshot.getTotalChunks(),
620 Optional.of(followerToSnapshot.getLastChunkHashCode())
624 LOG.info("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}",
625 context.getId(), followerActor.path(),
626 followerToSnapshot.getChunkIndex(),
627 followerToSnapshot.getTotalChunks());
// getNextSnapshotChunk declares IOException; log and carry on (best-effort send —
// the follower's reply/timeout path drives retries).
629 } catch (IOException e) {
630 LOG.error(e, "{}: InstallSnapshot failed for Leader.", context.getId());
635 * Acccepts snaphot as ByteString, enters into map for future chunks
636 * creates and return a ByteString chunk
638 private ByteString getNextSnapshotChunk(String followerId, ByteString snapshotBytes) throws IOException {
// Lazily creates the per-follower transfer state on first use (side effect relied
// upon by sendSnapshotChunk).
639 FollowerToSnapshot followerToSnapshot = mapFollowerToSnapshot.get(followerId);
640 if (followerToSnapshot == null) {
641 followerToSnapshot = new FollowerToSnapshot(snapshotBytes);
642 mapFollowerToSnapshot.put(followerId, followerToSnapshot);
644 ByteString nextChunk = followerToSnapshot.getNextChunk();
645 if (LOG.isDebugEnabled()) {
646 LOG.debug("{}: Leader's snapshot nextChunk size:{}", context.getId(), nextChunk.size());
// Sends AppendEntries/heartbeats to all followers; no-op with no followers.
651 private void sendHeartBeat() {
652 if (!followerToLog.isEmpty()) {
// Cancels any outstanding heartbeat timer.
657 private void stopHeartBeat() {
658 if (heartbeatSchedule != null && !heartbeatSchedule.isCancelled()) {
659 heartbeatSchedule.cancel();
// Schedules a single SendHeartBeat self-message after the given interval.
663 private void scheduleHeartBeat(FiniteDuration interval) {
664 if (followerToLog.isEmpty()) {
665 // Optimization - do not bother scheduling a heartbeat as there are
672 // Schedule a heartbeat. When the scheduler triggers a SendHeartbeat
673 // message is sent to itself.
674 // Scheduling the heartbeat only once here because heartbeats do not
675 // need to be sent if there are other messages being sent to the remote
// One-shot schedule; handleMessage re-arms it after every processed message.
677 heartbeatSchedule = context.getActorSystem().scheduler().scheduleOnce(
678 interval, context.getActor(), new SendHeartBeat(),
679 context.getActorSystem().dispatcher(), context.getActor());
// Behavior teardown; body not visible in this extraction (presumably stops the
// heartbeat — confirm against the original file).
683 public void close() throws Exception {
// The leader reports itself as the current leader.
688 public String getLeaderId() {
689 return context.getId();
// Returns true when fewer than minIsolatedLeaderPeerCount followers are active,
// i.e. the leader can no longer reach a functioning majority of peers.
692 protected boolean isLeaderIsolated() {
693 int minPresent = minIsolatedLeaderPeerCount;
694 for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
695 if (followerLogInformation.isFollowerActive()) {
// Early exit once enough active followers have been counted.
697 if (minPresent == 0) {
702 return (minPresent != 0);
706 * Encapsulates the snapshot bytestring and handles the logic of sending
709 protected class FollowerToSnapshot {
710 private final ByteString snapshotBytes;
// Byte offset of the chunk currently (or about to be) sent.
711 private int offset = 0;
712 // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
713 private int replyReceivedForOffset;
714 // if replyStatus is false, the previous chunk is attempted
715 private boolean replyStatus = false;
// 1-based index of the current chunk (FIRST_CHUNK_INDEX = 1).
716 private int chunkIndex;
717 private final int totalChunks;
// Hash of the last successfully delivered chunk / next chunk to send; used by the
// follower to validate chunk ordering across retries.
718 private int lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
719 private int nextChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
721 public FollowerToSnapshot(ByteString snapshotBytes) {
722 this.snapshotBytes = snapshotBytes;
723 int size = snapshotBytes.size();
// Ceiling division: one extra chunk for any remainder bytes.
724 totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
725 ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
726 if(LOG.isDebugEnabled()) {
727 LOG.debug("{}: Snapshot {} bytes, total chunks to send:{}",
728 context.getId(), size, totalChunks);
// -1 so the very first canSendNextChunk() comparison (against offset 0) passes
// only after the initial send path sets things up.
730 replyReceivedForOffset = -1;
731 chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;
734 public ByteString getSnapshotBytes() {
735 return snapshotBytes;
// Advances (or on prior failure, re-uses) the byte offset; return value is the
// start offset of the chunk to send. Part of the body is not visible here.
738 public int incrementOffset() {
740 // if prev chunk failed, we would want to sent the same chunk again
741 offset = offset + context.getConfigParams().getSnapshotChunkSize();
// Advances the chunk counter; on prior failure the same chunk index is retried.
746 public int incrementChunkIndex() {
748 // if prev chunk failed, we would want to sent the same chunk again
749 chunkIndex = chunkIndex + 1;
754 public int getChunkIndex() {
758 public int getTotalChunks() {
762 public boolean canSendNextChunk() {
763 // we only send a false if a chunk is sent but we have not received a reply yet
764 return replyReceivedForOffset == offset;
767 public boolean isLastChunk(int chunkIndex) {
768 return totalChunks == chunkIndex;
// Records the outcome of the last send; both branches record the reply so
// canSendNextChunk() becomes true, success additionally rolls the chunk hash forward.
771 public void markSendStatus(boolean success) {
773 // if the chunk sent was successful
774 replyReceivedForOffset = offset;
776 lastChunkHashCode = nextChunkHashCode;
778 // if the chunk sent was failure
779 replyReceivedForOffset = offset;
// Slices the next chunk out of the snapshot, clamping the final chunk to the
// remaining bytes, and records its hash for the follower-side ordering check.
784 public ByteString getNextChunk() {
785 int snapshotLength = getSnapshotBytes().size();
786 int start = incrementOffset();
787 int size = context.getConfigParams().getSnapshotChunkSize();
788 if (context.getConfigParams().getSnapshotChunkSize() > snapshotLength) {
789 size = snapshotLength;
791 if ((start + context.getConfigParams().getSnapshotChunkSize()) > snapshotLength) {
792 size = snapshotLength - start;
796 if(LOG.isDebugEnabled()) {
797 LOG.debug("{}: Next chunk: length={}, offset={},size={}", context.getId(),
798 snapshotLength, start, size);
800 ByteString substring = getSnapshotBytes().substring(start, start + size);
801 nextChunkHashCode = substring.hashCode();
806 * reset should be called when the Follower needs to be sent the snapshot from the beginning
// Rewinds the transfer to the first chunk with the initial hash code.
811 replyReceivedForOffset = offset;
812 chunkIndex = AbstractLeader.FIRST_CHUNK_INDEX;
813 lastChunkHashCode = AbstractLeader.INITIAL_LAST_CHUNK_HASH_CODE;
816 public int getLastChunkHashCode() {
817 return lastChunkHashCode;
821 // called from example-actor for printing the follower-states
// Renders each follower's id and active flag into a single diagnostic string.
822 public String printFollowerStates() {
823 final StringBuilder sb = new StringBuilder();
826 for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
828 sb.append(followerLogInformation.getId());
829 sb.append(" state:");
830 sb.append(followerLogInformation.isFollowerActive());
835 return sb.toString();
// Test/diagnostic accessors for follower state and snapshot-transfer bookkeeping.
839 public FollowerLogInformation getFollower(String followerId) {
840 return followerToLog.get(followerId);
// Visible for testing: injects a FollowerToSnapshot transfer state.
844 protected void setFollowerSnapshot(String followerId, FollowerToSnapshot snapshot) {
845 mapFollowerToSnapshot.put(followerId, snapshot);
849 public int followerSnapshotSize() {
850 return mapFollowerToSnapshot.size();
854 public int followerLogSize() {
855 return followerToLog.size();