/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.raft.behaviors;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.io.ByteSource;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Queue;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.io.SharedFileBackedOutputStream;
import org.opendaylight.controller.cluster.messaging.MessageSlicer;
import org.opendaylight.controller.cluster.messaging.SliceOptions;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
import org.opendaylight.controller.cluster.raft.PeerInfo;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.VotingState;
import org.opendaylight.controller.cluster.raft.base.messages.CheckConsensusReached;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
import scala.concurrent.duration.FiniteDuration;
/**
 * The behavior of a RaftActor when it is in the Leader state.
 *
 * <p>
 * Leaders:
 * <ul>
 * <li> Upon election: send initial empty AppendEntries RPCs
 * (heartbeat) to each server; repeat during idle periods to
 * prevent election timeouts (§5.2)
 * <li> If command received from client: append entry to local log,
 * respond after entry applied to state machine (§5.3)
 * <li> If last log index ≥ nextIndex for a follower: send
 * AppendEntries RPC with log entries starting at nextIndex
 * <li> If successful: update nextIndex and matchIndex for
 * follower (§5.3)
 * <li> If AppendEntries fails because of log inconsistency:
 * decrement nextIndex and retry (§5.3)
 * <li> If there exists an N such that N > commitIndex, a majority
 * of matchIndex[i] ≥ N, and log[N].term == currentTerm:
 * set commitIndex = N (§5.3, §5.4).
 * </ul>
 */
public abstract class AbstractLeader extends AbstractRaftActorBehavior {
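    /**
     * Per-follower replication bookkeeping (matchIndex, nextIndex, activity and install-snapshot state),
     * keyed by follower id.
     */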
    private final Map<String, FollowerLogInformation> followerToLog = new HashMap<>();
    /**
     * Lookup table for request contexts based on journal index. We could use a {@link Map} here, but we really
     * expect the entries to be modified in sequence, hence we open-code the lookup.
     * TODO: Evaluate the use of ArrayDeque(), as that has lower memory overhead. Non-head removals are more costly,
     * but we already expect those to be far from frequent.
     */
    private final Queue<ClientRequestTracker> trackers = new LinkedList<>();
    /**
     * Map of serialized AppendEntries output streams keyed by log index. This is used in conjunction with the
     * appendEntriesMessageSlicer for slicing single ReplicatedLogEntry payloads that exceed the message size
     * threshold. This Map allows the SharedFileBackedOutputStreams to be reused for multiple followers.
     */
    private final Map<Long, SharedFileBackedOutputStream> sharedSerializedAppendEntriesStreams = new HashMap<>();
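    /**
     * Slices AppendEntries messages whose single log entry payload exceeds the configured snapshot chunk size,
     * sending the pieces to the follower for reassembly (see getEntriesToSend).
     */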
    private final MessageSlicer appendEntriesMessageSlicer;

    private Cancellable heartbeatSchedule = null;
    private Optional<SnapshotHolder> snapshotHolder = Optional.absent();
    private int minReplicationCount;
    protected AbstractLeader(final RaftActorContext context, final RaftState state,
            @Nullable final AbstractLeader initializeFromLeader) {
        super(context, state);

        appendEntriesMessageSlicer = MessageSlicer.builder().logContext(logName())
                .messageSliceSize(context.getConfigParams().getSnapshotChunkSize())
                .expireStateAfterInactivity(context.getConfigParams().getElectionTimeOutInterval().toMillis() * 3,
                        TimeUnit.MILLISECONDS).build();

        if (initializeFromLeader != null) {
            followerToLog.putAll(initializeFromLeader.followerToLog);
            snapshotHolder = initializeFromLeader.snapshotHolder;
            trackers.addAll(initializeFromLeader.trackers);
        } else {
            for (PeerInfo peerInfo: context.getPeers()) {
                FollowerLogInformation followerLogInformation = new FollowerLogInformation(peerInfo, context);
                followerToLog.put(peerInfo.getId(), followerLogInformation);
            }
        }

        log.debug("{}: Election: Leader has following peers: {}", logName(), getFollowerIds());

        updateMinReplicaCount();

        // Immediately schedule a heartbeat
        // Upon election: send initial empty AppendEntries RPCs
        // (heartbeat) to each server; repeat during idle periods to
        // prevent election timeouts (§5.2)
        sendAppendEntries(0, false);

        // It is important to schedule this heartbeat here
        scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
    }
    protected AbstractLeader(final RaftActorContext context, final RaftState state) {
        this(context, state, null);
    }
    /**
     * Return an immutable collection of follower identifiers.
     *
     * @return Collection of follower IDs
     */
    public final Collection<String> getFollowerIds() {
        return followerToLog.keySet();
    }
    public void addFollower(final String followerId) {
        FollowerLogInformation followerLogInformation = new FollowerLogInformation(context.getPeerInfo(followerId),
                context);
        followerToLog.put(followerId, followerLogInformation);

        if (heartbeatSchedule == null) {
            scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
        }
    }

    public void removeFollower(final String followerId) {
        followerToLog.remove(followerId);
    }
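    /**
     * Recalculates the majority vote count from the current set of voting peers. As noted in
     * getMinIsolatedLeaderPeerCount(), the count includes the leader's own vote.
     */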
    public void updateMinReplicaCount() {
        int numVoting = 0;
        for (PeerInfo peer: context.getPeers()) {
            if (peer.isVoting()) {
                numVoting++;
            }
        }

        minReplicationCount = getMajorityVoteCount(numVoting);
    }
    protected int getMinIsolatedLeaderPeerCount() {
        // The isolated Leader peer count will be 1 less than the majority vote count.
        // This is because the vote count has the self vote counted in it:
        //   0 peers = 1 votesRequired, minIsolatedLeaderPeerCount = 0
        //   2 peers = 2 votesRequired, minIsolatedLeaderPeerCount = 1
        //   4 peers = 3 votesRequired, minIsolatedLeaderPeerCount = 2

        return minReplicationCount > 0 ? minReplicationCount - 1 : 0;
    }
    @SuppressFBWarnings(value = "NP_PARAMETER_MUST_BE_NONNULL_BUT_MARKED_AS_NULLABLE",
            justification = "Unrecognised NullableDecl")
    void setSnapshotHolder(@Nullable final SnapshotHolder snapshotHolder) {
        this.snapshotHolder = Optional.fromNullable(snapshotHolder);
    }

    boolean hasSnapshot() {
        return snapshotHolder.isPresent();
    }
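    // A leader does not expect AppendEntries from a peer in the same term; the message is logged for diagnostics.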
    protected RaftActorBehavior handleAppendEntries(final ActorRef sender,
            final AppendEntries appendEntries) {

        log.debug("{}: handleAppendEntries: {}", logName(), appendEntries);

        return this;
    }
    protected RaftActorBehavior handleAppendEntriesReply(final ActorRef sender,
            final AppendEntriesReply appendEntriesReply) {
        log.trace("{}: handleAppendEntriesReply: {}", logName(), appendEntriesReply);

        // Update the FollowerLogInformation
        String followerId = appendEntriesReply.getFollowerId();
        FollowerLogInformation followerLogInformation = followerToLog.get(followerId);

        if (followerLogInformation == null) {
            log.error("{}: handleAppendEntriesReply - unknown follower {}", logName(), followerId);
            return this;
        }

        if (followerLogInformation.timeSinceLastActivity()
                > context.getConfigParams().getElectionTimeOutInterval().toMillis()) {
            log.warn("{} : handleAppendEntriesReply delayed beyond election timeout, "
                    + "appendEntriesReply : {}, timeSinceLastActivity : {}, lastApplied : {}, commitIndex : {}",
                    logName(), appendEntriesReply, followerLogInformation.timeSinceLastActivity(),
                    context.getLastApplied(), context.getCommitIndex());
        }

        followerLogInformation.markFollowerActive();
        followerLogInformation.setPayloadVersion(appendEntriesReply.getPayloadVersion());
        followerLogInformation.setRaftVersion(appendEntriesReply.getRaftVersion());

        long followerLastLogIndex = appendEntriesReply.getLogLastIndex();
        long followersLastLogTermInLeadersLog = getLogEntryTerm(followerLastLogIndex);
        boolean updated = false;
        if (appendEntriesReply.getLogLastIndex() > context.getReplicatedLog().lastIndex()) {
            // The follower's log is actually ahead of the leader's log. Normally this doesn't happen
            // in raft as a node cannot become leader if its log is behind another's. However, the
            // non-voting semantics deviate a bit from raft. Only voting members participate in
            // elections and can become leader so it's possible for a non-voting follower to be ahead
            // of the leader. This can happen if persistence is disabled and all voting members are
            // restarted. In this case, the voting leader will start out with an empty log however
            // the non-voting followers still retain the previous data in memory. On the first
            // AppendEntries, the non-voting follower returns a successful reply b/c the prevLogIndex
            // sent by the leader is -1 and thus the integrity checks pass. However the follower's returned
            // lastLogIndex may be higher in which case we want to reset the follower by installing a
            // snapshot. It's also possible that the follower's last log index is behind the leader's.
            // However in this case the log terms won't match and the logs will conflict - this is handled
            // below.
            log.info("{}: handleAppendEntriesReply: follower {} lastIndex {} is ahead of our lastIndex {} "
                    + "(snapshotIndex {}) - forcing install snapshot", logName(), followerLogInformation.getId(),
                    appendEntriesReply.getLogLastIndex(), context.getReplicatedLog().lastIndex(),
                    context.getReplicatedLog().getSnapshotIndex());

            followerLogInformation.setMatchIndex(-1);
            followerLogInformation.setNextIndex(-1);

            initiateCaptureSnapshot(followerId);

        } else if (appendEntriesReply.isSuccess()) {
            if (followerLastLogIndex >= 0 && followersLastLogTermInLeadersLog >= 0
                    && followersLastLogTermInLeadersLog != appendEntriesReply.getLogLastTerm()) {
                // The follower's last entry is present in the leader's journal but the terms don't match so the
                // follower has a conflicting entry. Since the follower didn't report that it's out of sync, this means
                // either the previous leader entry sent didn't conflict or the previous leader entry is in the
                // snapshot and no longer in the journal. Either way, we set the follower's next index to 1 less than
                // the last index reported by the follower. For the former case, the leader will send all entries
                // starting with the previous follower's index and the follower will remove and replace the
                // conflicting entries as needed. For the latter, the leader will initiate an install snapshot.

                followerLogInformation.setNextIndex(followerLastLogIndex - 1);

                log.info("{}: handleAppendEntriesReply: follower {} last log term {} for index {} conflicts with the "
                        + "leader's {} - set the follower's next index to {}", logName(),
                        followerId, appendEntriesReply.getLogLastTerm(), appendEntriesReply.getLogLastIndex(),
                        followersLastLogTermInLeadersLog, followerLogInformation.getNextIndex());
            } else {
                updated = updateFollowerLogInformation(followerLogInformation, appendEntriesReply);
            }
        } else {
            log.info("{}: handleAppendEntriesReply - received unsuccessful reply: {}, leader snapshotIndex: {}",
                    logName(), appendEntriesReply, context.getReplicatedLog().getSnapshotIndex());

            if (appendEntriesReply.isForceInstallSnapshot()) {
                // Reset the follower's match and next index. This is to signal that this follower has nothing
                // in common with this Leader and so would require a snapshot to be installed
                followerLogInformation.setMatchIndex(-1);
                followerLogInformation.setNextIndex(-1);

                // Force initiate a snapshot capture
                initiateCaptureSnapshot(followerId);
            } else if (followerLastLogIndex < 0 || followersLastLogTermInLeadersLog >= 0
                    && followersLastLogTermInLeadersLog == appendEntriesReply.getLogLastTerm()) {
                // The follower's log is empty or the last entry is present in the leader's journal
                // and the terms match so the follower is just behind the leader's journal from
                // the last snapshot, if any. We'll catch up the follower quickly by starting at the
                // follower's last log index.

                updated = updateFollowerLogInformation(followerLogInformation, appendEntriesReply);

                log.info("{}: follower {} appears to be behind the leader from the last snapshot - "
                        + "updated: matchIndex: {}, nextIndex: {}", logName(), followerId,
                        followerLogInformation.getMatchIndex(), followerLogInformation.getNextIndex());
            } else {
                // The follower's log conflicts with leader's log so decrement follower's next index by 1
                // in an attempt to find where the logs match.
                if (followerLogInformation.decrNextIndex()) {

                    log.info("{}: follower {} last log term {} conflicts with the leader's {} - dec next index to {}",
                            logName(), followerId, appendEntriesReply.getLogLastTerm(),
                            followersLastLogTermInLeadersLog, followerLogInformation.getNextIndex());
                }
            }
        }

        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntriesReply from {}: commitIndex: {}, lastAppliedIndex: {}, currentTerm: {}",
                    logName(), followerId, context.getCommitIndex(), context.getLastApplied(), currentTerm());
        }

        possiblyUpdateCommitIndex();

        // Send the next log entry immediately, if possible; no need to wait for a heartbeat to trigger that event.
        sendUpdatesToFollower(followerId, followerLogInformation, false, !updated);

        return this;
    }
    private void possiblyUpdateCommitIndex() {
        // Figure out if we can update the commitIndex as follows:
        //   If there exists an index N such that N > commitIndex, a majority of matchIndex[i] ≥ N,
        //   and log[N].term == currentTerm:
        //   set commitIndex = N (§5.3, §5.4).
        for (long index = context.getCommitIndex() + 1; ; index++) {
            ReplicatedLogEntry replicatedLogEntry = context.getReplicatedLog().get(index);
            if (replicatedLogEntry == null) {
                log.trace("{}: ReplicatedLogEntry not found for index {} - snapshotIndex: {}, journal size: {}",
                        logName(), index, context.getReplicatedLog().getSnapshotIndex(),
                        context.getReplicatedLog().size());
                break;
            }

            // Count our entry if it has been persisted.
            int replicatedCount = replicatedLogEntry.isPersistencePending() ? 0 : 1;

            if (replicatedCount == 0) {
                // We don't commit and apply a log entry until we've gotten the ack from our local persistence,
                // even though there *shouldn't* be any issue with updating the commit index if we get a consensus
                // amongst the followers w/o the local persistence ack.
                break;
            }

            log.trace("{}: checking Nth index {}", logName(), index);
            for (FollowerLogInformation info : followerToLog.values()) {
                final PeerInfo peerInfo = context.getPeerInfo(info.getId());
                if (info.getMatchIndex() >= index && peerInfo != null && peerInfo.isVoting()) {
                    replicatedCount++;
                } else if (log.isTraceEnabled()) {
                    log.trace("{}: Not counting follower {} - matchIndex: {}, {}", logName(), info.getId(),
                            info.getMatchIndex(), peerInfo);
                }
            }

            if (log.isTraceEnabled()) {
                log.trace("{}: replicatedCount {}, minReplicationCount: {}", logName(), replicatedCount,
                        minReplicationCount);
            }

            if (replicatedCount >= minReplicationCount) {
                // Don't update the commit index if the log entry is from a previous term, as per §5.4.1:
                // "Raft never commits log entries from previous terms by counting replicas".
                // However we keep looping so we can make progress when new entries in the current term
                // reach consensus, as per §5.4.1: "once an entry from the current term is committed by
                // counting replicas, then all prior entries are committed indirectly".
                if (replicatedLogEntry.getTerm() == currentTerm()) {
                    log.trace("{}: Setting commit index to {}", logName(), index);
                    context.setCommitIndex(index);
                } else {
                    log.debug("{}: Not updating commit index to {} - retrieved log entry with index {}, "
                            + "term {} does not match the current term {}", logName(), index,
                            replicatedLogEntry.getIndex(), replicatedLogEntry.getTerm(), currentTerm());
                }
            } else {
                log.trace("{}: minReplicationCount not reached, actual {} - breaking", logName(), replicatedCount);
                break;
            }
        }

        // Apply the change to the state machine
        if (context.getCommitIndex() > context.getLastApplied()) {
            log.debug("{}: Applying to log - commitIndex: {}, lastAppliedIndex: {}", logName(),
                    context.getCommitIndex(), context.getLastApplied());

            applyLogToStateMachine(context.getCommitIndex());
        }

        if (!context.getSnapshotManager().isCapturing()) {
            purgeInMemoryLog();
        }
    }
    private boolean updateFollowerLogInformation(final FollowerLogInformation followerLogInformation,
            final AppendEntriesReply appendEntriesReply) {
        boolean updated = followerLogInformation.setMatchIndex(appendEntriesReply.getLogLastIndex());
        updated = followerLogInformation.setNextIndex(appendEntriesReply.getLogLastIndex() + 1) || updated;

        if (updated && log.isDebugEnabled()) {
            log.debug(
                "{}: handleAppendEntriesReply - FollowerLogInformation for {} updated: matchIndex: {}, nextIndex: {}",
                logName(), followerLogInformation.getId(), followerLogInformation.getMatchIndex(),
                followerLogInformation.getNextIndex());
        }

        return updated;
    }
    private void purgeInMemoryLog() {
        // Find the lowest index across followers which has been replicated to all, or lastApplied if there
        // are no followers, so that we keep clearing the log in the single-node case. We delete the in-memory
        // log from that index on, in order to minimize memory usage. We also share this info with the followers
        // through AppendEntries so that they can delete their log entries as well.
        long minReplicatedToAllIndex = followerToLog.isEmpty() ? context.getLastApplied() : Long.MAX_VALUE;
        for (FollowerLogInformation info : followerToLog.values()) {
            minReplicatedToAllIndex = Math.min(minReplicatedToAllIndex, info.getMatchIndex());
        }

        super.performSnapshotWithoutCapture(minReplicatedToAllIndex);
    }
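    /**
     * Removes and returns the ClientRequestTracker registered (by replicate()) for the given log index,
     * or null if none is found.
     */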
    protected ClientRequestTracker removeClientRequestTracker(final long logIndex) {
        final Iterator<ClientRequestTracker> it = trackers.iterator();
        while (it.hasNext()) {
            final ClientRequestTracker t = it.next();
            if (t.getIndex() == logIndex) {
                it.remove();
                return t;
            }
        }

        return null;
    }

    protected RaftActorBehavior handleRequestVoteReply(final ActorRef sender,
            final RequestVoteReply requestVoteReply) {
        return this;
    }
    protected void beforeSendHeartbeat() {}
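    // Central message dispatch for the leader: first let the message slicer handle slicing-related replies, then
    // apply the raft term check (step down if a higher term is seen), then handle heartbeat, snapshot, replication
    // and consensus-check messages before delegating to the superclass.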
    public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
        Preconditions.checkNotNull(sender, "sender should not be null");

        if (appendEntriesMessageSlicer.handleMessage(message)) {
            return this;
        }

        if (message instanceof RaftRPC) {
            RaftRPC rpc = (RaftRPC) message;
            // If RPC request or response contains term T > currentTerm:
            // set currentTerm = T, convert to follower (§5.1)
            // This applies to all RPC messages and responses
            if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
                log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
                        logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());

                context.getTermInformation().updateAndPersist(rpc.getTerm(), null);

                // This is a special case. Normally when stepping down as leader we don't process and reply to the
                // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
                // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
                // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
                // state and starting a new election and grabbing leadership back before the other candidate node can
                // start a new election due to lack of responses. This case would only occur if there isn't a majority
                // of other nodes available that can elect the requesting candidate. Since we're transferring
                // leadership, we should make every effort to get the requesting node elected.
                if (message instanceof RequestVote && context.getRaftActorLeadershipTransferCohort() != null) {
                    log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
                    super.handleMessage(sender, message);
                }

                return internalSwitchBehavior(RaftState.Follower);
            }
        }

        if (message instanceof SendHeartBeat) {
            beforeSendHeartbeat();
            sendHeartBeat();
            scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
        } else if (message instanceof SendInstallSnapshot) {
            SendInstallSnapshot sendInstallSnapshot = (SendInstallSnapshot) message;
            setSnapshotHolder(new SnapshotHolder(sendInstallSnapshot.getSnapshot(),
                    sendInstallSnapshot.getSnapshotBytes()));
            sendInstallSnapshot();
        } else if (message instanceof Replicate) {
            replicate((Replicate) message);
        } else if (message instanceof InstallSnapshotReply) {
            handleInstallSnapshotReply((InstallSnapshotReply) message);
        } else if (message instanceof CheckConsensusReached) {
            possiblyUpdateCommitIndex();
        }

        return super.handleMessage(sender, message);
    }
    private void handleInstallSnapshotReply(final InstallSnapshotReply reply) {
        log.debug("{}: handleInstallSnapshotReply: {}", logName(), reply);

        String followerId = reply.getFollowerId();
        FollowerLogInformation followerLogInformation = followerToLog.get(followerId);
        if (followerLogInformation == null) {
            // This can happen during AddServer if it times out.
            log.error("{}: FollowerLogInformation not found for follower {} in InstallSnapshotReply",
                    logName(), followerId);
            return;
        }

        LeaderInstallSnapshotState installSnapshotState = followerLogInformation.getInstallSnapshotState();
        if (installSnapshotState == null) {
            log.error("{}: LeaderInstallSnapshotState not found for follower {} in InstallSnapshotReply",
                    logName(), followerId);
            return;
        }

        followerLogInformation.markFollowerActive();

        if (installSnapshotState.getChunkIndex() == reply.getChunkIndex()) {
            boolean wasLastChunk = false;
            if (reply.isSuccess()) {
                if (installSnapshotState.isLastChunk(reply.getChunkIndex())) {
                    // this was the last chunk reply

                    long followerMatchIndex = snapshotHolder.get().getLastIncludedIndex();
                    followerLogInformation.setMatchIndex(followerMatchIndex);
                    followerLogInformation.setNextIndex(followerMatchIndex + 1);
                    followerLogInformation.clearLeaderInstallSnapshotState();

                    log.info("{}: Snapshot successfully installed on follower {} (last chunk {}) - "
                            + "matchIndex set to {}, nextIndex set to {}", logName(), followerId, reply.getChunkIndex(),
                            followerLogInformation.getMatchIndex(), followerLogInformation.getNextIndex());

                    if (!anyFollowersInstallingSnapshot()) {
                        // once there are no pending followers receiving snapshots
                        // we can remove the snapshot from memory
                        setSnapshotHolder(null);
                    }

                    wasLastChunk = true;
                    if (context.getPeerInfo(followerId).getVotingState() == VotingState.VOTING_NOT_INITIALIZED) {
                        UnInitializedFollowerSnapshotReply unInitFollowerSnapshotSuccess =
                                new UnInitializedFollowerSnapshotReply(followerId);
                        context.getActor().tell(unInitFollowerSnapshotSuccess, context.getActor());
                        log.debug("Sent message UnInitializedFollowerSnapshotReply to self");
                    }
                } else {
                    installSnapshotState.markSendStatus(true);
                }
            } else {
                log.warn("{}: Received failed InstallSnapshotReply - will retry: {}", logName(), reply);

                installSnapshotState.markSendStatus(false);
            }

            if (wasLastChunk) {
                if (!context.getSnapshotManager().isCapturing()) {
                    // Since the follower is now caught up try to purge the log.
                    purgeInMemoryLog();
                }
            } else {
                ActorSelection followerActor = context.getPeerActorSelection(followerId);
                if (followerActor != null) {
                    sendSnapshotChunk(followerActor, followerLogInformation);
                }
            }
        } else {
            log.error("{}: Chunk index {} in InstallSnapshotReply from follower {} does not match expected index {}",
                    logName(), reply.getChunkIndex(), followerId,
                    installSnapshotState.getChunkIndex());

            if (reply.getChunkIndex() == LeaderInstallSnapshotState.INVALID_CHUNK_INDEX) {
                // Since the Follower did not find this index to be valid we should reset the follower snapshot
                // so that installing the snapshot can resume from the beginning
                installSnapshotState.reset();
            }
        }
    }
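    // Returns true if any follower still has an active install-snapshot transfer in progress.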
    private boolean anyFollowersInstallingSnapshot() {
        for (FollowerLogInformation info: followerToLog.values()) {
            if (info.getInstallSnapshotState() != null) {
                return true;
            }
        }

        return false;
    }
    private void replicate(final Replicate replicate) {
        long logIndex = replicate.getReplicatedLogEntry().getIndex();

        log.debug("{}: Replicate message: identifier: {}, logIndex: {}, payload: {}, isSendImmediate: {}", logName(),
                replicate.getIdentifier(), logIndex, replicate.getReplicatedLogEntry().getData().getClass(),
                replicate.isSendImmediate());

        // Create a tracker entry; we will use this later to notify the client actor.
        if (replicate.getClientActor() != null) {
            trackers.add(new ClientRequestTrackerImpl(replicate.getClientActor(), replicate.getIdentifier(),
                    logIndex));
        }

        boolean applyModificationToState = !context.anyVotingPeers()
                || context.getRaftPolicy().applyModificationToStateBeforeConsensus();

        if (applyModificationToState) {
            context.setCommitIndex(logIndex);
            applyLogToStateMachine(logIndex);
        }

        if (replicate.isSendImmediate() && !followerToLog.isEmpty()) {
            sendAppendEntries(0, false);
        }
    }
    protected void sendAppendEntries(final long timeSinceLastActivityInterval, final boolean isHeartbeat) {
        // Send an AppendEntries to all followers
        for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
            final String followerId = e.getKey();
            final FollowerLogInformation followerLogInformation = e.getValue();
            // This check helps avoid sending a duplicate message to the follower
            if (!followerLogInformation.isFollowerActive()
                    || followerLogInformation.timeSinceLastActivity() >= timeSinceLastActivityInterval) {
                sendUpdatesToFollower(followerId, followerLogInformation, true, isHeartbeat);
            }
        }
    }
    /**
     * This method checks if any update needs to be sent to the given follower. This includes appending log entries,
     * sending the next snapshot chunk, and initiating a snapshot.
     */
    private void sendUpdatesToFollower(final String followerId, final FollowerLogInformation followerLogInformation,
            final boolean sendHeartbeat, final boolean isHeartbeat) {

        ActorSelection followerActor = context.getPeerActorSelection(followerId);
        if (followerActor != null) {
            long followerNextIndex = followerLogInformation.getNextIndex();
            boolean isFollowerActive = followerLogInformation.isFollowerActive();
            boolean sendAppendEntries = false;
            List<ReplicatedLogEntry> entries = Collections.emptyList();

            LeaderInstallSnapshotState installSnapshotState = followerLogInformation.getInstallSnapshotState();
            if (installSnapshotState != null) {
                // if an install snapshot is in progress, send the next chunk if possible
                if (isFollowerActive && installSnapshotState.canSendNextChunk()) {
                    sendSnapshotChunk(followerActor, followerLogInformation);
                } else if (sendHeartbeat) {
                    // we send a heartbeat even if we have not received a reply for the last chunk
                    sendAppendEntries = true;
                }
            } else if (followerLogInformation.isLogEntrySlicingInProgress()) {
                sendAppendEntries = sendHeartbeat;
            } else {
                long leaderLastIndex = context.getReplicatedLog().lastIndex();
                long leaderSnapShotIndex = context.getReplicatedLog().getSnapshotIndex();

                if (!isHeartbeat && log.isDebugEnabled() || log.isTraceEnabled()) {
                    log.debug("{}: Checking sendAppendEntries for follower {}: active: {}, followerNextIndex: {}, "
                            + "leaderLastIndex: {}, leaderSnapShotIndex: {}", logName(), followerId, isFollowerActive,
                            followerNextIndex, leaderLastIndex, leaderSnapShotIndex);
                }

                if (isFollowerActive && context.getReplicatedLog().isPresent(followerNextIndex)) {

                    log.debug("{}: sendAppendEntries: {} is present for follower {}", logName(),
                            followerNextIndex, followerId);

                    if (followerLogInformation.okToReplicate()) {
                        entries = getEntriesToSend(followerLogInformation, followerActor);
                        sendAppendEntries = true;
                    }
                } else if (isFollowerActive && followerNextIndex >= 0
                        && leaderLastIndex > followerNextIndex && !context.getSnapshotManager().isCapturing()) {
                    // if the follower's next index is not present in the leader's log, and
                    // if the follower is not just starting up and the leader's index is greater than the follower's,
                    // then a snapshot should be sent

                    // Send heartbeat to follower whenever install snapshot is initiated.
                    sendAppendEntries = true;
                    if (canInstallSnapshot(followerNextIndex)) {
                        log.info("{}: Initiating install snapshot to follower {}: follower nextIndex: {}, leader "
                                + "snapshotIndex: {}, leader lastIndex: {}, leader log size: {}", logName(), followerId,
                                followerNextIndex, leaderSnapShotIndex, leaderLastIndex,
                                context.getReplicatedLog().size());

                        initiateCaptureSnapshot(followerId);
                    } else {
                        // It doesn't seem like we should ever reach here - most likely indicates something is wrong.
                        log.info("{}: Follower {} is behind but cannot install snapshot: follower nextIndex: {}, "
                                + "leader snapshotIndex: {}, leader lastIndex: {}, leader log size: {}", logName(),
                                followerId, followerNextIndex, leaderSnapShotIndex, leaderLastIndex,
                                context.getReplicatedLog().size());
                    }
                } else if (sendHeartbeat) {
                    // we send an AppendEntries even if the follower is inactive, in order to update the
                    // follower's timestamp in case it becomes active again
                    sendAppendEntries = true;
                }
            }

            if (sendAppendEntries) {
                sendAppendEntriesToFollower(followerActor, entries, followerLogInformation);
            }
        }
    }
    private List<ReplicatedLogEntry> getEntriesToSend(final FollowerLogInformation followerLogInfo,
            final ActorSelection followerActor) {
        // Try to get all the entries in the journal but not exceeding the max data size for a single AppendEntries
        // message.
        int maxEntries = (int) context.getReplicatedLog().size();
        final int maxDataSize = context.getConfigParams().getSnapshotChunkSize();
        final long followerNextIndex = followerLogInfo.getNextIndex();
        List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex,
                maxEntries, maxDataSize);

        // If the first entry's size exceeds the max data size threshold, it will be returned from the call above. If
        // that is the case, then we need to slice it into smaller chunks.
        if (!(entries.size() == 1 && entries.get(0).getData().size() > maxDataSize)) {
            // Don't need to slice.
            return entries;
        }

        log.debug("{}: Log entry size {} exceeds max payload size {}", logName(), entries.get(0).getData().size(),
                maxDataSize);

        // If an AppendEntries has already been serialized for the log index then reuse the
        // SharedFileBackedOutputStream.
        final Long logIndex = entries.get(0).getIndex();
        SharedFileBackedOutputStream fileBackedStream = sharedSerializedAppendEntriesStreams.get(logIndex);
        if (fileBackedStream == null) {
            fileBackedStream = context.getFileBackedOutputStreamFactory().newSharedInstance();

            final AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
                    getLogEntryIndex(followerNextIndex - 1), getLogEntryTerm(followerNextIndex - 1), entries,
                    context.getCommitIndex(), getReplicatedToAllIndex(), context.getPayloadVersion());

            log.debug("{}: Serializing {} for slicing for follower {}", logName(), appendEntries,
                    followerLogInfo.getId());

            try (ObjectOutputStream out = new ObjectOutputStream(fileBackedStream)) {
                out.writeObject(appendEntries);
            } catch (IOException e) {
                log.error("{}: Error serializing {}", logName(), appendEntries, e);
                fileBackedStream.cleanup();
                return Collections.emptyList();
            }

            sharedSerializedAppendEntriesStreams.put(logIndex, fileBackedStream);

            fileBackedStream.setOnCleanupCallback(index -> {
                log.debug("{}: On SharedFileBackedOutputStream cleanup for index {}", logName(), index);
                sharedSerializedAppendEntriesStreams.remove(index);
            }, logIndex);
        } else {
            log.debug("{}: Reusing SharedFileBackedOutputStream for follower {}", logName(), followerLogInfo.getId());
            fileBackedStream.incrementUsageCount();
        }

        log.debug("{}: Slicing stream for index {}, follower {}", logName(), logIndex, followerLogInfo.getId());

        // Record that slicing is in progress for the follower.
        followerLogInfo.setSlicedLogEntryIndex(logIndex);

        final FollowerIdentifier identifier = new FollowerIdentifier(followerLogInfo.getId());
        appendEntriesMessageSlicer.slice(SliceOptions.builder().identifier(identifier)
                .fileBackedOutputStream(fileBackedStream).sendTo(followerActor).replyTo(actor())
                .onFailureCallback(failure -> {
                    log.error("{}: Error slicing AppendEntries for follower {}", logName(),
                            followerLogInfo.getId(), failure);
                    followerLogInfo.setSlicedLogEntryIndex(FollowerLogInformation.NO_INDEX);
                }).build());

        // The entry is being sliced and sent separately, so the immediate AppendEntries carries no entries.
        return Collections.emptyList();
    }
    private void sendAppendEntriesToFollower(final ActorSelection followerActor, final List<ReplicatedLogEntry> entries,
            final FollowerLogInformation followerLogInformation) {
        // In certain cases outlined below we don't want to send the actual commit index to prevent the follower from
        // possibly committing and applying conflicting entries (those with same index, different term) from a prior
        // term that weren't replicated to a majority, which would be a violation of raft.
        // - if the follower isn't active. In this case we don't know the state of the follower and we send an
        //   empty AppendEntries as a heart beat to prevent election.
        // - if we're in the process of installing a snapshot. In this case we don't send any new entries but still
        //   need to send AppendEntries to prevent election.
        // - if we're in the process of slicing an AppendEntries with a large log entry payload. In this case we
        //   need to send an empty AppendEntries to prevent election.
        boolean isInstallingSnapshot = followerLogInformation.getInstallSnapshotState() != null;
        long leaderCommitIndex = isInstallingSnapshot || followerLogInformation.isLogEntrySlicingInProgress()
                || !followerLogInformation.isFollowerActive() ? -1 : context.getCommitIndex();

        long followerNextIndex = followerLogInformation.getNextIndex();
        AppendEntries appendEntries = new AppendEntries(currentTerm(), context.getId(),
                getLogEntryIndex(followerNextIndex - 1),
                getLogEntryTerm(followerNextIndex - 1), entries,
                leaderCommitIndex, super.getReplicatedToAllIndex(), context.getPayloadVersion());

        if (!entries.isEmpty() || log.isTraceEnabled()) {
            log.debug("{}: Sending AppendEntries to follower {}: {}", logName(), followerLogInformation.getId(),
                    appendEntries);
        }

        followerActor.tell(appendEntries, actor());
    }
    /**
     * Initiates a snapshot capture to install on a follower.
     *
     * <p>
     * Install Snapshot works as follows:
     * 1. Leader initiates the capture snapshot by calling createSnapshot on the RaftActor.
     * 2. On receipt of the CaptureSnapshotReply message, the RaftActor persists the snapshot and makes a call to
     * the Leader's handleMessage with a SendInstallSnapshot message.
     * 3. The Leader obtains and stores the Snapshot from the SendInstallSnapshot message and sends it in chunks to
     * the Follower via InstallSnapshot messages.
     * 4. For each chunk, the Follower sends back an InstallSnapshotReply.
     * 5. On receipt of the InstallSnapshotReply for the last chunk, the Leader marks the install complete for that
     * follower.
     * 6. If another follower requires a snapshot and a snapshot has been collected (via SendInstallSnapshot)
     * then send the existing snapshot in chunks to the follower.
     *
     * @param followerId the id of the follower.
     * @return true if capture was initiated, false otherwise.
     */
    public boolean initiateCaptureSnapshot(final String followerId) {
        FollowerLogInformation followerLogInfo = followerToLog.get(followerId);
        if (snapshotHolder.isPresent()) {
            // If a snapshot is present in memory, most likely another install is in progress, so there is no need
            // to capture a snapshot. This could happen if another follower needs an install when one is going on.
            final ActorSelection followerActor = context.getPeerActorSelection(followerId);

            // Note: sendSnapshotChunk will set the LeaderInstallSnapshotState.
            sendSnapshotChunk(followerActor, followerLogInfo);
            return true;
        }

        boolean captureInitiated = context.getSnapshotManager().captureToInstall(context.getReplicatedLog().last(),
                this.getReplicatedToAllIndex(), followerId);
        if (captureInitiated) {
            followerLogInfo.setLeaderInstallSnapshotState(new LeaderInstallSnapshotState(
                    context.getConfigParams().getSnapshotChunkSize(), logName()));
        }

        return captureInitiated;
    }
    private boolean canInstallSnapshot(final long nextIndex) {
        // If the follower's nextIndex is -1 then we might as well send it a snapshot.
        // Otherwise send it a snapshot only if the nextIndex is not present in the log but is present
        // in the snapshot.
        return nextIndex == -1 || !context.getReplicatedLog().isPresent(nextIndex)
                && context.getReplicatedLog().isInSnapshot(nextIndex);
    }
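    // Sends the current snapshot, chunk by chunk, to every reachable follower that is already mid-install,
    // is not yet initialized, or whose nextIndex is only covered by the snapshot.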
    private void sendInstallSnapshot() {
        log.debug("{}: sendInstallSnapshot", logName());
        for (Entry<String, FollowerLogInformation> e : followerToLog.entrySet()) {
            String followerId = e.getKey();
            ActorSelection followerActor = context.getPeerActorSelection(followerId);
            FollowerLogInformation followerLogInfo = e.getValue();

            if (followerActor != null) {
                long nextIndex = followerLogInfo.getNextIndex();
                if (followerLogInfo.getInstallSnapshotState() != null
                        || context.getPeerInfo(followerId).getVotingState() == VotingState.VOTING_NOT_INITIALIZED
                        || canInstallSnapshot(nextIndex)) {
                    sendSnapshotChunk(followerActor, followerLogInfo);
                }
            }
        }
    }
    /**
     * Sends a snapshot chunk to a given follower.
     * InstallSnapshot should qualify as a heartbeat too.
     */
    private void sendSnapshotChunk(final ActorSelection followerActor, final FollowerLogInformation followerLogInfo) {
        if (snapshotHolder.isPresent()) {
            LeaderInstallSnapshotState installSnapshotState = followerLogInfo.getInstallSnapshotState();
            if (installSnapshotState == null) {
                installSnapshotState = new LeaderInstallSnapshotState(context.getConfigParams().getSnapshotChunkSize(),
                        logName());
                followerLogInfo.setLeaderInstallSnapshotState(installSnapshotState);
            }

            try {
                // Ensure the snapshot bytes are set - this is a no-op.
                installSnapshotState.setSnapshotBytes(snapshotHolder.get().getSnapshotBytes());

                if (!installSnapshotState.canSendNextChunk()) {
                    return;
                }

                byte[] nextSnapshotChunk = installSnapshotState.getNextChunk();

                log.debug("{}: next snapshot chunk size for follower {}: {}", logName(), followerLogInfo.getId(),
                        nextSnapshotChunk.length);

                int nextChunkIndex = installSnapshotState.incrementChunkIndex();
                Optional<ServerConfigurationPayload> serverConfig = Optional.absent();
                if (installSnapshotState.isLastChunk(nextChunkIndex)) {
                    serverConfig = Optional.fromNullable(context.getPeerServerInfo(true));
                }

                followerActor.tell(
                        new InstallSnapshot(currentTerm(), context.getId(),
                                snapshotHolder.get().getLastIncludedIndex(),
                                snapshotHolder.get().getLastIncludedTerm(),
                                nextSnapshotChunk,
                                nextChunkIndex,
                                installSnapshotState.getTotalChunks(),
                                Optional.of(installSnapshotState.getLastChunkHashCode()),
                                serverConfig
                        ).toSerializable(followerLogInfo.getRaftVersion()),
                        actor());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }

            log.debug("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}", logName(), followerActor.path(),
                    installSnapshotState.getChunkIndex(), installSnapshotState.getTotalChunks());
        }
    }
    private void sendHeartBeat() {
        if (!followerToLog.isEmpty()) {
            log.trace("{}: Sending heartbeat", logName());
            sendAppendEntries(context.getConfigParams().getHeartBeatInterval().toMillis(), true);

            appendEntriesMessageSlicer.checkExpiredSlicedMessageState();
        }
    }

    private void stopHeartBeat() {
        if (heartbeatSchedule != null && !heartbeatSchedule.isCancelled()) {
            heartbeatSchedule.cancel();
        }
    }

    private void scheduleHeartBeat(final FiniteDuration interval) {
        if (followerToLog.isEmpty()) {
            // Optimization - do not bother scheduling a heartbeat as there are
            // no followers.
            return;
        }

        // Schedule a heartbeat. When the scheduler triggers, a SendHeartBeat
        // message is sent to self.
        // Scheduling the heartbeat only once here because heartbeats do not
        // need to be sent if there are other messages being sent to the remote
        // actor.
        heartbeatSchedule = context.getActorSystem().scheduler().scheduleOnce(
                interval, context.getActor(), SendHeartBeat.INSTANCE,
                context.getActorSystem().dispatcher(), context.getActor());
    }
    public void close() {
        stopHeartBeat();
        appendEntriesMessageSlicer.close();
    }

    public final String getLeaderId() {
        return context.getId();
    }

    public final short getLeaderPayloadVersion() {
        return context.getPayloadVersion();
    }
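    // The leader is considered isolated when fewer than getMinIsolatedLeaderPeerCount() voting followers
    // have recently responded (i.e. are currently marked active).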
    protected boolean isLeaderIsolated() {
        int minPresent = getMinIsolatedLeaderPeerCount();
        for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
            final PeerInfo peerInfo = context.getPeerInfo(followerLogInformation.getId());
            if (peerInfo != null && peerInfo.isVoting() && followerLogInformation.isFollowerActive()) {
                --minPresent;
                if (minPresent == 0) {
                    return false;
                }
            }
        }

        return minPresent != 0;
    }
    // called from example-actor for printing the follower-states
    public String printFollowerStates() {
        final StringBuilder sb = new StringBuilder();

        for (FollowerLogInformation followerLogInformation : followerToLog.values()) {
            sb.append(followerLogInformation.getId());
            sb.append(" state:");
            sb.append(followerLogInformation.isFollowerActive());
        }

        return sb.toString();
    }
    public FollowerLogInformation getFollower(final String followerId) {
        return followerToLog.get(followerId);
    }

    public int followerLogSize() {
        return followerToLog.size();
    }
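    /**
     * Holder for the metadata and serialized bytes of the snapshot currently being installed on followers.
     */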
    static class SnapshotHolder {
        private final long lastIncludedTerm;
        private final long lastIncludedIndex;
        private final ByteSource snapshotBytes;

        SnapshotHolder(final Snapshot snapshot, final ByteSource snapshotBytes) {
            this.lastIncludedTerm = snapshot.getLastAppliedTerm();
            this.lastIncludedIndex = snapshot.getLastAppliedIndex();
            this.snapshotBytes = snapshotBytes;
        }

        long getLastIncludedTerm() {
            return lastIncludedTerm;
        }

        long getLastIncludedIndex() {
            return lastIncludedIndex;
        }

        ByteSource getSnapshotBytes() {
            return snapshotBytes;
        }
    }
}