/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.raft.behaviors;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Address;
import akka.cluster.Cluster;
import akka.cluster.ClusterEvent.CurrentClusterState;
import akka.cluster.Member;
import akka.cluster.MemberStatus;
import akka.japi.Procedure;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.messaging.MessageAssembler;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;

/**
 * The behavior of a RaftActor in the Follower raft state.
 * <ul>
 * <li> Respond to RPCs from candidates and leaders
 * <li> If election timeout elapses without receiving AppendEntries
 * RPC from current leader or granting vote to candidate:
 * convert to candidate
 * </ul>
 */
public class Follower extends AbstractRaftActorBehavior {
    private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18;

    private final SyncStatusTracker initialSyncStatusTracker;

    private final MessageAssembler appendEntriesMessageAssembler;
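
    // Tracks how long it has been since a message from the leader was processed. handleElectionTimeout() uses
    // this to distinguish a genuinely silent leader from an ElectionTimeout that was merely delayed in the mailbox.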
    private final Stopwatch lastLeaderMessageTimer = Stopwatch.createStarted();
    private SnapshotTracker snapshotTracker = null;
    private String leaderId;
    private short leaderPayloadVersion;

    public Follower(final RaftActorContext context) {
        this(context, null, (short)-1);
    }

    public Follower(final RaftActorContext context, final String initialLeaderId,
            final short initialLeaderPayloadVersion) {
        super(context, RaftState.Follower);
        this.leaderId = initialLeaderId;
        this.leaderPayloadVersion = initialLeaderPayloadVersion;

        initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), context.getConfigParams()
                .getSyncIndexThreshold());

        appendEntriesMessageAssembler = MessageAssembler.builder().logContext(logName())
                .fileBackedStreamFactory(context.getFileBackedOutputStreamFactory())
                .assembledMessageCallback((message, sender) -> handleMessage(sender, message)).build();

        if (context.getPeerIds().isEmpty() && getLeaderId() == null) {
            actor().tell(TimeoutNow.INSTANCE, actor());
        } else {
            scheduleElection(electionDuration());
        }
    }

    @Override
    public final String getLeaderId() {
        return leaderId;
    }

    @VisibleForTesting
    protected final void setLeaderId(@Nullable final String leaderId) {
        this.leaderId = leaderId;
    }

    @Override
    public short getLeaderPayloadVersion() {
        return leaderPayloadVersion;
    }

    @VisibleForTesting
    protected final void setLeaderPayloadVersion(final short leaderPayloadVersion) {
        this.leaderPayloadVersion = leaderPayloadVersion;
    }
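
    // Restarts the stopwatch measuring time since the last leader message. Called from handleMessage() when a
    // leader RPC (or a grantable RequestVote) is processed so handleElectionTimeout() can tell whether the
    // leader has actually gone quiet.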
    private void restartLastLeaderMessageTimer() {
        if (lastLeaderMessageTimer.isRunning()) {
            lastLeaderMessageTimer.reset();
        }

        lastLeaderMessageTimer.start();
    }
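
    // Returns true if an entry with the given index exists either in the in-memory journal or within the range
    // already covered by the snapshot.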
    private boolean isLogEntryPresent(final long index) {
        if (context.getReplicatedLog().isInSnapshot(index)) {
            return true;
        }

        ReplicatedLogEntry entry = context.getReplicatedLog().get(index);
        return entry != null;
    }

    private void updateInitialSyncStatus(final long currentLeaderCommit, final String newLeaderId) {
        initialSyncStatusTracker.update(newLeaderId, currentLeaderCommit, context.getCommitIndex());
    }

    @Override
    protected RaftActorBehavior handleAppendEntries(final ActorRef sender, final AppendEntries appendEntries) {
        int numLogEntries = appendEntries.getEntries().size();
        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries: {}", logName(), appendEntries);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
        }

        if (snapshotTracker != null && !snapshotTracker.getLeaderId().equals(appendEntries.getLeaderId())) {
            log.debug("{}: snapshot install is in progress but the prior snapshot leaderId {} does not match the "
                + "AppendEntries leaderId {}", logName(), snapshotTracker.getLeaderId(), appendEntries.getLeaderId());
            closeSnapshotTracker();
        }

        if (snapshotTracker != null || context.getSnapshotManager().isApplying()) {
            // if snapshot install is in progress, follower should just acknowledge append entries with a reply.
            AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                    lastIndex(), lastTerm(), context.getPayloadVersion());

            log.debug("{}: snapshot install is in progress, replying immediately with {}", logName(), reply);
            sender.tell(reply, actor());

            return this;
        }

        // If we got here then we do appear to be talking to the leader
        leaderId = appendEntries.getLeaderId();
        leaderPayloadVersion = appendEntries.getPayloadVersion();

        // First check if the logs are in sync or not
        if (isOutOfSync(appendEntries, sender)) {
            updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
            return this;
        }

        if (!processNewEntries(appendEntries, sender)) {
            updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
            return this;
        }

        long lastIndex = lastIndex();
        long prevCommitIndex = context.getCommitIndex();

        // If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry)
        if (appendEntries.getLeaderCommit() > prevCommitIndex) {
            context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(), lastIndex));
        }

        if (prevCommitIndex != context.getCommitIndex()) {
            log.debug("{}: Commit index set to {}", logName(), context.getCommitIndex());
        }

        AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                lastIndex, lastTerm(), context.getPayloadVersion());

        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries returning : {}", logName(), reply);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries returning : {}", logName(), reply);
        }

        // Reply to the leader before applying any previous state so as not to hold up leader consensus.
        sender.tell(reply, actor());

        updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());

        // If leaderCommit > lastApplied, increment lastApplied and apply log[lastApplied] to state machine (§5.3).
        // lastApplied can be equal to lastIndex.
        if (appendEntries.getLeaderCommit() > context.getLastApplied() && context.getLastApplied() < lastIndex) {
            if (log.isDebugEnabled()) {
                log.debug("{}: applyLogToStateMachine, appendEntries.getLeaderCommit(): {}, "
                        + "context.getLastApplied(): {}, lastIndex(): {}", logName(),
                        appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex);
            }

            applyLogToStateMachine(appendEntries.getLeaderCommit());
        }

        if (!context.getSnapshotManager().isCapturing()) {
            super.performSnapshotWithoutCapture(appendEntries.getReplicatedToAllIndex());
        }

        appendEntriesMessageAssembler.checkExpiredAssembledMessageState();

        return this;
    }
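
    // Appends any new entries from the AppendEntries message to the local log, first resolving conflicting
    // entries (same index, different term) per §5.3 of the Raft paper. Returns false if a negative
    // AppendEntriesReply was sent and the caller should stop processing the message.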
    private boolean processNewEntries(final AppendEntries appendEntries, final ActorRef sender) {
        int numLogEntries = appendEntries.getEntries().size();
        if (numLogEntries == 0) {
            return true;
        }

        log.debug("{}: Number of entries to be appended = {}", logName(), numLogEntries);

        long lastIndex = lastIndex();
        int addEntriesFrom = 0;

        // First check for conflicting entries. If an existing entry conflicts with a new one (same index but different
        // term), delete the existing entry and all that follow it (§5.3)
        if (context.getReplicatedLog().size() > 0) {
            // Find the entry up until the one that is not in the follower's log
            for (int i = 0; i < numLogEntries; i++, addEntriesFrom++) {
                ReplicatedLogEntry matchEntry = appendEntries.getEntries().get(i);

                if (!isLogEntryPresent(matchEntry.getIndex())) {
                    // newEntry not found in the log
                    break;
                }

                long existingEntryTerm = getLogEntryTerm(matchEntry.getIndex());

                log.debug("{}: matchEntry {} is present: existingEntryTerm: {}", logName(), matchEntry,
                        existingEntryTerm);

                // existingEntryTerm == -1 means it's in the snapshot and not in the log. We don't know
                // what the term was so we'll assume it matches.
                if (existingEntryTerm == -1 || existingEntryTerm == matchEntry.getTerm()) {
                    continue;
                }

                if (!context.getRaftPolicy().applyModificationToStateBeforeConsensus()) {
                    log.info("{}: Removing entries from log starting at {}, commitIndex: {}, lastApplied: {}",
                            logName(), matchEntry.getIndex(), context.getCommitIndex(), context.getLastApplied());

                    // Entries do not match so remove all subsequent entries but only if the existing entries haven't
                    // been applied to the state yet.
                    if (matchEntry.getIndex() <= context.getLastApplied()
                            || !context.getReplicatedLog().removeFromAndPersist(matchEntry.getIndex())) {
                        // Could not remove the entries - this means the matchEntry index must be in the
                        // snapshot and not the log. In this case the prior entries are part of the state
                        // so we must send back a reply to force a snapshot to completely re-sync the
                        // follower's log and state.

                        log.info("{}: Could not remove entries - sending reply to force snapshot", logName());
                        sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                                lastTerm(), context.getPayloadVersion(), true), actor());
                        return false;
                    }

                    break;
                } else {
                    sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                            lastTerm(), context.getPayloadVersion(), true), actor());
                    return false;
                }
            }
        }

        lastIndex = lastIndex();
        log.debug("{}: After cleanup, lastIndex: {}, entries to be added from: {}", logName(), lastIndex,
                addEntriesFrom);

        // When persistence successfully completes for each new log entry appended, we need to determine if we
        // should capture a snapshot to compact the persisted log. shouldCaptureSnapshot tracks whether or not
        // one of the log entries has exceeded the log size threshold whereby a snapshot should be taken. However
        // we don't initiate the snapshot at that log entry but rather after the last log entry has been persisted.
        // This is done because subsequent log entries after the one that tripped the threshold may have been
        // applied to the state already, as the persistence callback occurs async, and we want those entries
        // purged from the persisted log as well.
        final AtomicBoolean shouldCaptureSnapshot = new AtomicBoolean(false);
        final Procedure<ReplicatedLogEntry> appendAndPersistCallback = logEntry -> {
            final List<ReplicatedLogEntry> entries = appendEntries.getEntries();
            final ReplicatedLogEntry lastEntryToAppend = entries.get(entries.size() - 1);
            if (shouldCaptureSnapshot.get() && logEntry == lastEntryToAppend) {
                context.getSnapshotManager().capture(context.getReplicatedLog().last(), getReplicatedToAllIndex());
            }
        };

        // Append any new entries not already in the log
        for (int i = addEntriesFrom; i < numLogEntries; i++) {
            ReplicatedLogEntry entry = appendEntries.getEntries().get(i);

            log.debug("{}: Append entry to log {}", logName(), entry.getData());

            context.getReplicatedLog().appendAndPersist(entry, appendAndPersistCallback, false);

            shouldCaptureSnapshot.compareAndSet(false,
                    context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex()));

            if (entry.getData() instanceof ServerConfigurationPayload) {
                context.updatePeerIds((ServerConfigurationPayload)entry.getData());
            }
        }

        log.debug("{}: Log size is now {}", logName(), context.getReplicatedLog().size());

        return true;
    }
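
    // Checks whether this follower's log is consistent with the state conveyed by the AppendEntries message
    // (prevLogIndex/prevLogTerm or, when those are unset, the leader's replicatedToAllIndex). If the log is
    // out of sync, a negative AppendEntriesReply is sent to the leader and true is returned.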
    private boolean isOutOfSync(final AppendEntries appendEntries, final ActorRef sender) {

        final long lastIndex = lastIndex();
        if (lastIndex == -1 && appendEntries.getPrevLogIndex() != -1) {

            // The follower's log is out of sync because the leader does have an entry at prevLogIndex and this
            // follower has no entries in its log.

            log.info("{}: The followers log is empty and the senders prevLogIndex is {}", logName(),
                    appendEntries.getPrevLogIndex());

            sendOutOfSyncAppendEntriesReply(sender, false);
            return true;
        }

        if (lastIndex > -1) {
            if (isLogEntryPresent(appendEntries.getPrevLogIndex())) {
                final long prevLogTerm = getLogEntryTerm(appendEntries.getPrevLogIndex());
                if (prevLogTerm != appendEntries.getPrevLogTerm()) {

                    // The follower's log is out of sync because the Leader's prevLogIndex entry does exist
                    // in the follower's log but it has a different term in it

                    log.info("{}: The prevLogIndex {} was found in the log but the term {} is not equal to the append "
                            + "entries prevLogTerm {} - lastIndex: {}, snapshotIndex: {}", logName(),
                            appendEntries.getPrevLogIndex(), prevLogTerm, appendEntries.getPrevLogTerm(), lastIndex,
                            context.getReplicatedLog().getSnapshotIndex());

                    sendOutOfSyncAppendEntriesReply(sender, false);
                    return true;
                }
            } else if (appendEntries.getPrevLogIndex() != -1) {

                // The follower's log is out of sync because the Leader's prevLogIndex entry was not found in its log

                log.info("{}: The log is not empty but the prevLogIndex {} was not found in it - lastIndex: {}, "
                        + "snapshotIndex: {}", logName(), appendEntries.getPrevLogIndex(), lastIndex,
                        context.getReplicatedLog().getSnapshotIndex());

                sendOutOfSyncAppendEntriesReply(sender, false);
                return true;
            }
        }

        if (appendEntries.getPrevLogIndex() == -1 && appendEntries.getPrevLogTerm() == -1
                && appendEntries.getReplicatedToAllIndex() != -1) {
            if (!isLogEntryPresent(appendEntries.getReplicatedToAllIndex())) {
                // This append entry comes from a leader who has its log aggressively trimmed and so does not have
                // the previous entry in its in-memory journal

                log.info("{}: Cannot append entries because the replicatedToAllIndex {} does not appear to be in the "
                        + "in-memory journal", logName(), appendEntries.getReplicatedToAllIndex());

                sendOutOfSyncAppendEntriesReply(sender, false);
                return true;
            }

            final List<ReplicatedLogEntry> entries = appendEntries.getEntries();
            if (entries.size() > 0 && !isLogEntryPresent(entries.get(0).getIndex() - 1)) {
                log.info("{}: Cannot append entries because the calculated previousIndex {} was not found in the "
                        + "in-memory journal", logName(), entries.get(0).getIndex() - 1);

                sendOutOfSyncAppendEntriesReply(sender, false);
                return true;
            }
        }

        return false;
    }

    private void sendOutOfSyncAppendEntriesReply(final ActorRef sender, boolean forceInstallSnapshot) {
        // We found that the log was out of sync so just send a negative reply.
        final AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex(),
                lastTerm(), context.getPayloadVersion(), forceInstallSnapshot);

        log.info("{}: Follower is out-of-sync so sending negative reply: {}", logName(), reply);
        sender.tell(reply, actor());
    }

    @Override
    protected RaftActorBehavior handleAppendEntriesReply(final ActorRef sender,
            final AppendEntriesReply appendEntriesReply) {
        return this;
    }

    @Override
    protected RaftActorBehavior handleRequestVoteReply(final ActorRef sender,
            final RequestVoteReply requestVoteReply) {
        return this;
    }

    @Override
    public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
        if (message instanceof ElectionTimeout || message instanceof TimeoutNow) {
            return handleElectionTimeout(message);
        }

        if (appendEntriesMessageAssembler.handleMessage(message, actor())) {
            return this;
        }

        if (!(message instanceof RaftRPC)) {
            // The rest of the processing requires the message to be a RaftRPC
            return super.handleMessage(sender, message);
        }

        final RaftRPC rpc = (RaftRPC) message;
        // If RPC request or response contains term T > currentTerm:
        // set currentTerm = T, convert to follower (§5.1)
        // This applies to all RPC messages and responses
        if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
            log.info("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
                    logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());

            context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
        }

        if (rpc instanceof InstallSnapshot) {
            handleInstallSnapshot(sender, (InstallSnapshot) rpc);
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
            return this;
        }

        if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
        }

        return super.handleMessage(sender, rpc);
    }
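
    // Decides whether an election timeout should actually trigger a switch to Candidate, taking recent leader
    // traffic and, via isLeaderAvailabilityKnown(), the Akka cluster membership state into account.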
    private RaftActorBehavior handleElectionTimeout(final Object message) {
        // If the message is ElectionTimeout, verify we haven't actually seen a message from the leader
        // during the election timeout interval. It may be that the election timer expired b/c this actor
        // was busy and messages got delayed, in which case leader messages would be backed up in the
        // queue but would be processed before the ElectionTimeout message and thus would restart the
        // lastLeaderMessageTimer.
        long lastLeaderMessageInterval = lastLeaderMessageTimer.elapsed(TimeUnit.MILLISECONDS);
        long electionTimeoutInMillis = context.getConfigParams().getElectionTimeOutInterval().toMillis();
        boolean noLeaderMessageReceived = !lastLeaderMessageTimer.isRunning()
                || lastLeaderMessageInterval >= electionTimeoutInMillis;

        if (canStartElection()) {
            if (message instanceof TimeoutNow) {
                log.debug("{}: Received TimeoutNow - switching to Candidate", logName());
                return internalSwitchBehavior(RaftState.Candidate);
            } else if (noLeaderMessageReceived) {
                // Check the cluster state to see if the leader is known to be up before we go to Candidate.
                // However if we haven't heard from the leader in a long time even though the cluster state
                // indicates it's up then something is wrong - leader might be stuck indefinitely - so switch
                // to Candidate.
                long maxElectionTimeout = electionTimeoutInMillis * MAX_ELECTION_TIMEOUT_FACTOR;
                if (isLeaderAvailabilityKnown() && lastLeaderMessageInterval < maxElectionTimeout) {
                    log.debug("{}: Received ElectionTimeout but leader appears to be available", logName());
                    scheduleElection(electionDuration());
                } else {
                    log.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
                    return internalSwitchBehavior(RaftState.Candidate);
                }
            } else {
                log.debug("{}: Received ElectionTimeout but lastLeaderMessageInterval {} < election timeout {}",
                        logName(), lastLeaderMessageInterval, context.getConfigParams().getElectionTimeOutInterval());
                scheduleElection(electionDuration());
            }
        } else if (message instanceof ElectionTimeout) {
            if (noLeaderMessageReceived) {
                setLeaderId(null);
            }

            scheduleElection(electionDuration());
        }

        return this;
    }
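
    // Consults the Akka Cluster membership state to determine whether the current leader is both reachable and
    // in the Up (or WeaklyUp) state. Returns false when the leader or the cluster state cannot be determined.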
    private boolean isLeaderAvailabilityKnown() {
        if (leaderId == null) {
            return false;
        }

        Optional<Cluster> cluster = context.getCluster();
        if (!cluster.isPresent()) {
            return false;
        }

        ActorSelection leaderActor = context.getPeerActorSelection(leaderId);
        if (leaderActor == null) {
            return false;
        }

        Address leaderAddress = leaderActor.anchorPath().address();

        CurrentClusterState state = cluster.get().state();
        Set<Member> unreachable = state.getUnreachable();

        log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
                unreachable);

        for (Member m: unreachable) {
            if (leaderAddress.equals(m.address())) {
                log.info("{}: Leader {} is unreachable", logName(), leaderAddress);
                return false;
            }
        }

        for (Member m: state.getMembers()) {
            if (leaderAddress.equals(m.address())) {
                if (m.status() == MemberStatus.up() || m.status() == MemberStatus.weaklyUp()) {
                    log.debug("{}: Leader {} cluster status is {} - leader is available", logName(),
                            leaderAddress, m.status());
                    return true;
                }

                log.debug("{}: Leader {} cluster status is {} - leader is unavailable", logName(),
                        leaderAddress, m.status());
                return false;
            }
        }

        log.debug("{}: Leader {} not found in the cluster member set", logName(), leaderAddress);

        return false;
    }
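
    // Handles one chunk of a leader-driven snapshot install. Chunks are accumulated in the SnapshotTracker; once
    // the final chunk arrives, the assembled snapshot is sent to the RaftActor as an ApplySnapshot message and
    // the reply to the leader is deferred until the snapshot has been applied.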
    private void handleInstallSnapshot(final ActorRef sender, final InstallSnapshot installSnapshot) {

        log.debug("{}: handleInstallSnapshot: {}", logName(), installSnapshot);

        leaderId = installSnapshot.getLeaderId();

        if (snapshotTracker == null) {
            snapshotTracker = new SnapshotTracker(log, installSnapshot.getTotalChunks(), installSnapshot.getLeaderId(),
                    context);
        }

        updateInitialSyncStatus(installSnapshot.getLastIncludedIndex(), installSnapshot.getLeaderId());

        try {
            final InstallSnapshotReply reply = new InstallSnapshotReply(
                    currentTerm(), context.getId(), installSnapshot.getChunkIndex(), true);

            if (snapshotTracker.addChunk(installSnapshot.getChunkIndex(), installSnapshot.getData(),
                    installSnapshot.getLastChunkHashCode())) {

                log.info("{}: Snapshot installed from leader: {}", logName(), installSnapshot.getLeaderId());

                Snapshot snapshot = Snapshot.create(
                        context.getSnapshotManager().convertSnapshot(snapshotTracker.getSnapshotBytes()),
                        new ArrayList<>(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        context.getTermInformation().getCurrentTerm(),
                        context.getTermInformation().getVotedFor(),
                        installSnapshot.getServerConfig().orNull());

                ApplySnapshot.Callback applySnapshotCallback = new ApplySnapshot.Callback() {
                    @Override
                    public void onSuccess() {
                        log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                        sender.tell(reply, actor());
                    }

                    @Override
                    public void onFailure() {
                        sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
                    }
                };

                actor().tell(new ApplySnapshot(snapshot, applySnapshotCallback), actor());

                closeSnapshotTracker();
            } else {
                log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                sender.tell(reply, actor());
            }
        } catch (IOException e) {
            log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);

            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
                    -1, false), actor());

            closeSnapshotTracker();
        }
    }

    private void closeSnapshotTracker() {
        if (snapshotTracker != null) {
            snapshotTracker.close();
            snapshotTracker = null;
        }
    }

    @Override
    public void close() {
        closeSnapshotTracker();

        appendEntriesMessageAssembler.close();
    }

    @VisibleForTesting
    SnapshotTracker getSnapshotTracker() {
        return snapshotTracker;
    }
}