/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Address;
import akka.cluster.Cluster;
import akka.cluster.ClusterEvent.CurrentClusterState;
import akka.cluster.Member;
import akka.cluster.MemberStatus;
import akka.japi.Procedure;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import java.util.ArrayList;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.Snapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
/**
 * The behavior of a RaftActor in the Follower raft state.
 * <ul>
 * <li> Respond to RPCs from candidates and leaders
 * <li> If election timeout elapses without receiving AppendEntries
 * RPC from current leader or granting vote to candidate:
 * convert to candidate
 * </ul>
 */
public class Follower extends AbstractRaftActorBehavior {
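    // Commit-index lag (in journal entries) passed to the SyncStatusTracker to decide whether this
    // follower has caught up with the leader's commit index.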
    private static final int SYNC_THRESHOLD = 10;
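    // Multiple of the election timeout after which we switch to Candidate even if the cluster
    // membership state still reports the leader as available (see handleElectionTimeout).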
    private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18;

    private final SyncStatusTracker initialSyncStatusTracker;

    private final Stopwatch lastLeaderMessageTimer = Stopwatch.createStarted();
    private SnapshotTracker snapshotTracker = null;
    private String leaderId;
    private short leaderPayloadVersion;
    public Follower(RaftActorContext context) {
        this(context, null, (short)-1);
    }

    public Follower(RaftActorContext context, String initialLeaderId, short initialLeaderPayloadVersion) {
        super(context, RaftState.Follower);
        this.leaderId = initialLeaderId;
        this.leaderPayloadVersion = initialLeaderPayloadVersion;

        initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), SYNC_THRESHOLD);
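        // No peers and no known leader means this is effectively a single-node cluster, so trigger
        // an immediate election rather than waiting for the election timeout to expire.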
        if (context.getPeerIds().isEmpty() && getLeaderId() == null) {
            actor().tell(TimeoutNow.INSTANCE, actor());
        } else {
            scheduleElection(electionDuration());
        }
    }
    @Override
    public final String getLeaderId() {
        return leaderId;
    }

    @VisibleForTesting
    protected final void setLeaderId(@Nullable final String leaderId) {
        this.leaderId = leaderId;
    }

    @Override
    public short getLeaderPayloadVersion() {
        return leaderPayloadVersion;
    }

    @VisibleForTesting
    protected final void setLeaderPayloadVersion(short leaderPayloadVersion) {
        this.leaderPayloadVersion = leaderPayloadVersion;
    }
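    // Restarts the stopwatch that tracks how long ago the last message from the leader was received.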
    private void restartLastLeaderMessageTimer() {
        if (lastLeaderMessageTimer.isRunning()) {
            lastLeaderMessageTimer.reset();
        }

        lastLeaderMessageTimer.start();
    }
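    // Returns true if an entry at the given index exists either in the snapshot or in the in-memory journal.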
    private boolean isLogEntryPresent(long index) {
        if (context.getReplicatedLog().isInSnapshot(index)) {
            return true;
        }

        ReplicatedLogEntry entry = context.getReplicatedLog().get(index);
        return entry != null;
    }
    private void updateInitialSyncStatus(long currentLeaderCommit, String newLeaderId) {
        initialSyncStatusTracker.update(newLeaderId, currentLeaderCommit, context.getCommitIndex());
    }
    @Override
    protected RaftActorBehavior handleAppendEntries(ActorRef sender, AppendEntries appendEntries) {

        int numLogEntries = appendEntries.getEntries() != null ? appendEntries.getEntries().size() : 0;
        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries: {}", logName(), appendEntries);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
        }

        // TODO : Refactor this method into a bunch of smaller methods
        // to make it easier to read. Before refactoring ensure tests
        // cover the code properly

        if (snapshotTracker != null && !snapshotTracker.getLeaderId().equals(appendEntries.getLeaderId())) {
            log.debug("{}: snapshot install is in progress but the prior snapshot leaderId {} does not match the "
                + "AppendEntries leaderId {}", logName(), snapshotTracker.getLeaderId(), appendEntries.getLeaderId());
            snapshotTracker = null;
        }

        if (snapshotTracker != null || context.getSnapshotManager().isApplying()) {
            // if snapshot install is in progress, follower should just acknowledge append entries with a reply.
            AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                    lastIndex(), lastTerm(), context.getPayloadVersion());

            log.debug("{}: snapshot install is in progress, replying immediately with {}", logName(), reply);
            sender.tell(reply, actor());

            return this;
        }
        // If we got here then we do appear to be talking to the leader
        leaderId = appendEntries.getLeaderId();
        leaderPayloadVersion = appendEntries.getPayloadVersion();

        updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
        // First check if the logs are in sync or not
        long lastIndex = lastIndex();

        if (isOutOfSync(appendEntries)) {
            // We found that the log was out of sync so just send a negative
            // reply and return.

            log.debug("{}: Follower is out-of-sync, so sending negative reply, lastIndex: {}, lastTerm: {}",
                    logName(), lastIndex, lastTerm());

            sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                    lastTerm(), context.getPayloadVersion()), actor());
            return this;
        }
        if (appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {

            log.debug("{}: Number of entries to be appended = {}", logName(),
                    appendEntries.getEntries().size());

            // 3. If an existing entry conflicts with a new one (same index
            // but different terms), delete the existing entry and all that
            // follow it (§5.3)
            int addEntriesFrom = 0;
            if (context.getReplicatedLog().size() > 0) {

                // Find the entry up until the one that is not in the follower's log
                for (int i = 0; i < appendEntries.getEntries().size(); i++, addEntriesFrom++) {
                    ReplicatedLogEntry matchEntry = appendEntries.getEntries().get(i);

                    if (!isLogEntryPresent(matchEntry.getIndex())) {
                        // newEntry not found in the log
                        break;
                    }

                    long existingEntryTerm = getLogEntryTerm(matchEntry.getIndex());

                    log.debug("{}: matchEntry {} is present: existingEntryTerm: {}", logName(), matchEntry,
                            existingEntryTerm);

                    // existingEntryTerm == -1 means it's in the snapshot and not in the log. We don't know
                    // what the term was so we'll assume it matches.
                    if (existingEntryTerm == -1 || existingEntryTerm == matchEntry.getTerm()) {
                        continue;
                    }

                    if (!context.getRaftPolicy().applyModificationToStateBeforeConsensus()) {

                        log.debug("{}: Removing entries from log starting at {}", logName(),
                                matchEntry.getIndex());

                        // Entries do not match so remove all subsequent entries
                        if (!context.getReplicatedLog().removeFromAndPersist(matchEntry.getIndex())) {
                            // Could not remove the entries - this means the matchEntry index must be in the
                            // snapshot and not the log. In this case the prior entries are part of the state
                            // so we must send back a reply to force a snapshot to completely re-sync the
                            // follower's log and state.

                            log.debug("{}: Could not remove entries - sending reply to force snapshot", logName());
                            sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                                    lastTerm(), context.getPayloadVersion(), true), actor());
                            return this;
                        }

                        break;
                    } else {
                        sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                                lastTerm(), context.getPayloadVersion(), true), actor());
                        return this;
                    }
                }
            }
            lastIndex = lastIndex();
            log.debug("{}: After cleanup, lastIndex: {}, entries to be added from: {}", logName(),
                    lastIndex, addEntriesFrom);

            // When persistence successfully completes for each new log entry appended, we need to determine if we
            // should capture a snapshot to compact the persisted log. shouldCaptureSnapshot tracks whether or not
            // one of the log entries has exceeded the log size threshold whereby a snapshot should be taken. However
            // we don't initiate the snapshot at that log entry but rather after the last log entry has been persisted.
            // This is done because subsequent log entries after the one that tripped the threshold may have been
            // applied to the state already, as the persistence callback occurs async, and we want those entries
            // purged from the persisted log as well.
            final AtomicBoolean shouldCaptureSnapshot = new AtomicBoolean(false);
            final Procedure<ReplicatedLogEntry> appendAndPersistCallback = logEntry -> {
                final ReplicatedLogEntry lastEntryToAppend = appendEntries.getEntries().get(
                        appendEntries.getEntries().size() - 1);
                if (shouldCaptureSnapshot.get() && logEntry == lastEntryToAppend) {
                    context.getSnapshotManager().capture(context.getReplicatedLog().last(), getReplicatedToAllIndex());
                }
            };
            // 4. Append any new entries not already in the log
            for (int i = addEntriesFrom; i < appendEntries.getEntries().size(); i++) {
                ReplicatedLogEntry entry = appendEntries.getEntries().get(i);

                log.debug("{}: Append entry to log {}", logName(), entry.getData());

                context.getReplicatedLog().appendAndPersist(entry, appendAndPersistCallback, false);

                shouldCaptureSnapshot.compareAndSet(false,
                        context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex()));

                if (entry.getData() instanceof ServerConfigurationPayload) {
                    context.updatePeerIds((ServerConfigurationPayload)entry.getData());
                }
            }

            log.debug("{}: Log size is now {}", logName(), context.getReplicatedLog().size());
        }
        // 5. If leaderCommit > commitIndex, set commitIndex =
        // min(leaderCommit, index of last new entry)

        lastIndex = lastIndex();
        long prevCommitIndex = context.getCommitIndex();

        if (appendEntries.getLeaderCommit() > prevCommitIndex) {
            context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(), lastIndex));
        }

        if (prevCommitIndex != context.getCommitIndex()) {
            log.debug("{}: Commit index set to {}", logName(), context.getCommitIndex());
        }

        // If commitIndex > lastApplied: increment lastApplied, apply
        // log[lastApplied] to state machine (§5.3)
        // check if there are any entries to be applied. last-applied can be equal to last-index
        if (appendEntries.getLeaderCommit() > context.getLastApplied()
                && context.getLastApplied() < lastIndex) {
            if (log.isDebugEnabled()) {
                log.debug("{}: applyLogToStateMachine, appendEntries.getLeaderCommit(): {},"
                        + " context.getLastApplied(): {}, lastIndex(): {}", logName(),
                        appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex);
            }

            applyLogToStateMachine(appendEntries.getLeaderCommit());
        }

        AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                lastIndex, lastTerm(), context.getPayloadVersion());

        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries returning : {}", logName(), reply);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries returning : {}", logName(), reply);
        }

        sender.tell(reply, actor());

        if (!context.getSnapshotManager().isCapturing()) {
            super.performSnapshotWithoutCapture(appendEntries.getReplicatedToAllIndex());
        }

        return this;
    }
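    // Determines whether this follower's log has diverged from the leader's, based on the prevLogIndex,
    // prevLogTerm and replicatedToAllIndex carried in the AppendEntries message.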
    private boolean isOutOfSync(AppendEntries appendEntries) {

        long prevLogTerm = getLogEntryTerm(appendEntries.getPrevLogIndex());
        boolean prevEntryPresent = isLogEntryPresent(appendEntries.getPrevLogIndex());
        long lastIndex = lastIndex();
        int numLogEntries = appendEntries.getEntries() != null ? appendEntries.getEntries().size() : 0;
        boolean outOfSync = true;

        if (lastIndex == -1 && appendEntries.getPrevLogIndex() != -1) {

            // The follower's log is out of sync because the leader does have
            // an entry at prevLogIndex and this follower has no entries in
            // its log.

            log.debug("{}: The followers log is empty and the senders prevLogIndex is {}",
                    logName(), appendEntries.getPrevLogIndex());
        } else if (lastIndex > -1 && appendEntries.getPrevLogIndex() != -1 && !prevEntryPresent) {

            // The follower's log is out of sync because the leader's
            // prevLogIndex entry was not found in its log

            log.debug("{}: The log is not empty but the prevLogIndex {} was not found in it - "
                    + "lastIndex: {}, snapshotIndex: {}", logName(), appendEntries.getPrevLogIndex(), lastIndex,
                    context.getReplicatedLog().getSnapshotIndex());
        } else if (lastIndex > -1 && prevEntryPresent && prevLogTerm != appendEntries.getPrevLogTerm()) {

            // The follower's log is out of sync because the leader's
            // prevLogIndex entry does exist in the follower's log but it has
            // a different term in it

            log.debug("{}: The prevLogIndex {} was found in the log but the term {} is not equal to the append"
                    + " entries prevLogTerm {} - lastIndex: {}, snapshotIndex: {}", logName(),
                    appendEntries.getPrevLogIndex(), prevLogTerm, appendEntries.getPrevLogTerm(), lastIndex,
                    context.getReplicatedLog().getSnapshotIndex());
        } else if (appendEntries.getPrevLogIndex() == -1 && appendEntries.getPrevLogTerm() == -1
                && appendEntries.getReplicatedToAllIndex() != -1
                && !isLogEntryPresent(appendEntries.getReplicatedToAllIndex())) {
            // This append entry comes from a leader whose log is aggressively trimmed, so it does not have
            // the previous entry in its in-memory journal

            log.debug("{}: Cannot append entries because the replicatedToAllIndex {} does not appear to be in the"
                    + " in-memory journal", logName(), appendEntries.getReplicatedToAllIndex());
        } else if (appendEntries.getPrevLogIndex() == -1 && appendEntries.getPrevLogTerm() == -1
                && appendEntries.getReplicatedToAllIndex() != -1 && numLogEntries > 0
                && !isLogEntryPresent(appendEntries.getEntries().get(0).getIndex() - 1)) {
            log.debug("{}: Cannot append entries because the calculated previousIndex {} was not found in the"
                    + " in-memory journal", logName(), appendEntries.getEntries().get(0).getIndex() - 1);
        } else {
            outOfSync = false;
        }

        return outOfSync;
    }
    @Override
    protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
            AppendEntriesReply appendEntriesReply) {
        return this;
    }

    @Override
    protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
            RequestVoteReply requestVoteReply) {
        return this;
    }
    @Override
    public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
        if (message instanceof ElectionTimeout || message instanceof TimeoutNow) {
            return handleElectionTimeout(message);
        }

        if (!(message instanceof RaftRPC)) {
            // The rest of the processing requires the message to be a RaftRPC
            return super.handleMessage(sender, message);
        }

        final RaftRPC rpc = (RaftRPC) message;
        // If RPC request or response contains term T > currentTerm:
        // set currentTerm = T, convert to follower (§5.1)
        // This applies to all RPC messages and responses
        if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
            log.debug("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
                    logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());

            context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
        }

        if (rpc instanceof InstallSnapshot) {
            handleInstallSnapshot(sender, (InstallSnapshot) rpc);
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
            return this;
        }

        if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
        }

        return super.handleMessage(sender, rpc);
    }
    private RaftActorBehavior handleElectionTimeout(Object message) {
        // If the message is ElectionTimeout, verify we haven't actually seen a message from the leader
        // during the election timeout interval. It may be that the election timer expired b/c this actor
        // was busy and messages got delayed, in which case leader messages would be backed up in the
        // queue but would be processed before the ElectionTimeout message and thus would restart the
        // lastLeaderMessageTimer.
        long lastLeaderMessageInterval = lastLeaderMessageTimer.elapsed(TimeUnit.MILLISECONDS);
        long electionTimeoutInMillis = context.getConfigParams().getElectionTimeOutInterval().toMillis();
        boolean noLeaderMessageReceived = !lastLeaderMessageTimer.isRunning()
                || lastLeaderMessageInterval >= electionTimeoutInMillis;

        if (canStartElection()) {
            if (message instanceof TimeoutNow) {
                log.debug("{}: Received TimeoutNow - switching to Candidate", logName());
                return internalSwitchBehavior(RaftState.Candidate);
            } else if (noLeaderMessageReceived) {
                // Check the cluster state to see if the leader is known to be up before we go to Candidate.
                // However if we haven't heard from the leader in a long time even though the cluster state
                // indicates it's up then something is wrong - leader might be stuck indefinitely - so switch
                // to Candidate.
                long maxElectionTimeout = electionTimeoutInMillis * MAX_ELECTION_TIMEOUT_FACTOR;
                if (isLeaderAvailabilityKnown() && lastLeaderMessageInterval < maxElectionTimeout) {
                    log.debug("{}: Received ElectionTimeout but leader appears to be available", logName());
                    scheduleElection(electionDuration());
                } else {
                    log.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
                    return internalSwitchBehavior(RaftState.Candidate);
                }
            } else {
                log.debug("{}: Received ElectionTimeout but lastLeaderMessageInterval {} < election timeout {}",
                        logName(), lastLeaderMessageInterval, context.getConfigParams().getElectionTimeOutInterval());
                scheduleElection(electionDuration());
            }
        } else if (message instanceof ElectionTimeout) {
            if (noLeaderMessageReceived) {
                setLeaderId(null);
            }

            scheduleElection(electionDuration());
        }

        return this;
    }
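    // Consults the Akka Cluster membership state to determine whether the current leader is reachable
    // and has a status of Up or WeaklyUp. Returns false if no determination can be made.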
    private boolean isLeaderAvailabilityKnown() {
        if (leaderId == null) {
            return false;
        }

        Optional<Cluster> cluster = context.getCluster();
        if (!cluster.isPresent()) {
            return false;
        }

        ActorSelection leaderActor = context.getPeerActorSelection(leaderId);
        if (leaderActor == null) {
            return false;
        }

        Address leaderAddress = leaderActor.anchorPath().address();

        CurrentClusterState state = cluster.get().state();
        Set<Member> unreachable = state.getUnreachable();

        log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
                unreachable);

        for (Member m: unreachable) {
            if (leaderAddress.equals(m.address())) {
                log.info("{}: Leader {} is unreachable", logName(), leaderAddress);
                return false;
            }
        }

        for (Member m: state.getMembers()) {
            if (leaderAddress.equals(m.address())) {
                if (m.status() == MemberStatus.up() || m.status() == MemberStatus.weaklyUp()) {
                    log.debug("{}: Leader {} cluster status is {} - leader is available", logName(),
                            leaderAddress, m.status());
                    return true;
                } else {
                    log.debug("{}: Leader {} cluster status is {} - leader is unavailable", logName(),
                            leaderAddress, m.status());
                    return false;
                }
            }
        }

        log.debug("{}: Leader {} not found in the cluster member set", logName(), leaderAddress);

        return false;
    }
    private void handleInstallSnapshot(final ActorRef sender, InstallSnapshot installSnapshot) {

        log.debug("{}: handleInstallSnapshot: {}", logName(), installSnapshot);

        leaderId = installSnapshot.getLeaderId();

        if (snapshotTracker == null) {
            snapshotTracker = new SnapshotTracker(log, installSnapshot.getTotalChunks(), installSnapshot.getLeaderId());
        }

        updateInitialSyncStatus(installSnapshot.getLastIncludedIndex(), installSnapshot.getLeaderId());

        try {
            final InstallSnapshotReply reply = new InstallSnapshotReply(
                    currentTerm(), context.getId(), installSnapshot.getChunkIndex(), true);

            if (snapshotTracker.addChunk(installSnapshot.getChunkIndex(), installSnapshot.getData(),
                    installSnapshot.getLastChunkHashCode())) {
                Snapshot snapshot = Snapshot.create(snapshotTracker.getSnapshot(),
                        new ArrayList<>(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        context.getTermInformation().getCurrentTerm(),
                        context.getTermInformation().getVotedFor(),
                        installSnapshot.getServerConfig().orNull());

                ApplySnapshot.Callback applySnapshotCallback = new ApplySnapshot.Callback() {
                    @Override
                    public void onSuccess() {
                        log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                        sender.tell(reply, actor());
                    }

                    @Override
                    public void onFailure() {
                        sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
                    }
                };

                actor().tell(new ApplySnapshot(snapshot, applySnapshotCallback), actor());

                snapshotTracker = null;
            } else {
                log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                sender.tell(reply, actor());
            }
        } catch (SnapshotTracker.InvalidChunkException e) {
            log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);

            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
                    -1, false), actor());
            snapshotTracker = null;
        }
    }
    @Override
    public void close() {
        stopElection();
    }

    @VisibleForTesting
    SnapshotTracker getSnapshotTracker() {
        return snapshotTracker;
    }
}