opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.raft.behaviors;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Address;
import akka.cluster.Cluster;
import akka.cluster.ClusterEvent.CurrentClusterState;
import akka.cluster.Member;
import akka.cluster.MemberStatus;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.messaging.MessageAssembler;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;

/**
 * The behavior of a RaftActor in the Follower raft state.
 * <ul>
 * <li>Respond to RPCs from candidates and leaders</li>
 * <li>If the election timeout elapses without receiving an AppendEntries RPC from the current
 * leader or granting a vote to a candidate: convert to Candidate</li>
 * </ul>
 */
public class Follower extends AbstractRaftActorBehavior {
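    // Multiplier applied to the configured election timeout: even if the cluster reports the leader
    // as available, a follower that has not heard from it for longer than this bound switches to
    // Candidate (see handleElectionTimeout).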
    private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18;

    private final SyncStatusTracker initialSyncStatusTracker;

    private final MessageAssembler appendEntriesMessageAssembler;

    private final Stopwatch lastLeaderMessageTimer = Stopwatch.createStarted();
    private SnapshotTracker snapshotTracker = null;
    private String leaderId;
    private short leaderPayloadVersion;

    public Follower(final RaftActorContext context) {
        this(context, null, (short) -1);
    }

    public Follower(final RaftActorContext context, final String initialLeaderId,
            final short initialLeaderPayloadVersion) {
        super(context, RaftState.Follower);
        this.leaderId = initialLeaderId;
        this.leaderPayloadVersion = initialLeaderPayloadVersion;

        initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), context.getConfigParams()
            .getSyncIndexThreshold());

        appendEntriesMessageAssembler = MessageAssembler.builder().logContext(logName())
                .fileBackedStreamFactory(context.getFileBackedOutputStreamFactory())
                .assembledMessageCallback((message, sender) -> handleMessage(sender, message)).build();

        if (context.getPeerIds().isEmpty() && getLeaderId() == null) {
            actor().tell(TimeoutNow.INSTANCE, actor());
        } else {
            scheduleElection(electionDuration());
        }
    }

    @Override
    public final String getLeaderId() {
        return leaderId;
    }

    @VisibleForTesting
    protected final void setLeaderId(final @Nullable String leaderId) {
        this.leaderId = leaderId;
    }

    @Override
    public short getLeaderPayloadVersion() {
        return leaderPayloadVersion;
    }

    @VisibleForTesting
    protected final void setLeaderPayloadVersion(final short leaderPayloadVersion) {
        this.leaderPayloadVersion = leaderPayloadVersion;
    }

    private void restartLastLeaderMessageTimer() {
        if (lastLeaderMessageTimer.isRunning()) {
            lastLeaderMessageTimer.reset();
        }

        lastLeaderMessageTimer.start();
    }

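    // Returns true if the entry at the given index exists either in the snapshot or in the in-memory log.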
    private boolean isLogEntryPresent(final long index) {
        if (context.getReplicatedLog().isInSnapshot(index)) {
            return true;
        }

        return context.getReplicatedLog().get(index) != null;
    }

    private void updateInitialSyncStatus(final long currentLeaderCommit, final String newLeaderId) {
        initialSyncStatusTracker.update(newLeaderId, currentLeaderCommit, context.getCommitIndex());
    }

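    // Handles an AppendEntries RPC from the leader: replies immediately if a snapshot install is in
    // progress, records the leader's identity and address, verifies log consistency, appends any new
    // entries, advances the commit index, and applies newly committed entries to the state machine.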
    @Override
    protected RaftActorBehavior handleAppendEntries(final ActorRef sender, final AppendEntries appendEntries) {
        int numLogEntries = appendEntries.getEntries().size();
        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries: {}", logName(), appendEntries);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
        }

        if (snapshotTracker != null && !snapshotTracker.getLeaderId().equals(appendEntries.getLeaderId())) {
            log.debug("{}: snapshot install is in progress but the prior snapshot leaderId {} does not match the "
                + "AppendEntries leaderId {}", logName(), snapshotTracker.getLeaderId(), appendEntries.getLeaderId());
            closeSnapshotTracker();
        }

        if (snapshotTracker != null || context.getSnapshotManager().isApplying()) {
            // If a snapshot install is in progress, the follower should just acknowledge the append entries
            // with a successful reply.
            AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                    lastIndex(), lastTerm(), context.getPayloadVersion(), false, needsLeaderAddress(),
                    appendEntries.getLeaderRaftVersion());

            log.debug("{}: snapshot install is in progress, replying immediately with {}", logName(), reply);
            sender.tell(reply, actor());

            return this;
        }

        // If we got here then we do appear to be talking to the leader
        leaderId = appendEntries.getLeaderId();
        leaderPayloadVersion = appendEntries.getPayloadVersion();

        if (appendEntries.getLeaderAddress().isPresent()) {
            final String address = appendEntries.getLeaderAddress().get();
            log.debug("New leader address: {}", address);

            context.setPeerAddress(leaderId, address);
            context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, address);
        }

        // First check if the logs are in sync or not
        if (isOutOfSync(appendEntries, sender)) {
            updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
            return this;
        }

        if (!processNewEntries(appendEntries, sender)) {
            updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
            return this;
        }

        long lastIndex = lastIndex();
        long prevCommitIndex = context.getCommitIndex();

        // If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry)
        if (appendEntries.getLeaderCommit() > prevCommitIndex) {
            context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(), lastIndex));
        }

        if (prevCommitIndex != context.getCommitIndex()) {
            log.debug("{}: Commit index set to {}", logName(), context.getCommitIndex());
        }

        AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                lastIndex, lastTerm(), context.getPayloadVersion(), false, needsLeaderAddress(),
                appendEntries.getLeaderRaftVersion());

        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries returning : {}", logName(), reply);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries returning : {}", logName(), reply);
        }

        // Reply to the leader before applying any previous state so as not to hold up leader consensus.
        sender.tell(reply, actor());

        updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());

        // If leaderCommit > lastApplied, increment lastApplied and apply log[lastApplied] to state machine (§5.3).
        // lastApplied can be equal to lastIndex.
        if (appendEntries.getLeaderCommit() > context.getLastApplied() && context.getLastApplied() < lastIndex) {
            if (log.isDebugEnabled()) {
                log.debug("{}: applyLogToStateMachine, appendEntries.getLeaderCommit(): {}, "
                        + "context.getLastApplied(): {}, lastIndex(): {}", logName(),
                    appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex);
            }

            applyLogToStateMachine(appendEntries.getLeaderCommit());
        }

        if (!context.getSnapshotManager().isCapturing()) {
            super.performSnapshotWithoutCapture(appendEntries.getReplicatedToAllIndex());
        }

        appendEntriesMessageAssembler.checkExpiredAssembledMessageState();

        return this;
    }

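    // Appends the new entries from an AppendEntries message to the local log, first removing any
    // existing conflicting entries (same index, different term). Returns false if a conflict could not
    // be resolved locally and a negative reply was sent to force the leader to re-sync the follower,
    // possibly via a snapshot.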
    private boolean processNewEntries(final AppendEntries appendEntries, final ActorRef sender) {
        int numLogEntries = appendEntries.getEntries().size();
        if (numLogEntries == 0) {
            return true;
        }

        log.debug("{}: Number of entries to be appended = {}", logName(), numLogEntries);

        long lastIndex = lastIndex();
        int addEntriesFrom = 0;

        // First check for conflicting entries. If an existing entry conflicts with a new one (same index but different
        // term), delete the existing entry and all that follow it (§5.3)
        if (context.getReplicatedLog().size() > 0) {
            // Walk the new entries until we find one that is not present in the follower's log
            for (int i = 0; i < numLogEntries; i++, addEntriesFrom++) {
                ReplicatedLogEntry matchEntry = appendEntries.getEntries().get(i);

                if (!isLogEntryPresent(matchEntry.getIndex())) {
                    // newEntry not found in the log
                    break;
                }

                long existingEntryTerm = getLogEntryTerm(matchEntry.getIndex());

                log.debug("{}: matchEntry {} is present: existingEntryTerm: {}", logName(), matchEntry,
                        existingEntryTerm);

                // existingEntryTerm == -1 means it's in the snapshot and not in the log. We don't know
                // what the term was so we'll assume it matches.
                if (existingEntryTerm == -1 || existingEntryTerm == matchEntry.getTerm()) {
                    continue;
                }

                if (!context.getRaftPolicy().applyModificationToStateBeforeConsensus()) {
                    log.info("{}: Removing entries from log starting at {}, commitIndex: {}, lastApplied: {}",
                            logName(), matchEntry.getIndex(), context.getCommitIndex(), context.getLastApplied());

                    // Entries do not match so remove all subsequent entries but only if the existing entries haven't
                    // been applied to the state yet.
                    if (matchEntry.getIndex() <= context.getLastApplied()
                            || !context.getReplicatedLog().removeFromAndPersist(matchEntry.getIndex())) {
                        // Could not remove the entries - this means the matchEntry index must be in the
                        // snapshot and not the log. In this case the prior entries are part of the state
                        // so we must send back a reply to force a snapshot to completely re-sync the
                        // follower's log and state.

                        log.info("{}: Could not remove entries - sending reply to force snapshot", logName());
                        sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                                lastTerm(), context.getPayloadVersion(), true, needsLeaderAddress(),
                                appendEntries.getLeaderRaftVersion()), actor());
                        return false;
                    }

                    break;
                } else {
                    sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                            lastTerm(), context.getPayloadVersion(), true, needsLeaderAddress(),
                            appendEntries.getLeaderRaftVersion()), actor());
                    return false;
                }
            }
        }

        lastIndex = lastIndex();
        log.debug("{}: After cleanup, lastIndex: {}, entries to be added from: {}", logName(), lastIndex,
                addEntriesFrom);

        // When persistence successfully completes for each new log entry appended, we need to determine if we
        // should capture a snapshot to compact the persisted log. shouldCaptureSnapshot tracks whether
        // one of the log entries has exceeded the log size threshold whereby a snapshot should be taken. However
        // we don't initiate the snapshot at that log entry but rather after the last log entry has been persisted.
        // This is done because subsequent log entries after the one that tripped the threshold may have been
        // applied to the state already, as the persistence callback occurs async, and we want those entries
        // purged from the persisted log as well.
        final AtomicBoolean shouldCaptureSnapshot = new AtomicBoolean(false);
        final Consumer<ReplicatedLogEntry> appendAndPersistCallback = logEntry -> {
            final List<ReplicatedLogEntry> entries = appendEntries.getEntries();
            final ReplicatedLogEntry lastEntryToAppend = entries.get(entries.size() - 1);
            if (shouldCaptureSnapshot.get() && logEntry == lastEntryToAppend) {
                context.getSnapshotManager().capture(context.getReplicatedLog().last(), getReplicatedToAllIndex());
            }
        };

        // Append any new entries not already in the log
        for (int i = addEntriesFrom; i < numLogEntries; i++) {
            ReplicatedLogEntry entry = appendEntries.getEntries().get(i);

            log.debug("{}: Append entry to log {}", logName(), entry.getData());

            context.getReplicatedLog().appendAndPersist(entry, appendAndPersistCallback, false);

            shouldCaptureSnapshot.compareAndSet(false,
                    context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex()));

            if (entry.getData() instanceof ServerConfigurationPayload) {
                context.updatePeerIds((ServerConfigurationPayload) entry.getData());
            }
        }

        log.debug("{}: Log size is now {}", logName(), context.getReplicatedLog().size());

        return true;
    }

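    // Checks whether the follower's log is consistent with the leader's prevLogIndex/prevLogTerm.
    // If not, sends a negative AppendEntriesReply and returns true.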
    private boolean isOutOfSync(final AppendEntries appendEntries, final ActorRef sender) {

        final long lastIndex = lastIndex();
        if (lastIndex == -1 && appendEntries.getPrevLogIndex() != -1) {

            // The follower's log is out of sync because the leader does have an entry at prevLogIndex and this
            // follower has no entries in its log.

            log.info("{}: The follower's log is empty and the sender's prevLogIndex is {}", logName(),
                appendEntries.getPrevLogIndex());

            sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
            return true;
        }

        if (lastIndex > -1) {
            if (isLogEntryPresent(appendEntries.getPrevLogIndex())) {
                final long leadersPrevLogTermInFollowersLogOrSnapshot =
                        getLogEntryOrSnapshotTerm(appendEntries.getPrevLogIndex());
                if (leadersPrevLogTermInFollowersLogOrSnapshot != appendEntries.getPrevLogTerm()) {

                    // The follower's log is out of sync because the leader's prevLogIndex entry does exist
                    // in the follower's log or snapshot but it has a different term.

                    log.info("{}: The prevLogIndex {} was found in the log but the term {} is not equal to the append "
                        + "entries prevLogTerm {} - lastIndex: {}, snapshotIndex: {}, snapshotTerm: {}", logName(),
                        appendEntries.getPrevLogIndex(), leadersPrevLogTermInFollowersLogOrSnapshot,
                        appendEntries.getPrevLogTerm(), lastIndex, context.getReplicatedLog().getSnapshotIndex(),
                        context.getReplicatedLog().getSnapshotTerm());

                    sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
                    return true;
                }
            } else if (appendEntries.getPrevLogIndex() != -1) {

                // The follower's log is out of sync because the leader's prevLogIndex entry was not found in its log

                log.info("{}: The log is not empty but the prevLogIndex {} was not found in it - lastIndex: {}, "
                        + "snapshotIndex: {}, snapshotTerm: {}", logName(), appendEntries.getPrevLogIndex(), lastIndex,
                        context.getReplicatedLog().getSnapshotIndex(), context.getReplicatedLog().getSnapshotTerm());

                sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
                return true;
            }
        }

        if (appendEntries.getPrevLogIndex() == -1 && appendEntries.getPrevLogTerm() == -1
                && appendEntries.getReplicatedToAllIndex() != -1) {
            if (!isLogEntryPresent(appendEntries.getReplicatedToAllIndex())) {
                // This append entry comes from a leader whose log is aggressively trimmed and so does not have
                // the previous entry in its in-memory journal

                log.info("{}: Cannot append entries because the replicatedToAllIndex {} does not appear to be in the "
                        + "in-memory journal - lastIndex: {}, snapshotIndex: {}, snapshotTerm: {}", logName(),
                        appendEntries.getReplicatedToAllIndex(), lastIndex,
                        context.getReplicatedLog().getSnapshotIndex(), context.getReplicatedLog().getSnapshotTerm());

                sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
                return true;
            }

            final List<ReplicatedLogEntry> entries = appendEntries.getEntries();
            if (entries.size() > 0 && !isLogEntryPresent(entries.get(0).getIndex() - 1)) {
                log.info("{}: Cannot append entries because the calculated previousIndex {} was not found in the "
                        + "in-memory journal - lastIndex: {}, snapshotIndex: {}, snapshotTerm: {}", logName(),
                        entries.get(0).getIndex() - 1, lastIndex, context.getReplicatedLog().getSnapshotIndex(),
                        context.getReplicatedLog().getSnapshotTerm());

                sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
                return true;
            }
        }

        return false;
    }

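    // Sends a negative AppendEntriesReply indicating the follower's log is out of sync with the leader's.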
    private void sendOutOfSyncAppendEntriesReply(final ActorRef sender, final boolean forceInstallSnapshot,
            final short leaderRaftVersion) {
        // We found that the log was out of sync so just send a negative reply.
        final AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex(),
                lastTerm(), context.getPayloadVersion(), forceInstallSnapshot, needsLeaderAddress(),
                leaderRaftVersion);

        log.info("{}: Follower is out-of-sync so sending negative reply: {}", logName(), reply);
        sender.tell(reply, actor());
    }

    private boolean needsLeaderAddress() {
        return context.getPeerAddress(leaderId) == null;
    }

    @Override
    protected RaftActorBehavior handleAppendEntriesReply(final ActorRef sender,
        final AppendEntriesReply appendEntriesReply) {
        return this;
    }

    @Override
    protected RaftActorBehavior handleRequestVoteReply(final ActorRef sender,
        final RequestVoteReply requestVoteReply) {
        return this;
    }

    @Override
    final ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
        return new ApplyState(null, null, entry);
    }

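    // Entry point for all messages received while in the Follower state: election timeouts are handled
    // first, then the MessageAssembler is given a chance to reassemble sliced AppendEntries messages,
    // and any RaftRPC carrying a higher term updates and persists the local term before normal RPC
    // processing.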
    @Override
    public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
        if (message instanceof ElectionTimeout || message instanceof TimeoutNow) {
            return handleElectionTimeout(message);
        }

        if (appendEntriesMessageAssembler.handleMessage(message, actor())) {
            return this;
        }

        if (!(message instanceof RaftRPC)) {
            // The rest of the processing requires the message to be a RaftRPC
            return null;
        }

        final RaftRPC rpc = (RaftRPC) message;
        // If RPC request or response contains term T > currentTerm:
        // set currentTerm = T, convert to follower (§5.1)
        // This applies to all RPC messages and responses
        if (rpc.getTerm() > context.getTermInformation().getCurrentTerm() && shouldUpdateTerm(rpc)) {
            log.info("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
                logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());

            context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
        }

        if (rpc instanceof InstallSnapshot) {
            handleInstallSnapshot(sender, (InstallSnapshot) rpc);
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
            return this;
        }

        if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
        }

        return super.handleMessage(sender, rpc);
    }

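    // Decides whether an election timeout should trigger a switch to Candidate, taking into account
    // whether a leader message was seen during the timeout interval and, where possible, the leader's
    // reachability as reported by the Akka cluster state.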
    private RaftActorBehavior handleElectionTimeout(final Object message) {
        // If the message is ElectionTimeout, verify we haven't actually seen a message from the leader
        // during the election timeout interval. It may be that the election timer expired because this actor
        // was busy and messages got delayed, in which case leader messages would be backed up in the
        // queue but would be processed before the ElectionTimeout message and thus would restart the
        // lastLeaderMessageTimer.
        long lastLeaderMessageInterval = lastLeaderMessageTimer.elapsed(TimeUnit.MILLISECONDS);
        long electionTimeoutInMillis = context.getConfigParams().getElectionTimeOutInterval().toMillis();
        boolean noLeaderMessageReceived = !lastLeaderMessageTimer.isRunning()
                || lastLeaderMessageInterval >= electionTimeoutInMillis;

        if (canStartElection()) {
            if (message instanceof TimeoutNow) {
                log.debug("{}: Received TimeoutNow - switching to Candidate", logName());
                return internalSwitchBehavior(RaftState.Candidate);
            } else if (noLeaderMessageReceived) {
                // Check the cluster state to see if the leader is known to be up before we go to Candidate.
                // However, if we haven't heard from the leader in a long time even though the cluster state
                // indicates it's up then something is wrong - the leader might be stuck indefinitely - so switch
                // to Candidate.
                long maxElectionTimeout = electionTimeoutInMillis * MAX_ELECTION_TIMEOUT_FACTOR;
                if (isLeaderAvailabilityKnown() && lastLeaderMessageInterval < maxElectionTimeout) {
                    log.debug("{}: Received ElectionTimeout but leader appears to be available", logName());
                    scheduleElection(electionDuration());
                } else {
                    log.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
                    return internalSwitchBehavior(RaftState.Candidate);
                }
            } else {
                log.debug("{}: Received ElectionTimeout but lastLeaderMessageInterval {} < election timeout {}",
                        logName(), lastLeaderMessageInterval, context.getConfigParams().getElectionTimeOutInterval());
                scheduleElection(electionDuration());
            }
        } else if (message instanceof ElectionTimeout) {
            if (noLeaderMessageReceived) {
                setLeaderId(null);
            }

            scheduleElection(electionDuration());
        }

        return this;
    }

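    // Consults the Akka cluster state to determine whether the current leader is known to be up.
    // Returns false if there is no leader, no cluster information is available, or the leader is
    // unreachable or not present in the member set.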
    private boolean isLeaderAvailabilityKnown() {
        if (leaderId == null) {
            return false;
        }

        Optional<Cluster> cluster = context.getCluster();
        if (!cluster.isPresent()) {
            return false;
        }

        ActorSelection leaderActor = context.getPeerActorSelection(leaderId);
        if (leaderActor == null) {
            return false;
        }

        Address leaderAddress = leaderActor.anchorPath().address();

        CurrentClusterState state = cluster.get().state();
        Set<Member> unreachable = state.getUnreachable();

        log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
                unreachable);

        for (Member m : unreachable) {
            if (leaderAddress.equals(m.address())) {
                log.info("{}: Leader {} is unreachable", logName(), leaderAddress);
                return false;
            }
        }

        for (Member m : state.getMembers()) {
            if (leaderAddress.equals(m.address())) {
                if (m.status() == MemberStatus.up() || m.status() == MemberStatus.weaklyUp()) {
                    log.debug("{}: Leader {} cluster status is {} - leader is available", logName(),
                            leaderAddress, m.status());
                    return true;
                } else {
                    log.debug("{}: Leader {} cluster status is {} - leader is unavailable", logName(),
                            leaderAddress, m.status());
                    return false;
                }
            }
        }

        log.debug("{}: Leader {} not found in the cluster member set", logName(), leaderAddress);

        return false;
    }

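    // Accumulates InstallSnapshot chunks via the SnapshotTracker, replying to the leader per chunk.
    // Once all chunks have arrived, hands the assembled snapshot to the RaftActor for application.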
    private void handleInstallSnapshot(final ActorRef sender, final InstallSnapshot installSnapshot) {

        log.debug("{}: handleInstallSnapshot: {}", logName(), installSnapshot);

        leaderId = installSnapshot.getLeaderId();

        if (snapshotTracker == null) {
            snapshotTracker = new SnapshotTracker(log, installSnapshot.getTotalChunks(), installSnapshot.getLeaderId(),
                    context);
        }

        updateInitialSyncStatus(installSnapshot.getLastIncludedIndex(), installSnapshot.getLeaderId());

        try {
            final InstallSnapshotReply reply = new InstallSnapshotReply(
                    currentTerm(), context.getId(), installSnapshot.getChunkIndex(), true);

            if (snapshotTracker.addChunk(installSnapshot.getChunkIndex(), installSnapshot.getData(),
                    installSnapshot.getLastChunkHashCode())) {

                log.info("{}: Snapshot installed from leader: {}", logName(), installSnapshot.getLeaderId());

                Snapshot snapshot = Snapshot.create(
                        context.getSnapshotManager().convertSnapshot(snapshotTracker.getSnapshotBytes()),
                        new ArrayList<>(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        context.getTermInformation().getCurrentTerm(),
                        context.getTermInformation().getVotedFor(),
                        installSnapshot.getServerConfig().orElse(null));

                ApplySnapshot.Callback applySnapshotCallback = new ApplySnapshot.Callback() {
                    @Override
                    public void onSuccess() {
                        log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                        sender.tell(reply, actor());
                    }

                    @Override
                    public void onFailure() {
                        sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
                    }
                };

                actor().tell(new ApplySnapshot(snapshot, applySnapshotCallback), actor());

                closeSnapshotTracker();
            } else {
                log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                sender.tell(reply, actor());
            }
        } catch (IOException e) {
            log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);

            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());

            closeSnapshotTracker();
        }
    }

    private void closeSnapshotTracker() {
        if (snapshotTracker != null) {
            snapshotTracker.close();
            snapshotTracker = null;
        }
    }

    @Override
    public void close() {
        closeSnapshotTracker();
        stopElection();
        appendEntriesMessageAssembler.close();
    }

    @VisibleForTesting
    SnapshotTracker getSnapshotTracker() {
        return snapshotTracker;
    }
}