/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.raft.behaviors;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Address;
import akka.cluster.Cluster;
import akka.cluster.ClusterEvent.CurrentClusterState;
import akka.cluster.Member;
import akka.cluster.MemberStatus;
import akka.japi.Procedure;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import java.util.ArrayList;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.Snapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;

/**
 * The behavior of a RaftActor in the Follower raft state.
 * <ul>
 * <li> Respond to RPCs from candidates and leaders
 * <li> If election timeout elapses without receiving AppendEntries
 * RPC from current leader or granting vote to candidate:
 * convert to candidate
 * </ul>
 */
public class Follower extends AbstractRaftActorBehavior {
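    // Threshold used by the SyncStatusTracker when reporting this follower's sync status
    // relative to the leader's commit index.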
    private static final int SYNC_THRESHOLD = 10;

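    // Multiplier applied to the election timeout: if no message has been seen from the leader
    // for this many timeout intervals, the leader is presumed stuck even when cluster state
    // still reports its node as reachable (see handleElectionTimeout).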
    private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18;

    private final SyncStatusTracker initialSyncStatusTracker;

    private final Stopwatch lastLeaderMessageTimer = Stopwatch.createStarted();
    private SnapshotTracker snapshotTracker = null;
    private String leaderId;
    private short leaderPayloadVersion;

    public Follower(RaftActorContext context) {
        this(context, null, (short)-1);
    }

    public Follower(RaftActorContext context, String initialLeaderId, short initialLeaderPayloadVersion) {
        super(context, RaftState.Follower);
        this.leaderId = initialLeaderId;
        this.leaderPayloadVersion = initialLeaderPayloadVersion;

        initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), SYNC_THRESHOLD);

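        // A single-node cluster with no known leader will never receive an AppendEntries
        // heartbeat, so trigger an immediate election rather than waiting out the timeout.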
        if (context.getPeerIds().isEmpty() && getLeaderId() == null) {
            actor().tell(TimeoutNow.INSTANCE, actor());
        } else {
            scheduleElection(electionDuration());
        }
    }

    @Override
    public final String getLeaderId() {
        return leaderId;
    }

    @VisibleForTesting
    protected final void setLeaderId(@Nullable final String leaderId) {
        this.leaderId = leaderId;
    }

    @Override
    public short getLeaderPayloadVersion() {
        return leaderPayloadVersion;
    }

    @VisibleForTesting
    protected final void setLeaderPayloadVersion(short leaderPayloadVersion) {
        this.leaderPayloadVersion = leaderPayloadVersion;
    }

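    // Tracks how long it has been since a message was received from the leader. Used by
    // handleElectionTimeout to distinguish a genuine leader failure from an ElectionTimeout
    // that fired merely because this actor's mailbox was backed up.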
    private void restartLastLeaderMessageTimer() {
        if (lastLeaderMessageTimer.isRunning()) {
            lastLeaderMessageTimer.reset();
        }

        lastLeaderMessageTimer.start();
    }

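    // An entry is considered present if it is in the in-memory log or is already covered
    // by the snapshot.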
    private boolean isLogEntryPresent(long index) {
        if (context.getReplicatedLog().isInSnapshot(index)) {
            return true;
        }

        ReplicatedLogEntry entry = context.getReplicatedLog().get(index);
        return entry != null;
    }

    private void updateInitialSyncStatus(long currentLeaderCommit, String newLeaderId) {
        initialSyncStatusTracker.update(newLeaderId, currentLeaderCommit, context.getCommitIndex());
    }

    @Override
    protected RaftActorBehavior handleAppendEntries(ActorRef sender, AppendEntries appendEntries) {

        int numLogEntries = appendEntries.getEntries() != null ? appendEntries.getEntries().size() : 0;
        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries: {}", logName(), appendEntries);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
        }

        // TODO: Refactor this method into smaller methods to make it easier to read.
        // Before refactoring, ensure tests cover the code properly.

        if (snapshotTracker != null && !snapshotTracker.getLeaderId().equals(appendEntries.getLeaderId())) {
            log.debug("{}: snapshot install is in progress but the prior snapshot leaderId {} does not match the "
                + "AppendEntries leaderId {}", logName(), snapshotTracker.getLeaderId(), appendEntries.getLeaderId());
            snapshotTracker = null;
        }

        if (snapshotTracker != null || context.getSnapshotManager().isApplying()) {
            // If a snapshot install is in progress, the follower should just acknowledge the
            // append entries with a successful reply.
            AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                    lastIndex(), lastTerm(), context.getPayloadVersion());

            log.debug("{}: snapshot install is in progress, replying immediately with {}", logName(), reply);
            sender.tell(reply, actor());

            return this;
        }

        // If we got here then we do appear to be talking to the leader
        leaderId = appendEntries.getLeaderId();
        leaderPayloadVersion = appendEntries.getPayloadVersion();

        updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());

        // First check if the logs are in sync or not
        long lastIndex = lastIndex();

        if (isOutOfSync(appendEntries)) {
            // We found that the log was out of sync so just send a negative
            // reply and return

            log.debug("{}: Follower is out-of-sync, so sending negative reply, lastIndex: {}, lastTerm: {}",
                        logName(), lastIndex, lastTerm());

            sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                    lastTerm(), context.getPayloadVersion()), actor());
            return this;
        }

        if (appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {

            log.debug("{}: Number of entries to be appended = {}", logName(),
                        appendEntries.getEntries().size());

            // 3. If an existing entry conflicts with a new one (same index
            // but different terms), delete the existing entry and all that
            // follow it (§5.3)
            int addEntriesFrom = 0;
            if (context.getReplicatedLog().size() > 0) {

                // Walk the new entries to find the first one that is not already in the follower's log
                for (int i = 0; i < appendEntries.getEntries().size(); i++, addEntriesFrom++) {
                    ReplicatedLogEntry matchEntry = appendEntries.getEntries().get(i);

                    if (!isLogEntryPresent(matchEntry.getIndex())) {
                        // newEntry not found in the log
                        break;
                    }

                    long existingEntryTerm = getLogEntryTerm(matchEntry.getIndex());

                    log.debug("{}: matchEntry {} is present: existingEntryTerm: {}", logName(), matchEntry,
                            existingEntryTerm);

                    // existingEntryTerm == -1 means it's in the snapshot and not in the log. We don't know
                    // what the term was so we'll assume it matches.
                    if (existingEntryTerm == -1 || existingEntryTerm == matchEntry.getTerm()) {
                        continue;
                    }

                    if (!context.getRaftPolicy().applyModificationToStateBeforeConsensus()) {

                        log.debug("{}: Removing entries from log starting at {}", logName(),
                                matchEntry.getIndex());

                        // Entries do not match so remove all subsequent entries
                        if (!context.getReplicatedLog().removeFromAndPersist(matchEntry.getIndex())) {
                            // Could not remove the entries - this means the matchEntry index must be in the
                            // snapshot and not the log. In this case the prior entries are part of the state
                            // so we must send back a reply to force a snapshot to completely re-sync the
                            // follower's log and state.

                            log.debug("{}: Could not remove entries - sending reply to force snapshot", logName());
                            sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                                    lastTerm(), context.getPayloadVersion(), true), actor());
                            return this;
                        }

                        break;
                    } else {
                        sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                                lastTerm(), context.getPayloadVersion(), true), actor());
                        return this;
                    }
                }
            }

            lastIndex = lastIndex();
            log.debug("{}: After cleanup, lastIndex: {}, entries to be added from: {}", logName(),
                    lastIndex, addEntriesFrom);

            // When persistence successfully completes for each new log entry appended, we need to determine if we
            // should capture a snapshot to compact the persisted log. shouldCaptureSnapshot tracks whether
            // one of the log entries has exceeded the log size threshold whereby a snapshot should be taken. However,
            // we don't initiate the snapshot at that log entry but rather after the last log entry has been persisted.
            // This is done because subsequent log entries after the one that tripped the threshold may have been
            // applied to the state already, as the persistence callback occurs async, and we want those entries
            // purged from the persisted log as well.
            final AtomicBoolean shouldCaptureSnapshot = new AtomicBoolean(false);
            final Procedure<ReplicatedLogEntry> appendAndPersistCallback = logEntry -> {
                final ReplicatedLogEntry lastEntryToAppend = appendEntries.getEntries().get(
                        appendEntries.getEntries().size() - 1);
                if (shouldCaptureSnapshot.get() && logEntry == lastEntryToAppend) {
                    context.getSnapshotManager().capture(context.getReplicatedLog().last(), getReplicatedToAllIndex());
                }
            };

            // 4. Append any new entries not already in the log
            for (int i = addEntriesFrom; i < appendEntries.getEntries().size(); i++) {
                ReplicatedLogEntry entry = appendEntries.getEntries().get(i);

                log.debug("{}: Append entry to log {}", logName(), entry.getData());

                context.getReplicatedLog().appendAndPersist(entry, appendAndPersistCallback, false);

                shouldCaptureSnapshot.compareAndSet(false,
                        context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex()));

                if (entry.getData() instanceof ServerConfigurationPayload) {
                    context.updatePeerIds((ServerConfigurationPayload)entry.getData());
                }
            }

            log.debug("{}: Log size is now {}", logName(), context.getReplicatedLog().size());
        }

        // 5. If leaderCommit > commitIndex, set commitIndex =
        // min(leaderCommit, index of last new entry)

        lastIndex = lastIndex();
        long prevCommitIndex = context.getCommitIndex();

        if (appendEntries.getLeaderCommit() > prevCommitIndex) {
            context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(), lastIndex));
        }

        if (prevCommitIndex != context.getCommitIndex()) {
            log.debug("{}: Commit index set to {}", logName(), context.getCommitIndex());
        }

        // If commitIndex > lastApplied: increment lastApplied, apply
        // log[lastApplied] to state machine (§5.3)
        // check if there are any entries to be applied. last-applied can be equal to last-index
        if (appendEntries.getLeaderCommit() > context.getLastApplied()
                && context.getLastApplied() < lastIndex) {
            if (log.isDebugEnabled()) {
                log.debug("{}: applyLogToStateMachine, appendEntries.getLeaderCommit(): {}, "
                        + "context.getLastApplied(): {}, lastIndex(): {}", logName(),
                    appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex);
            }

            applyLogToStateMachine(appendEntries.getLeaderCommit());
        }

        AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
            lastIndex, lastTerm(), context.getPayloadVersion());

        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries returning: {}", logName(), reply);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries returning: {}", logName(), reply);
        }

        sender.tell(reply, actor());

        if (!context.getSnapshotManager().isCapturing()) {
            super.performSnapshotWithoutCapture(appendEntries.getReplicatedToAllIndex());
        }

        return this;
    }

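    // Raft consistency check (§5.3): the leader's prevLogIndex/prevLogTerm must match this
    // follower's log. The last two cases handle a leader whose in-memory journal has been
    // aggressively trimmed, where prevLogIndex/prevLogTerm are -1 and replicatedToAllIndex
    // is checked instead.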
    private boolean isOutOfSync(AppendEntries appendEntries) {

        long prevLogTerm = getLogEntryTerm(appendEntries.getPrevLogIndex());
        boolean prevEntryPresent = isLogEntryPresent(appendEntries.getPrevLogIndex());
        long lastIndex = lastIndex();
        int numLogEntries = appendEntries.getEntries() != null ? appendEntries.getEntries().size() : 0;
        boolean outOfSync = true;

        if (lastIndex == -1 && appendEntries.getPrevLogIndex() != -1) {

            // The follower's log is out of sync because the leader does have
            // an entry at prevLogIndex and this follower has no entries in
            // its log.

            log.debug("{}: The follower's log is empty and the sender's prevLogIndex is {}",
                        logName(), appendEntries.getPrevLogIndex());
        } else if (lastIndex > -1 && appendEntries.getPrevLogIndex() != -1 && !prevEntryPresent) {

            // The follower's log is out of sync because the leader's
            // prevLogIndex entry was not found in its log

            log.debug("{}: The log is not empty but the prevLogIndex {} was not found in it - "
                    + "lastIndex: {}, snapshotIndex: {}", logName(), appendEntries.getPrevLogIndex(), lastIndex,
                    context.getReplicatedLog().getSnapshotIndex());
        } else if (lastIndex > -1 && prevEntryPresent && prevLogTerm != appendEntries.getPrevLogTerm()) {

            // The follower's log is out of sync because the leader's
            // prevLogIndex entry does exist in the follower's log but it has
            // a different term in it

            log.debug("{}: The prevLogIndex {} was found in the log but the term {} is not equal to the append"
                      + " entries' prevLogTerm {} - lastIndex: {}, snapshotIndex: {}", logName(),
                      appendEntries.getPrevLogIndex(), prevLogTerm, appendEntries.getPrevLogTerm(), lastIndex,
                      context.getReplicatedLog().getSnapshotIndex());
        } else if (appendEntries.getPrevLogIndex() == -1 && appendEntries.getPrevLogTerm() == -1
                && appendEntries.getReplicatedToAllIndex() != -1
                && !isLogEntryPresent(appendEntries.getReplicatedToAllIndex())) {
            // This append entry comes from a leader whose log has been aggressively trimmed and so does not have
            // the previous entry in its in-memory journal

            log.debug("{}: Cannot append entries because the replicatedToAllIndex {} does not appear to be in the"
                    + " in-memory journal", logName(), appendEntries.getReplicatedToAllIndex());
        } else if (appendEntries.getPrevLogIndex() == -1 && appendEntries.getPrevLogTerm() == -1
                && appendEntries.getReplicatedToAllIndex() != -1 && numLogEntries > 0
                && !isLogEntryPresent(appendEntries.getEntries().get(0).getIndex() - 1)) {
            log.debug("{}: Cannot append entries because the calculated previousIndex {} was not found in the"
                    + " in-memory journal", logName(), appendEntries.getEntries().get(0).getIndex() - 1);
        } else {
            outOfSync = false;
        }
        return outOfSync;
    }

    @Override
    protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
        AppendEntriesReply appendEntriesReply) {
        return this;
    }

    @Override
    protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
        RequestVoteReply requestVoteReply) {
        return this;
    }

    @Override
    public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
        if (message instanceof ElectionTimeout || message instanceof TimeoutNow) {
            return handleElectionTimeout(message);
        }

        if (!(message instanceof RaftRPC)) {
            // The rest of the processing requires the message to be a RaftRPC
            return null;
        }

        final RaftRPC rpc = (RaftRPC) message;
        // If RPC request or response contains term T > currentTerm:
        // set currentTerm = T, convert to follower (§5.1)
        // This applies to all RPC messages and responses
        if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
            log.debug("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
                logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());

            context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
        }

        if (rpc instanceof InstallSnapshot) {
            handleInstallSnapshot(sender, (InstallSnapshot) rpc);
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
            return this;
        }

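        // Per the Raft spec, the election timer is reset on a message from the current leader
        // or when granting a vote; a RequestVote that is denied must not push out this
        // follower's election deadline.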
        if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
        }

        return super.handleMessage(sender, rpc);
    }

    private RaftActorBehavior handleElectionTimeout(Object message) {
        // If the message is ElectionTimeout, verify we haven't actually seen a message from the leader
        // during the election timeout interval. It may be that the election timer expired because this actor
        // was busy and messages got delayed, in which case leader messages would be backed up in the
        // queue but would be processed before the ElectionTimeout message and thus would restart the
        // lastLeaderMessageTimer.
        long lastLeaderMessageInterval = lastLeaderMessageTimer.elapsed(TimeUnit.MILLISECONDS);
        long electionTimeoutInMillis = context.getConfigParams().getElectionTimeOutInterval().toMillis();
        boolean noLeaderMessageReceived = !lastLeaderMessageTimer.isRunning()
                || lastLeaderMessageInterval >= electionTimeoutInMillis;

        if (canStartElection()) {
            if (message instanceof TimeoutNow) {
                log.debug("{}: Received TimeoutNow - switching to Candidate", logName());
                return internalSwitchBehavior(RaftState.Candidate);
            } else if (noLeaderMessageReceived) {
                // Check the cluster state to see if the leader is known to be up before we go to Candidate.
                // However, if we haven't heard from the leader in a long time even though the cluster state
                // indicates it's up then something is wrong - the leader might be stuck indefinitely - so
                // switch to Candidate.
                long maxElectionTimeout = electionTimeoutInMillis * MAX_ELECTION_TIMEOUT_FACTOR;
                if (isLeaderAvailabilityKnown() && lastLeaderMessageInterval < maxElectionTimeout) {
                    log.debug("{}: Received ElectionTimeout but leader appears to be available", logName());
                    scheduleElection(electionDuration());
                } else {
                    log.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
                    return internalSwitchBehavior(RaftState.Candidate);
                }
            } else {
                log.debug("{}: Received ElectionTimeout but lastLeaderMessageInterval {} < election timeout {}",
                        logName(), lastLeaderMessageInterval, context.getConfigParams().getElectionTimeOutInterval());
                scheduleElection(electionDuration());
            }
        } else if (message instanceof ElectionTimeout) {
            if (noLeaderMessageReceived) {
                setLeaderId(null);
            }

            scheduleElection(electionDuration());
        }

        return this;
    }

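    // Consults Akka Cluster membership to determine whether the current leader's node is
    // reachable and Up (or WeaklyUp). Returns false whenever no determination can be made.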
    private boolean isLeaderAvailabilityKnown() {
        if (leaderId == null) {
            return false;
        }

        Optional<Cluster> cluster = context.getCluster();
        if (!cluster.isPresent()) {
            return false;
        }

        ActorSelection leaderActor = context.getPeerActorSelection(leaderId);
        if (leaderActor == null) {
            return false;
        }

        Address leaderAddress = leaderActor.anchorPath().address();

        CurrentClusterState state = cluster.get().state();
        Set<Member> unreachable = state.getUnreachable();

        log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
                unreachable);

        for (Member m : unreachable) {
            if (leaderAddress.equals(m.address())) {
                log.info("{}: Leader {} is unreachable", logName(), leaderAddress);
                return false;
            }
        }

        for (Member m : state.getMembers()) {
            if (leaderAddress.equals(m.address())) {
                if (m.status() == MemberStatus.up() || m.status() == MemberStatus.weaklyUp()) {
                    log.debug("{}: Leader {} cluster status is {} - leader is available", logName(),
                            leaderAddress, m.status());
                    return true;
                } else {
                    log.debug("{}: Leader {} cluster status is {} - leader is unavailable", logName(),
                            leaderAddress, m.status());
                    return false;
                }
            }
        }

        log.debug("{}: Leader {} not found in the cluster member set", logName(), leaderAddress);

        return false;
    }

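    // A snapshot is transferred from the leader in chunks. Chunks are accumulated in the
    // SnapshotTracker; when the last chunk arrives, the assembled snapshot is sent to the
    // RaftActor as an ApplySnapshot message and the reply to the leader is deferred to the
    // ApplySnapshot callback.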
    private void handleInstallSnapshot(final ActorRef sender, InstallSnapshot installSnapshot) {

        log.debug("{}: handleInstallSnapshot: {}", logName(), installSnapshot);

        leaderId = installSnapshot.getLeaderId();

        if (snapshotTracker == null) {
            snapshotTracker = new SnapshotTracker(log, installSnapshot.getTotalChunks(), installSnapshot.getLeaderId());
        }

        updateInitialSyncStatus(installSnapshot.getLastIncludedIndex(), installSnapshot.getLeaderId());

        try {
            final InstallSnapshotReply reply = new InstallSnapshotReply(
                    currentTerm(), context.getId(), installSnapshot.getChunkIndex(), true);

            if (snapshotTracker.addChunk(installSnapshot.getChunkIndex(), installSnapshot.getData(),
                    installSnapshot.getLastChunkHashCode())) {
                Snapshot snapshot = Snapshot.create(snapshotTracker.getSnapshot(),
                        new ArrayList<>(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        context.getTermInformation().getCurrentTerm(),
                        context.getTermInformation().getVotedFor(),
                        installSnapshot.getServerConfig().orNull());

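                // Defer the reply until the snapshot has been applied: onSuccess acknowledges the
                // final chunk while onFailure replies with chunk index -1 and success=false so the
                // leader knows the install did not complete.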
                ApplySnapshot.Callback applySnapshotCallback = new ApplySnapshot.Callback() {
                    @Override
                    public void onSuccess() {
                        log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                        sender.tell(reply, actor());
                    }

                    @Override
                    public void onFailure() {
                        sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
                    }
                };

                actor().tell(new ApplySnapshot(snapshot, applySnapshotCallback), actor());

                snapshotTracker = null;
            } else {
                log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                sender.tell(reply, actor());
            }
        } catch (SnapshotTracker.InvalidChunkException e) {
            log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);

            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
                    -1, false), actor());
            snapshotTracker = null;
        }
    }

    @Override
    public void close() {
        stopElection();
    }

    @VisibleForTesting
    SnapshotTracker getSnapshotTracker() {
        return snapshotTracker;
    }
}