Enforce non-null entries field in AppendEntries
[controller.git] opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.raft.behaviors;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Address;
import akka.cluster.Cluster;
import akka.cluster.ClusterEvent.CurrentClusterState;
import akka.cluster.Member;
import akka.cluster.MemberStatus;
import akka.japi.Procedure;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
import org.opendaylight.controller.cluster.raft.messages.RequestVote;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.Snapshot;

/**
 * The behavior of a RaftActor in the Follower raft state.
 * <ul>
 * <li> Respond to RPCs from candidates and leaders
 * <li> If election timeout elapses without receiving AppendEntries
 * RPC from current leader or granting vote to candidate:
 * convert to candidate
 * </ul>
 */
public class Follower extends AbstractRaftActorBehavior {
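    // Upper bound, as a multiple of the election timeout, on how long we keep deferring to the
    // cluster's reachability view of the leader before switching to Candidate anyway (see
    // handleElectionTimeout).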
    private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18;

    private final SyncStatusTracker initialSyncStatusTracker;

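    // Tracks how long it has been since the last message from the leader, so a late-firing
    // ElectionTimeout can be ignored when leader messages were merely delayed in the mailbox.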
    private final Stopwatch lastLeaderMessageTimer = Stopwatch.createStarted();
    private SnapshotTracker snapshotTracker = null;
    private String leaderId;
    private short leaderPayloadVersion;

    public Follower(final RaftActorContext context) {
        this(context, null, (short) -1);
    }

    public Follower(final RaftActorContext context, final String initialLeaderId,
            final short initialLeaderPayloadVersion) {
        super(context, RaftState.Follower);
        this.leaderId = initialLeaderId;
        this.leaderPayloadVersion = initialLeaderPayloadVersion;

        initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), context.getConfigParams()
            .getSyncIndexThreshold());

        if (context.getPeerIds().isEmpty() && getLeaderId() == null) {
            actor().tell(TimeoutNow.INSTANCE, actor());
        } else {
            scheduleElection(electionDuration());
        }
    }

    @Override
    public final String getLeaderId() {
        return leaderId;
    }

    @VisibleForTesting
    protected final void setLeaderId(@Nullable final String leaderId) {
        this.leaderId = leaderId;
    }

    @Override
    public short getLeaderPayloadVersion() {
        return leaderPayloadVersion;
    }

    @VisibleForTesting
    protected final void setLeaderPayloadVersion(final short leaderPayloadVersion) {
        this.leaderPayloadVersion = leaderPayloadVersion;
    }

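    // Note: Guava's Stopwatch.reset() also stops the watch, so restarting requires an explicit start().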
    private void restartLastLeaderMessageTimer() {
        if (lastLeaderMessageTimer.isRunning()) {
            lastLeaderMessageTimer.reset();
        }

        lastLeaderMessageTimer.start();
    }

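    // An index is considered present if its entry is still in the in-memory journal or has already
    // been captured in the snapshot.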
    private boolean isLogEntryPresent(final long index) {
        if (context.getReplicatedLog().isInSnapshot(index)) {
            return true;
        }

        return context.getReplicatedLog().get(index) != null;
    }

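    // Feeds the sync status tracker so the owning RaftActor can report when this follower has
    // caught up to the leader's commit index.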
    private void updateInitialSyncStatus(final long currentLeaderCommit, final String newLeaderId) {
        initialSyncStatusTracker.update(newLeaderId, currentLeaderCommit, context.getCommitIndex());
    }

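    // Implements the AppendEntries receiver logic from the Raft paper: consistency check, conflict
    // removal, appending new entries, and advancing the commit index.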
    @Override
    protected RaftActorBehavior handleAppendEntries(final ActorRef sender, final AppendEntries appendEntries) {
        int numLogEntries = appendEntries.getEntries().size();
        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries: {}", logName(), appendEntries);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
        }

        // TODO : Refactor this method into a bunch of smaller methods
        // to make it easier to read. Before refactoring ensure tests
        // cover the code properly

        if (snapshotTracker != null && !snapshotTracker.getLeaderId().equals(appendEntries.getLeaderId())) {
            log.debug("{}: snapshot install is in progress but the prior snapshot leaderId {} does not match the "
                + "AppendEntries leaderId {}", logName(), snapshotTracker.getLeaderId(), appendEntries.getLeaderId());
            closeSnapshotTracker();
        }

        if (snapshotTracker != null || context.getSnapshotManager().isApplying()) {
            // If snapshot install is in progress, the follower should just acknowledge the append entries
            // with a successful reply.
            AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                    lastIndex(), lastTerm(), context.getPayloadVersion());

            log.debug("{}: snapshot install is in progress, replying immediately with {}", logName(), reply);
            sender.tell(reply, actor());

            return this;
        }

        // If we got here then we do appear to be talking to the leader
        leaderId = appendEntries.getLeaderId();
        leaderPayloadVersion = appendEntries.getPayloadVersion();

        // First check if the logs are in sync or not
        long lastIndex = lastIndex();

        if (isOutOfSync(appendEntries)) {
            // We found that the log was out of sync so just send a negative
            // reply and return

            final AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                    lastTerm(), context.getPayloadVersion());

            log.info("{}: Follower is out-of-sync so sending negative reply: {}", logName(), reply);
            updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
            sender.tell(reply, actor());
            return this;
        }

        if (numLogEntries > 0) {
            log.debug("{}: Number of entries to be appended = {}", logName(), numLogEntries);

            // 3. If an existing entry conflicts with a new one (same index
            // but different terms), delete the existing entry and all that
            // follow it (§5.3)
            int addEntriesFrom = 0;
            if (context.getReplicatedLog().size() > 0) {

                // Walk the new entries until we find one that is not already in the follower's log
                for (int i = 0; i < numLogEntries; i++, addEntriesFrom++) {
                    ReplicatedLogEntry matchEntry = appendEntries.getEntries().get(i);

                    if (!isLogEntryPresent(matchEntry.getIndex())) {
                        // matchEntry not found in the log
                        break;
                    }

                    long existingEntryTerm = getLogEntryTerm(matchEntry.getIndex());

                    log.debug("{}: matchEntry {} is present: existingEntryTerm: {}", logName(), matchEntry,
                            existingEntryTerm);

                    // existingEntryTerm == -1 means it's in the snapshot and not in the log. We don't know
                    // what the term was so we'll assume it matches.
                    if (existingEntryTerm == -1 || existingEntryTerm == matchEntry.getTerm()) {
                        continue;
                    }

                    if (!context.getRaftPolicy().applyModificationToStateBeforeConsensus()) {

                        log.info("{}: Removing entries from log starting at {}", logName(), matchEntry.getIndex());

                        // Entries do not match so remove all subsequent entries
                        if (!context.getReplicatedLog().removeFromAndPersist(matchEntry.getIndex())) {
                            // Could not remove the entries - this means the matchEntry index must be in the
                            // snapshot and not the log. In this case the prior entries are part of the state
                            // so we must send back a reply to force a snapshot to completely re-sync the
                            // follower's log and state.

                            log.info("{}: Could not remove entries - sending reply to force snapshot", logName());
                            updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
                            sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                                    lastTerm(), context.getPayloadVersion(), true), actor());
                            return this;
                        }

                        break;
                    } else {
                        updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
                        sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex,
                                lastTerm(), context.getPayloadVersion(), true), actor());
                        return this;
                    }
                }
            }

            lastIndex = lastIndex();
            log.debug("{}: After cleanup, lastIndex: {}, entries to be added from: {}", logName(),
                    lastIndex, addEntriesFrom);

            // When persistence successfully completes for each new log entry appended, we need to determine if we
            // should capture a snapshot to compact the persisted log. shouldCaptureSnapshot tracks whether or not
            // one of the log entries has exceeded the log size threshold whereby a snapshot should be taken. However
            // we don't initiate the snapshot at that log entry but rather after the last log entry has been persisted.
            // This is done because subsequent log entries after the one that tripped the threshold may have been
            // applied to the state already, as the persistence callback occurs async, and we want those entries
            // purged from the persisted log as well.
            final AtomicBoolean shouldCaptureSnapshot = new AtomicBoolean(false);
            final Procedure<ReplicatedLogEntry> appendAndPersistCallback = logEntry -> {
                final List<ReplicatedLogEntry> entries = appendEntries.getEntries();
                final ReplicatedLogEntry lastEntryToAppend = entries.get(entries.size() - 1);
                if (shouldCaptureSnapshot.get() && logEntry == lastEntryToAppend) {
                    context.getSnapshotManager().capture(context.getReplicatedLog().last(), getReplicatedToAllIndex());
                }
            };

            // 4. Append any new entries not already in the log
            for (int i = addEntriesFrom; i < numLogEntries; i++) {
                ReplicatedLogEntry entry = appendEntries.getEntries().get(i);

                log.debug("{}: Append entry to log {}", logName(), entry.getData());

                context.getReplicatedLog().appendAndPersist(entry, appendAndPersistCallback, false);

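                // Latch to true once any appended entry crosses the snapshot threshold; the actual
                // capture is deferred to the persist callback above, after the last entry is persisted.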
                shouldCaptureSnapshot.compareAndSet(false,
                        context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex()));

                if (entry.getData() instanceof ServerConfigurationPayload) {
                    context.updatePeerIds((ServerConfigurationPayload) entry.getData());
                }
            }

            log.debug("{}: Log size is now {}", logName(), context.getReplicatedLog().size());
        }

        // 5. If leaderCommit > commitIndex, set commitIndex =
        // min(leaderCommit, index of last new entry)

        lastIndex = lastIndex();
        long prevCommitIndex = context.getCommitIndex();

        if (appendEntries.getLeaderCommit() > prevCommitIndex) {
            context.setCommitIndex(Math.min(appendEntries.getLeaderCommit(), lastIndex));
        }

        if (prevCommitIndex != context.getCommitIndex()) {
            log.debug("{}: Commit index set to {}", logName(), context.getCommitIndex());
        }

        AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), true,
                lastIndex, lastTerm(), context.getPayloadVersion());

        if (log.isTraceEnabled()) {
            log.trace("{}: handleAppendEntries returning : {}", logName(), reply);
        } else if (log.isDebugEnabled() && numLogEntries > 0) {
            log.debug("{}: handleAppendEntries returning : {}", logName(), reply);
        }

        // Reply to the leader before applying any previous state so as not to hold up leader consensus.
        updateInitialSyncStatus(appendEntries.getLeaderCommit(), appendEntries.getLeaderId());
        sender.tell(reply, actor());

        // If commitIndex > lastApplied: increment lastApplied, apply
        // log[lastApplied] to state machine (§5.3)
        // check if there are any entries to be applied. last-applied can be equal to last-index
        if (appendEntries.getLeaderCommit() > context.getLastApplied()
                && context.getLastApplied() < lastIndex) {
            if (log.isDebugEnabled()) {
                log.debug("{}: applyLogToStateMachine, appendEntries.getLeaderCommit(): {}, "
                        + "context.getLastApplied(): {}, lastIndex(): {}", logName(),
                    appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex);
            }

            applyLogToStateMachine(appendEntries.getLeaderCommit());
        }

        if (!context.getSnapshotManager().isCapturing()) {
            super.performSnapshotWithoutCapture(appendEntries.getReplicatedToAllIndex());
        }

        return this;
    }

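    // Returns true if the leader's view of this follower's log (prevLogIndex/prevLogTerm, or
    // replicatedToAllIndex when no previous entry is given) cannot be reconciled with the local
    // journal and snapshot, in which case a negative reply forces the leader to back up or re-sync.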
    private boolean isOutOfSync(final AppendEntries appendEntries) {
        final long lastIndex = lastIndex();
        if (lastIndex == -1 && appendEntries.getPrevLogIndex() != -1) {
            // The follower's log is out of sync because the leader does have an entry at prevLogIndex and this
            // follower has no entries in its log.

            log.info("{}: The follower's log is empty and the sender's prevLogIndex is {}", logName(),
                appendEntries.getPrevLogIndex());
            return true;
        }

        if (lastIndex > -1) {
            if (isLogEntryPresent(appendEntries.getPrevLogIndex())) {
                final long prevLogTerm = getLogEntryTerm(appendEntries.getPrevLogIndex());
                if (prevLogTerm != appendEntries.getPrevLogTerm()) {
                    // The follower's log is out of sync because the leader's prevLogIndex entry does exist
                    // in the follower's log but it has a different term.

                    log.info("{}: The prevLogIndex {} was found in the log but the term {} is not equal to the append "
                            + "entries prevLogTerm {} - lastIndex: {}, snapshotIndex: {}", logName(),
                            appendEntries.getPrevLogIndex(), prevLogTerm, appendEntries.getPrevLogTerm(), lastIndex,
                            context.getReplicatedLog().getSnapshotIndex());
                    return true;
                }
            } else if (appendEntries.getPrevLogIndex() != -1) {
                // The follower's log is out of sync because the leader's prevLogIndex entry was not found in
                // the follower's log

                log.info("{}: The log is not empty but the prevLogIndex {} was not found in it - lastIndex: {}, "
                        + "snapshotIndex: {}", logName(), appendEntries.getPrevLogIndex(), lastIndex,
                        context.getReplicatedLog().getSnapshotIndex());
                return true;
            }
        }

        if (appendEntries.getPrevLogIndex() == -1 && appendEntries.getPrevLogTerm() == -1
                && appendEntries.getReplicatedToAllIndex() != -1) {
            if (!isLogEntryPresent(appendEntries.getReplicatedToAllIndex())) {
                // This append entry comes from a leader whose log is aggressively trimmed and so does not have
                // the previous entry in its in-memory journal

                log.info("{}: Cannot append entries because the replicatedToAllIndex {} does not appear to be in the "
                        + "in-memory journal", logName(), appendEntries.getReplicatedToAllIndex());
                return true;
            }

            final List<ReplicatedLogEntry> entries = appendEntries.getEntries();
            if (!entries.isEmpty() && !isLogEntryPresent(entries.get(0).getIndex() - 1)) {
                log.info("{}: Cannot append entries because the calculated previousIndex {} was not found in the "
                        + "in-memory journal", logName(), entries.get(0).getIndex() - 1);
                return true;
            }
        }

        return false;
    }

    @Override
    protected RaftActorBehavior handleAppendEntriesReply(final ActorRef sender,
        final AppendEntriesReply appendEntriesReply) {
        return this;
    }

    @Override
    protected RaftActorBehavior handleRequestVoteReply(final ActorRef sender,
        final RequestVoteReply requestVoteReply) {
        return this;
    }

    @Override
    public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
        if (message instanceof ElectionTimeout || message instanceof TimeoutNow) {
            return handleElectionTimeout(message);
        }

        if (!(message instanceof RaftRPC)) {
            // The rest of the processing requires the message to be a RaftRPC
            return null;
        }

        final RaftRPC rpc = (RaftRPC) message;
        // If RPC request or response contains term T > currentTerm:
        // set currentTerm = T, convert to follower (§5.1)
        // This applies to all RPC messages and responses
        if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
            log.info("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
                logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());

            context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
        }

        if (rpc instanceof InstallSnapshot) {
            handleInstallSnapshot(sender, (InstallSnapshot) rpc);
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
            return this;
        }

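        // Restart the election timer for any non-RequestVote RPC, but for a RequestVote only when the
        // vote is actually granted - otherwise a disruptive candidate could keep resetting our timeout.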
        if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
            restartLastLeaderMessageTimer();
            scheduleElection(electionDuration());
        }

        return super.handleMessage(sender, rpc);
    }

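    // Decides whether an election timeout should actually promote this follower to Candidate or
    // merely be rescheduled, based on recent leader traffic and the cluster's view of the leader.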
    private RaftActorBehavior handleElectionTimeout(final Object message) {
        // If the message is ElectionTimeout, verify we haven't actually seen a message from the leader
        // during the election timeout interval. It may be that the election timer expired because this actor
        // was busy and messages got delayed, in which case leader messages would be backed up in the
        // queue but would be processed before the ElectionTimeout message and thus would restart the
        // lastLeaderMessageTimer.
        long lastLeaderMessageInterval = lastLeaderMessageTimer.elapsed(TimeUnit.MILLISECONDS);
        long electionTimeoutInMillis = context.getConfigParams().getElectionTimeOutInterval().toMillis();
        boolean noLeaderMessageReceived = !lastLeaderMessageTimer.isRunning()
                || lastLeaderMessageInterval >= electionTimeoutInMillis;

        if (canStartElection()) {
            if (message instanceof TimeoutNow) {
                log.debug("{}: Received TimeoutNow - switching to Candidate", logName());
                return internalSwitchBehavior(RaftState.Candidate);
            } else if (noLeaderMessageReceived) {
                // Check the cluster state to see if the leader is known to be up before we go to Candidate.
                // However, if we haven't heard from the leader in a long time even though the cluster state
                // indicates it's up then something is wrong - the leader might be stuck indefinitely - so switch
                // to Candidate.
                long maxElectionTimeout = electionTimeoutInMillis * MAX_ELECTION_TIMEOUT_FACTOR;
                if (isLeaderAvailabilityKnown() && lastLeaderMessageInterval < maxElectionTimeout) {
                    log.debug("{}: Received ElectionTimeout but leader appears to be available", logName());
                    scheduleElection(electionDuration());
                } else {
                    log.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
                    return internalSwitchBehavior(RaftState.Candidate);
                }
            } else {
                log.debug("{}: Received ElectionTimeout but lastLeaderMessageInterval {} < election timeout {}",
                        logName(), lastLeaderMessageInterval, context.getConfigParams().getElectionTimeOutInterval());
                scheduleElection(electionDuration());
            }
        } else if (message instanceof ElectionTimeout) {
            if (noLeaderMessageReceived) {
                setLeaderId(null);
            }

            scheduleElection(electionDuration());
        }

        return this;
    }

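    // Consults Akka Cluster membership and reachability to determine whether the current leader is
    // positively known to be up; returns false whenever that cannot be established.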
    private boolean isLeaderAvailabilityKnown() {
        if (leaderId == null) {
            return false;
        }

        Optional<Cluster> cluster = context.getCluster();
        if (!cluster.isPresent()) {
            return false;
        }

        ActorSelection leaderActor = context.getPeerActorSelection(leaderId);
        if (leaderActor == null) {
            return false;
        }

        Address leaderAddress = leaderActor.anchorPath().address();

        CurrentClusterState state = cluster.get().state();
        Set<Member> unreachable = state.getUnreachable();

        log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
                unreachable);

        for (Member m : unreachable) {
            if (leaderAddress.equals(m.address())) {
                log.info("{}: Leader {} is unreachable", logName(), leaderAddress);
                return false;
            }
        }

        for (Member m : state.getMembers()) {
            if (leaderAddress.equals(m.address())) {
                if (m.status() == MemberStatus.up() || m.status() == MemberStatus.weaklyUp()) {
                    log.debug("{}: Leader {} cluster status is {} - leader is available", logName(),
                            leaderAddress, m.status());
                    return true;
                } else {
                    log.debug("{}: Leader {} cluster status is {} - leader is unavailable", logName(),
                            leaderAddress, m.status());
                    return false;
                }
            }
        }

        log.debug("{}: Leader {} not found in the cluster member set", logName(), leaderAddress);

        return false;
    }

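    // Reassembles a chunked snapshot transfer from the leader: each chunk is acknowledged
    // individually and the completed snapshot is handed to the RaftActor via an ApplySnapshot message.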
    private void handleInstallSnapshot(final ActorRef sender, final InstallSnapshot installSnapshot) {
        log.debug("{}: handleInstallSnapshot: {}", logName(), installSnapshot);

        leaderId = installSnapshot.getLeaderId();

        if (snapshotTracker == null) {
            snapshotTracker = new SnapshotTracker(log, installSnapshot.getTotalChunks(), installSnapshot.getLeaderId(),
                    context);
        }

        updateInitialSyncStatus(installSnapshot.getLastIncludedIndex(), installSnapshot.getLeaderId());

        try {
            final InstallSnapshotReply reply = new InstallSnapshotReply(
                    currentTerm(), context.getId(), installSnapshot.getChunkIndex(), true);

            if (snapshotTracker.addChunk(installSnapshot.getChunkIndex(), installSnapshot.getData(),
                    installSnapshot.getLastChunkHashCode())) {

                log.info("{}: Snapshot installed from leader: {}", logName(), installSnapshot.getLeaderId());

                Snapshot snapshot = Snapshot.create(
                        context.getSnapshotManager().convertSnapshot(snapshotTracker.getSnapshotBytes()),
                        new ArrayList<>(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        installSnapshot.getLastIncludedIndex(),
                        installSnapshot.getLastIncludedTerm(),
                        context.getTermInformation().getCurrentTerm(),
                        context.getTermInformation().getVotedFor(),
                        installSnapshot.getServerConfig().orNull());

                ApplySnapshot.Callback applySnapshotCallback = new ApplySnapshot.Callback() {
                    @Override
                    public void onSuccess() {
                        log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                        sender.tell(reply, actor());
                    }

                    @Override
                    public void onFailure() {
                        sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
                    }
                };

                actor().tell(new ApplySnapshot(snapshot, applySnapshotCallback), actor());

                closeSnapshotTracker();
            } else {
                log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);

                sender.tell(reply, actor());
            }
        } catch (IOException e) {
            log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);

            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
                    -1, false), actor());

            closeSnapshotTracker();
        }
    }

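    // Discards any partially received snapshot state; invoked after a completed install, when the
    // snapshot leaderId changes, on a chunk error, and when this behavior is closed.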
    private void closeSnapshotTracker() {
        if (snapshotTracker != null) {
            snapshotTracker.close();
            snapshotTracker = null;
        }
    }

    @Override
    public void close() {
        closeSnapshotTracker();
        stopElection();
    }

    @VisibleForTesting
    SnapshotTracker getSnapshotTracker() {
        return snapshotTracker;
    }
}