+ // The follower responded: mark it active and record the version/address
+ // metadata it reported in the AppendEntriesReply.
+ followerLogInformation.markFollowerActive();
+ followerLogInformation.setPayloadVersion(appendEntriesReply.getPayloadVersion());
+ followerLogInformation.setRaftVersion(appendEntriesReply.getRaftVersion());
+ followerLogInformation.setNeedsLeaderAddress(appendEntriesReply.isNeedsLeaderAddress());
+
+ long followerLastLogIndex = appendEntriesReply.getLogLastIndex();
+ boolean updated = false;
+ if (followerLastLogIndex > context.getReplicatedLog().lastIndex()) {
+ // The follower's log is actually ahead of the leader's log. Normally this doesn't happen
+ // in raft as a node cannot become leader if it's log is behind another's. However, the
+ // non-voting semantics deviate a bit from raft. Only voting members participate in
+ // elections and can become leader so it's possible for a non-voting follower to be ahead
+ // of the leader. This can happen if persistence is disabled and all voting members are
+ // restarted. In this case, the voting leader will start out with an empty log however
+ // the non-voting followers still retain the previous data in memory. On the first
+ // AppendEntries, the non-voting follower returns a successful reply b/c the prevLogIndex
+ // sent by the leader is -1 and thus the integrity checks pass. However the follower's returned
+ // lastLogIndex may be higher in which case we want to reset the follower by installing a
+ // snapshot. It's also possible that the follower's last log index is behind the leader's.
+ // However in this case the log terms won't match and the logs will conflict - this is handled
+ // elsewhere.
+ log.info("{}: handleAppendEntriesReply: follower {} lastIndex {} is ahead of our lastIndex {} "
+ + "(snapshotIndex {}, snapshotTerm {}) - forcing install snapshot", logName(),
+ followerLogInformation.getId(), followerLastLogIndex,
+ context.getReplicatedLog().lastIndex(), context.getReplicatedLog().getSnapshotIndex(),
+ context.getReplicatedLog().getSnapshotTerm());
+
+ // Reset the tracked indices so they no longer reference the follower's stale
+ // entries; the snapshot install below re-syncs the follower from scratch.
+ followerLogInformation.setMatchIndex(-1);
+ followerLogInformation.setNextIndex(-1);
+
+ initiateCaptureSnapshot(followerId);
+
+ updated = true;
+ } else if (appendEntriesReply.isSuccess()) {
+ // Term the leader's log holds for the follower's last index. NOTE(review):
+ // presumably returns -1 when the index isn't in the leader's journal (e.g.
+ // already compacted into the snapshot) - confirm against getLogEntryTerm.
+ long followersLastLogTermInLeadersLog = getLogEntryTerm(followerLastLogIndex);
+ if (followerLastLogIndex >= 0 && followersLastLogTermInLeadersLog >= 0
+ && followersLastLogTermInLeadersLog != appendEntriesReply.getLogLastTerm()) {
+ // The follower's last entry is present in the leader's journal but the terms don't match so the
+ // follower has a conflicting entry. Since the follower didn't report that it's out of sync, this means
+ // either the previous leader entry sent didn't conflict or the previous leader entry is in the snapshot
+ // and no longer in the journal. Either way, we set the follower's next index to 1 less than the last
+ // index reported by the follower. For the former case, the leader will send all entries starting with
+ // the previous follower's index and the follower will remove and replace the conflicting entries as
+ // needed. For the latter, the leader will initiate an install snapshot.
+
+ followerLogInformation.setNextIndex(followerLastLogIndex - 1);
+ updated = true;
+
+ log.info("{}: handleAppendEntriesReply: follower {} last log term {} for index {} conflicts with the "
+ + "leader's {} - set the follower's next index to {}", logName(),
+ followerId, appendEntriesReply.getLogLastTerm(), followerLastLogIndex,
+ followersLastLogTermInLeadersLog, followerLogInformation.getNextIndex());
+ } else {
+ // No detectable conflict - perform the normal match/next index bookkeeping.
+ updated = updateFollowerLogInformation(followerLogInformation, appendEntriesReply);
+ }