+ private boolean isOutOfSync(final AppendEntries appendEntries, final ActorRef sender) {
+
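+ // Raft log-consistency check (§5.3): the AppendEntries must be rejected unless this follower's log
+ // contains an entry at prevLogIndex whose term matches prevLogTerm. A negative reply lets the leader
+ // walk nextIndex backwards (or force an install snapshot) until the two logs converge.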
+ final long lastIndex = lastIndex();
+ if (lastIndex == -1 && appendEntries.getPrevLogIndex() != -1) {
+
+ // The follower's log is out of sync because the leader does have an entry at prevLogIndex and this
+ // follower has no entries in its log.
+
+ log.info("{}: The followers log is empty and the senders prevLogIndex is {}", logName(),
+ appendEntries.getPrevLogIndex());
+
+ sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
+ return true;
+ }
+
+ if (lastIndex > -1) {
+ if (isLogEntryPresent(appendEntries.getPrevLogIndex())) {
+ final long leadersPrevLogTermInFollowersLogOrSnapshot =
+ getLogEntryOrSnapshotTerm(appendEntries.getPrevLogIndex());
+ if (leadersPrevLogTermInFollowersLogOrSnapshot != appendEntries.getPrevLogTerm()) {
+
+ // The follower's log is out of sync because the leader's prevLogIndex entry does exist
+ // in the follower's log or snapshot but it has a different term.
+
+ log.info("{}: The prevLogIndex {} was found in the log but the term {} is not equal to the append "
+ + "entries prevLogTerm {} - lastIndex: {}, snapshotIndex: {}, snapshotTerm: {}", logName(),
+ appendEntries.getPrevLogIndex(), leadersPrevLogTermInFollowersLogOrSnapshot,
+ appendEntries.getPrevLogTerm(), lastIndex, context.getReplicatedLog().getSnapshotIndex(),
+ context.getReplicatedLog().getSnapshotTerm());
+
+ sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
+ return true;
+ }
+ } else if (appendEntries.getPrevLogIndex() != -1) {
+
+ // The follower's log is out of sync because the leader's prevLogIndex entry was not found in its log.
+
+ log.info("{}: The log is not empty but the prevLogIndex {} was not found in it - lastIndex: {}, "
+ + "snapshotIndex: {}, snapshotTerm: {}", logName(), appendEntries.getPrevLogIndex(), lastIndex,
+ context.getReplicatedLog().getSnapshotIndex(), context.getReplicatedLog().getSnapshotTerm());
+
+ sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
+ return true;
+ }
+ }
+
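+ // When the leader reports no previous entry at all (prevLogIndex and prevLogTerm are both -1) but
+ // does report a replicatedToAllIndex, verify that index - and the computed previous index of the
+ // first entry to append - are still covered by our in-memory journal.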
+ if (appendEntries.getPrevLogIndex() == -1 && appendEntries.getPrevLogTerm() == -1
+ && appendEntries.getReplicatedToAllIndex() != -1) {
+ if (!isLogEntryPresent(appendEntries.getReplicatedToAllIndex())) {
+ // This AppendEntries comes from a leader whose log has been aggressively trimmed, so the
+ // leader does not have the previous entry in its in-memory journal.
+
+ log.info("{}: Cannot append entries because the replicatedToAllIndex {} does not appear to be in the "
+ + "in-memory journal - lastIndex: {}, snapshotIndex: {}, snapshotTerm: {}", logName(),
+ appendEntries.getReplicatedToAllIndex(), lastIndex,
+ context.getReplicatedLog().getSnapshotIndex(), context.getReplicatedLog().getSnapshotTerm());
+
+ sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
+ return true;
+ }
+
+ final List<ReplicatedLogEntry> entries = appendEntries.getEntries();
+ if (!entries.isEmpty() && !isLogEntryPresent(entries.get(0).getIndex() - 1)) {
+ log.info("{}: Cannot append entries because the calculated previousIndex {} was not found in the "
+ + "in-memory journal - lastIndex: {}, snapshotIndex: {}, snapshotTerm: {}", logName(),
+ entries.get(0).getIndex() - 1, lastIndex, context.getReplicatedLog().getSnapshotIndex(),
+ context.getReplicatedLog().getSnapshotTerm());
+
+ sendOutOfSyncAppendEntriesReply(sender, false, appendEntries.getLeaderRaftVersion());
+ return true;
+ }
+ }
+
+ return false;
+ }
+
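+ /**
+  * Sends a negative AppendEntriesReply carrying this follower's last index and term so the leader
+  * can reconcile, optionally forcing an InstallSnapshot when the logs cannot converge by replay.
+  */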
+ private void sendOutOfSyncAppendEntriesReply(final ActorRef sender, final boolean forceInstallSnapshot,
+ final short leaderRaftVersion) {
+ // We found that the log was out of sync so just send a negative reply.
+ final AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex(),
+ lastTerm(), context.getPayloadVersion(), forceInstallSnapshot, needsLeaderAddress(),
+ leaderRaftVersion);
+
+ log.info("{}: Follower is out-of-sync so sending negative reply: {}", logName(), reply);
+ sender.tell(reply, actor());
+ }
+
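+ // The reply requests the leader's address when it cannot be resolved locally - presumably so the
+ // leader can supply its address in a subsequent message.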
+ private boolean needsLeaderAddress() {
+ return context.getPeerAddress(leaderId) == null;
+ }
+
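+ // AppendEntriesReply and RequestVoteReply are meaningful only to a leader or candidate; a follower
+ // that receives one (e.g. after a role transition) simply ignores it and remains Follower.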
+ @Override
+ protected RaftActorBehavior handleAppendEntriesReply(final ActorRef sender,
+ final AppendEntriesReply appendEntriesReply) {
+ return this;
+ }
+
+ @Override
+ protected RaftActorBehavior handleRequestVoteReply(final ActorRef sender,
+ final RequestVoteReply requestVoteReply) {
+ return this;
+ }
+
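+ // A follower applies replicated entries without an originating client, hence the null client actor
+ // and null identifier in the ApplyState.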
+ @Override
+ final ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
+ return new ApplyState(null, null, entry);
+ }
+
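+ // Main message dispatch for the Follower state: election timeouts, sliced-message re-assembly,
+ // term housekeeping for RaftRPCs, snapshot installation, and leader-liveness timer maintenance.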
+ @Override
+ public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
+ if (message instanceof ElectionTimeout || message instanceof TimeoutNow) {
+ return handleElectionTimeout(message);
+ }
+
+ if (appendEntriesMessageAssembler.handleMessage(message, actor())) {
+ return this;
+ }
+
+ if (!(message instanceof RaftRPC)) {
+ // The rest of the processing requires the message to be a RaftRPC
+ return null;
+ }
+
+ final RaftRPC rpc = (RaftRPC) message;
+ // If RPC request or response contains term T > currentTerm:
+ // set currentTerm = T, convert to follower (§5.1)
+ // This applies to all RPC messages and responses
+ if (rpc.getTerm() > context.getTermInformation().getCurrentTerm() && shouldUpdateTerm(rpc)) {
+ log.info("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
+ logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
+ context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+ }
+
+ if (rpc instanceof InstallSnapshot) {
+ handleInstallSnapshot(sender, (InstallSnapshot) rpc);
+ restartLastLeaderMessageTimer();
+ scheduleElection(electionDuration());
+ return this;
+ }
+
+ if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
+ restartLastLeaderMessageTimer();
+ scheduleElection(electionDuration());
+ }
+
+ return super.handleMessage(sender, rpc);
+ }
+
+ private RaftActorBehavior handleElectionTimeout(final Object message) {
+ // If the message is ElectionTimeout, verify we haven't actually seen a message from the leader
+ // during the election timeout interval. It may be that the election timer expired because this actor
+ // was busy and messages got delayed, in which case leader messages would be backed up in the
+ // queue but would be processed before the ElectionTimeout message and thus would restart the
+ // lastLeaderMessageTimer.
+ long lastLeaderMessageInterval = lastLeaderMessageTimer.elapsed(TimeUnit.MILLISECONDS);
+ long electionTimeoutInMillis = context.getConfigParams().getElectionTimeOutInterval().toMillis();
+ boolean noLeaderMessageReceived = !lastLeaderMessageTimer.isRunning()
+ || lastLeaderMessageInterval >= electionTimeoutInMillis;
+
+ if (canStartElection()) {
+ if (message instanceof TimeoutNow) {
+ log.debug("{}: Received TimeoutNow - switching to Candidate", logName());
+ return internalSwitchBehavior(RaftState.Candidate);
+ } else if (noLeaderMessageReceived) {
+ // Check the cluster state to see if the leader is known to be up before we go to Candidate.
+ // However if we haven't heard from the leader in a long time even though the cluster state
+ // indicates it's up then something is wrong - leader might be stuck indefinitely - so switch
+ // to Candidate.
+ long maxElectionTimeout = electionTimeoutInMillis * MAX_ELECTION_TIMEOUT_FACTOR;
+ if (isLeaderAvailabilityKnown() && lastLeaderMessageInterval < maxElectionTimeout) {
+ log.debug("{}: Received ElectionTimeout but leader appears to be available", logName());
+ scheduleElection(electionDuration());
+ } else if (isThisFollowerIsolated()) {
+ log.debug("{}: this follower is isolated. Do not switch to Candidate for now.", logName());
+ setLeaderId(null);
+ scheduleElection(electionDuration());
+ } else {
+ log.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
+ return internalSwitchBehavior(RaftState.Candidate);
+ }
+ } else {
+ log.debug("{}: Received ElectionTimeout but lastLeaderMessageInterval {} < election timeout {}",
+ logName(), lastLeaderMessageInterval, context.getConfigParams().getElectionTimeOutInterval());
+ scheduleElection(electionDuration());
+ }
+ } else if (message instanceof ElectionTimeout) {
+ if (noLeaderMessageReceived) {
+ setLeaderId(null);
+ }
+
+ scheduleElection(electionDuration());
+ }
+
+ return this;
+ }
+
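+ // Consults the Akka Cluster membership state: the leader counts as available only when it is not in
+ // the unreachable set and its member status is Up or WeaklyUp. Returns false when clustering is not
+ // in use or the leader cannot be identified.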
+ private boolean isLeaderAvailabilityKnown() {
+ if (leaderId == null) {
+ return false;
+ }
+
+ Optional<Cluster> cluster = context.getCluster();
+ if (!cluster.isPresent()) {
+ return false;
+ }
+
+ ActorSelection leaderActor = context.getPeerActorSelection(leaderId);
+ if (leaderActor == null) {
+ return false;
+ }
+
+ Address leaderAddress = leaderActor.anchorPath().address();
+
+ CurrentClusterState state = cluster.get().state();
+ Set<Member> unreachable = state.getUnreachable();
+
+ log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
+ unreachable);
+
+ for (Member m: unreachable) {
+ if (leaderAddress.equals(m.address())) {
+ log.info("{}: Leader {} is unreachable", logName(), leaderAddress);
+ return false;
+ }
+ }
+
+ for (Member m: state.getMembers()) {
+ if (leaderAddress.equals(m.address())) {
+ if (m.status() == MemberStatus.up() || m.status() == MemberStatus.weaklyUp()) {
+ log.debug("{}: Leader {} cluster status is {} - leader is available", logName(),
+ leaderAddress, m.status());
+ return true;
+ } else {
+ log.debug("{}: Leader {} cluster status is {} - leader is unavailable", logName(),
+ leaderAddress, m.status());
+ return false;
+ }
+ }
+ }
+
+ log.debug("{}: Leader {} not found in the cluster member set", logName(), leaderAddress);
+
+ return false;
+ }
+
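+ // This follower considers itself isolated when every other cluster member is unreachable, i.e. the
+ // only member left after removing the unreachable set is this node itself.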
+ private boolean isThisFollowerIsolated() {
+ final Optional<Cluster> maybeCluster = context.getCluster();
+ if (!maybeCluster.isPresent()) {
+ return false;
+ }
+
+ final Cluster cluster = maybeCluster.get();
+ final Member selfMember = cluster.selfMember();
+
+ final CurrentClusterState state = cluster.state();
+ final Set<Member> unreachable = state.getUnreachable();
+ final Iterable<Member> members = state.getMembers();
+
+ log.debug("{}: Checking if this node is isolated in the cluster unreachable set {},"
+ + "all members {} self member: {}", logName(), unreachable, members, selfMember);
+
+ // No unreachable peers means we cannot be isolated.
+ if (unreachable.isEmpty()) {
+ return false;
+ }
+
+ final Set<Member> membersToCheck = new HashSet<>();
+ members.forEach(membersToCheck::add);
+
+ membersToCheck.removeAll(unreachable);
+
+ // Check whether the only reachable member is this node itself.
+ return membersToCheck.size() == 1 && membersToCheck.iterator().next().equals(selfMember);
+ }
+
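+ // Re-assembles a chunked snapshot streamed by the leader. Each chunk is acknowledged individually;
+ // when the final chunk arrives, the assembled snapshot is handed to the RaftActor via ApplySnapshot
+ // and the outcome of that application is reported back to the leader.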
+ private void handleInstallSnapshot(final ActorRef sender, final InstallSnapshot installSnapshot) {
+
+ log.debug("{}: handleInstallSnapshot: {}", logName(), installSnapshot);
+
+ leaderId = installSnapshot.getLeaderId();
+
+ if (snapshotTracker == null) {
+ snapshotTracker = new SnapshotTracker(log, installSnapshot.getTotalChunks(), installSnapshot.getLeaderId(),
+ context);
+ }
+
+ updateInitialSyncStatus(installSnapshot.getLastIncludedIndex(), installSnapshot.getLeaderId());
+
+ try {
+ final InstallSnapshotReply reply = new InstallSnapshotReply(
+ currentTerm(), context.getId(), installSnapshot.getChunkIndex(), true);
+
+ if (snapshotTracker.addChunk(installSnapshot.getChunkIndex(), installSnapshot.getData(),
+ installSnapshot.getLastChunkHashCode())) {
+
+ log.info("{}: Snapshot installed from leader: {}", logName(), installSnapshot.getLeaderId());
+
+ Snapshot snapshot = Snapshot.create(
+ context.getSnapshotManager().convertSnapshot(snapshotTracker.getSnapshotBytes()),
+ new ArrayList<>(), // no unapplied entries - the snapshot covers everything up to lastIncludedIndex
+ installSnapshot.getLastIncludedIndex(),
+ installSnapshot.getLastIncludedTerm(),
+ installSnapshot.getLastIncludedIndex(), // last applied index/term coincide with the snapshot boundary
+ installSnapshot.getLastIncludedTerm(),
+ context.getTermInformation().getCurrentTerm(),
+ context.getTermInformation().getVotedFor(),
+ installSnapshot.getServerConfig().orElse(null));
+
+ ApplySnapshot.Callback applySnapshotCallback = new ApplySnapshot.Callback() {
+ @Override
+ public void onSuccess() {
+ log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);
+
+ sender.tell(reply, actor());
+ }
+
+ @Override
+ public void onFailure() {
+ sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
+ }
+ };
+
+ actor().tell(new ApplySnapshot(snapshot, applySnapshotCallback), actor());
+
+ closeSnapshotTracker();
+ } else {
+ log.debug("{}: handleInstallSnapshot returning: {}", logName(), reply);
+
+ sender.tell(reply, actor());
+ }
+ } catch (IOException e) {
+ log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);
+
+ sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
+ -1, false), actor());
+
+ closeSnapshotTracker();
+ }