followerLogInformation
.setNextIndex(appendEntriesReply.getLogLastIndex() + 1);
} else {
+
+ // TODO: When we find that the follower is out of sync with the
+            // Leader we simply decrement that follower's next index by 1.
+ // Would it be possible to do better than this? The RAFT spec
+            // does not explicitly deal with it, but it may be something
+            // for us to think about.
+
followerLogInformation.decrNextIndex();
}
                // set currentTerm = T, convert to follower (§5.1)
// This applies to all RPC messages and responses
if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
- context.getTermInformation().update(rpc.getTerm(), null);
+ context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
return RaftState.Follower;
}
}
} else if (message instanceof Replicate) {
replicate((Replicate) message);
} else if (message instanceof InstallSnapshotReply){
- // FIXME : Should I be checking the term here too?
handleInstallSnapshotReply(
(InstallSnapshotReply) message);
}
List<ReplicatedLogEntry> entries = Collections.emptyList();
if(context.getReplicatedLog().isPresent(nextIndex)){
+ // TODO: Instead of sending all entries from nextIndex
+ // only send a fixed number of entries to each follower
+ // This is to avoid the situation where there are a lot of
+ // entries to install for a fresh follower or to a follower
+                // that has fallen too far behind with the log but is not
+                // yet eligible to receive a snapshot
entries =
context.getReplicatedLog().getFrom(nextIndex);
}
}
}
+ /**
+     * An installSnapshot is scheduled at an interval that is a multiple of
+ * a HEARTBEAT_INTERVAL. This is to avoid the need to check for installing
+ * snapshots at every heartbeat.
+ */
private void installSnapshotIfNeeded(){
for (String followerId : followerToActor.keySet()) {
ActorSelection followerActor =