import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
// This message will be used during recovery.
// If the code above throws an error and this message is not sent, that is fine:
// the AppendEntries received later would add this message to the journal.
- actor().tell(new ApplyLogEntries((int) context.getLastApplied()), actor());
+ actor().tell(new ApplyJournalEntries(context.getLastApplied()), actor());
}
protected Object fromSerializableMessage(Object serializable){
long lastApplied = context.getLastApplied();
long tempMin = Math.min(snapshotCapturedIndex, (lastApplied > -1 ? lastApplied - 1 : -1));
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("{}: performSnapshotWithoutCapture: snapshotCapturedIndex: {}, lastApplied: {}, tempMin: {}",
+ logName, snapshotCapturedIndex, lastApplied, tempMin);
+ }
+
if (tempMin > -1 && context.getReplicatedLog().isPresent(tempMin)) {
LOG.debug("{}: fakeSnapshot purging log to {} for term {}", logName(), tempMin,
context.getTermInformation().getCurrentTerm());
context.getReplicatedLog().snapshotPreCommit(tempMin, entry.getTerm());
context.getReplicatedLog().snapshotCommit();
setReplicatedToAllIndex(tempMin);
+ } else if(tempMin > getReplicatedToAllIndex()) {
+ // It's possible a follower was lagging and an install snapshot advanced its match index past
+ // the current replicatedToAllIndex. Since the follower is now caught up we should advance the
+ // replicatedToAllIndex (to tempMin). The fact that tempMin wasn't found in the log is likely
+ // due to a previous snapshot triggered by the memory threshold being exceeded; in that case we
+ // trim the log to the last applied index even if previous entries weren't replicated to all followers.
+ setReplicatedToAllIndex(tempMin);
}
}
+ /**
+  * Returns the identifier of this actor as held by the associated context.
+  * NOTE(review): presumably the RAFT member id — confirm against RaftActorContext.
+  */
+ protected String getId(){
+ return context.getId();
+ }
+
}