X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-akka-raft%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fraft%2Fbehaviors%2FAbstractLeader.java;h=f97343f736550846a5fe2df0425e74274e509f4b;hb=b4d30424d8fd2e2a7ae37a9ff169b672e8e43550;hp=6560ad76c3937285300173f16df53f4c60d9ae1d;hpb=78d3eea7d730f07f89c36fe24afbf51781a21bc3;p=controller.git

diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
index 6560ad76c3..f97343f736 100644
--- a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
+++ b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
@@ -48,16 +48,16 @@ import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -166,7 +166,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         followerToLog.remove(followerId);
     }
 
-    public void updateMinReplicaCount() {
+    public final void updateMinReplicaCount() {
         int numVoting = 0;
         for (PeerInfo peer: context.getPeers()) {
             if (peer.isVoting()) {
@@ -469,8 +469,8 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         // and became the leader again,. We still want to apply this as a local modification because
         // we have resumed leadership with that log entry having been committed.
         final Payload payload = entry.getData();
-        if (payload instanceof IdentifiablePayload) {
-            return new ApplyState(null, ((IdentifiablePayload) payload).getIdentifier(), entry);
+        if (payload instanceof IdentifiablePayload identifiable) {
+            return new ApplyState(null, identifiable.getIdentifier(), entry);
         }
 
         return new ApplyState(null, null, entry);
@@ -493,47 +493,45 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             return this;
         }
 
-        if (message instanceof RaftRPC) {
-            RaftRPC rpc = (RaftRPC) message;
-            // If RPC request or response contains term T > currentTerm:
-            // set currentTerm = T, convert to follower (§5.1)
-            // This applies to all RPC messages and responses
-            if (rpc.getTerm() > context.getTermInformation().getCurrentTerm() && shouldUpdateTerm(rpc)) {
-                log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
-                        logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
-
-                context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
-
-                // This is a special case. Normally when stepping down as leader we don't process and reply to the
-                // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
-                // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
-                // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
-                // state and starting a new election and grabbing leadership back before the other candidate node can
-                // start a new election due to lack of responses. This case would only occur if there isn't a majority
-                // of other nodes available that can elect the requesting candidate. Since we're transferring
-                // leadership, we should make every effort to get the requesting node elected.
-                if (rpc instanceof RequestVote && context.getRaftActorLeadershipTransferCohort() != null) {
-                    log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
-                    super.handleMessage(sender, rpc);
-                }
-
-                return internalSwitchBehavior(RaftState.Follower);
+        // If RPC request or response contains term T > currentTerm:
+        // set currentTerm = T, convert to follower (§5.1)
+        // This applies to all RPC messages and responses
+        if (message instanceof RaftRPC rpc && rpc.getTerm() > context.getTermInformation().getCurrentTerm()
+                && shouldUpdateTerm(rpc)) {
+
+            log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
+                logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
+            context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+
+            // This is a special case. Normally when stepping down as leader we don't process and reply to the
+            // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
+            // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
+            // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
+            // state and starting a new election and grabbing leadership back before the other candidate node can
+            // start a new election due to lack of responses. This case would only occur if there isn't a majority
+            // of other nodes available that can elect the requesting candidate. Since we're transferring
+            // leadership, we should make every effort to get the requesting node elected.
+            if (rpc instanceof RequestVote requestVote && context.getRaftActorLeadershipTransferCohort() != null) {
+                log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
+                requestVote(sender, requestVote);
             }
+
+            return internalSwitchBehavior(RaftState.Follower);
         }
 
         if (message instanceof SendHeartBeat) {
             beforeSendHeartbeat();
             sendHeartBeat();
             scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
-        } else if (message instanceof SendInstallSnapshot) {
-            SendInstallSnapshot sendInstallSnapshot = (SendInstallSnapshot) message;
+        } else if (message instanceof SendInstallSnapshot sendInstallSnapshot) {
             setSnapshotHolder(new SnapshotHolder(sendInstallSnapshot.getSnapshot(),
                 sendInstallSnapshot.getSnapshotBytes()));
             sendInstallSnapshot();
-        } else if (message instanceof Replicate) {
-            replicate((Replicate) message);
-        } else if (message instanceof InstallSnapshotReply) {
-            handleInstallSnapshotReply((InstallSnapshotReply) message);
+        } else if (message instanceof Replicate replicate) {
+            replicate(replicate);
+        } else if (message instanceof InstallSnapshotReply installSnapshotReply) {
+            handleInstallSnapshotReply(installSnapshotReply);
         } else if (message instanceof CheckConsensusReached) {
             possiblyUpdateCommitIndex();
         } else {
@@ -783,7 +781,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
 
         // If the first entry's size exceeds the max data size threshold, it will be returned from the call above. If
        // that is the case, then we need to slice it into smaller chunks.
-        if (!(entries.size() == 1 && entries.get(0).getData().size() > maxDataSize)) {
+        if (entries.size() != 1 || entries.get(0).getData().serializedSize() <= maxDataSize) {
            // Don't need to slice.
            return entries;
        }
@@ -904,7 +902,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         }
 
         boolean captureInitiated = context.getSnapshotManager().captureToInstall(context.getReplicatedLog().last(),
-            this.getReplicatedToAllIndex(), followerId);
+            getReplicatedToAllIndex(), followerId);
         if (captureInitiated) {
             followerLogInfo.setLeaderInstallSnapshotState(new LeaderInstallSnapshotState(
                 context.getConfigParams().getSnapshotChunkSize(), logName()));
@@ -981,7 +979,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             } catch (IOException e) {
                 log.warn("{}: Unable to send chunk: {}/{}. Reseting snapshot progress. Snapshot state: {}",
                         logName(), installSnapshotState.getChunkIndex(), installSnapshotState.getTotalChunks(),
-                        installSnapshotState);
+                        installSnapshotState, e);
                 installSnapshotState.reset();
             }
         }
@@ -1001,8 +999,8 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                     chunkIndex,
                     installSnapshotState.getTotalChunks(),
                     OptionalInt.of(installSnapshotState.getLastChunkHashCode()),
-                    serverConfig
-                ).toSerializable(followerLogInfo.getRaftVersion()),
+                    serverConfig,
+                    followerLogInfo.getRaftVersion()),
                 actor()
             );
         }
@@ -1124,8 +1122,8 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         private final ByteSource snapshotBytes;
 
         SnapshotHolder(final Snapshot snapshot, final ByteSource snapshotBytes) {
-            this.lastIncludedTerm = snapshot.getLastAppliedTerm();
-            this.lastIncludedIndex = snapshot.getLastAppliedIndex();
+            lastIncludedTerm = snapshot.getLastAppliedTerm();
+            lastIncludedIndex = snapshot.getLastAppliedIndex();
             this.snapshotBytes = snapshotBytes;
         }
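Note on the refactoring pattern: most of the mechanical churn in this diff comes from replacing cast-after-instanceof checks with Java 16+ pattern matching for instanceof, which binds a typed variable directly in the condition (e.g. "message instanceof RaftRPC rpc"). A minimal standalone sketch of the idiom, using hypothetical Message/Heartbeat types rather than the actual RAFT classes:

// Self-contained illustration of the pattern-matching instanceof idiom adopted above.
// Message and Heartbeat are hypothetical stand-ins, not OpenDaylight classes.
public class PatternMatchDemo {
    interface Message { }

    record Heartbeat(long term) implements Message { }

    // Pre-Java 16 style, as in the removed lines: test the type, then cast.
    static long termOfOld(final Message message) {
        if (message instanceof Heartbeat) {
            return ((Heartbeat) message).term();
        }
        return -1;
    }

    // Java 16+ style, as in the added lines: the check binds 'heartbeat' directly,
    // so the explicit cast disappears and the binding can feed further conditions.
    static long termOfNew(final Message message) {
        if (message instanceof Heartbeat heartbeat && heartbeat.term() >= 0) {
            return heartbeat.term();
        }
        return -1;
    }

    public static void main(final String[] args) {
        final Message msg = new Heartbeat(5);
        System.out.println(termOfOld(msg) + " " + termOfNew(msg));
    }
}

The other behavior-preserving rewrite worth calling out is the slicing guard in the @@ -783 hunk: "!(entries.size() == 1 && size > maxDataSize)" is De Morgan'd into "entries.size() != 1 || size <= maxDataSize", with the size read switching from getData().size() to getData().serializedSize() on the relocated Payload class.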