Fix remaining CS errors in sal-akka-raft and enable enforcement 05/46805/3
author Tom Pantelis <tpanteli@brocade.com>
Tue, 11 Oct 2016 20:19:16 +0000 (16:19 -0400)
committer Tom Pantelis <tpanteli@brocade.com>
Wed, 12 Oct 2016 16:19:57 +0000 (16:19 +0000)
Some checkstyle violations were missed in previous patches; enabling
enforcement revealed them.

Change-Id: I3a31b24aea69adfe8d50071fdce27fbd69c04b58
Signed-off-by: Tom Pantelis <tpanteli@brocade.com>
18 files changed:
opendaylight/md-sal/sal-akka-raft/pom.xml
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ConfigParams.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ElectionTerm.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformationImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorLeadershipTransferCohort.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLog.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/Snapshot.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotManager.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotState.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotState.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesTest.java

index 9e35228c2297f2df80738f547e4d07123877e3e6..c4943cd2723e1161ba774188cb5743d1c9413e15 100644 (file)
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
         <configuration>
-            <excludes>**/protobuff/**/*,**/target/**/*</excludes>
+          <propertyExpansion>checkstyle.violationSeverity=error</propertyExpansion>
         </configuration>
       </plugin>
 
index 26a828b9b0e4f829b42f105771977fbe76fb838d..652771b4f1c6c6250600e0463ee8242520692757 100644 (file)
@@ -16,7 +16,7 @@ import scala.concurrent.duration.FiniteDuration;
  * <p/>
  * Any component using this implementation might want to provide an implementation of
  * this interface to configure
- *
+ * <p/>
  * A default implementation will be used if none is provided.
  *
  * @author Kamal Rameshan
@@ -51,7 +51,8 @@ public interface ConfigParams {
     FiniteDuration getElectionTimeOutInterval();
 
     /**
-     * Returns the maximum election time variance. The election is scheduled using both the election timeout and variance.
+     * Returns the maximum election time variance. The election is scheduled using both the election timeout
+     * and variance.
      *
      * @return the election time variance.
      */
@@ -91,8 +92,7 @@ public interface ConfigParams {
     /**
      * Returns the RaftPolicy used to determine certain Raft behaviors.
      *
-     * @return an instance of org.opendaylight.controller.cluster.raft.policy.RaftPolicy, if set,  or an instance of the
-     * DefaultRaftPolicy.
+     * @return an instance of RaftPolicy, if set, or an instance of the DefaultRaftPolicy.
      */
     @Nonnull
     RaftPolicy getRaftPolicy();
index 6faf5df2e89c12ab7ed30137757e9cc4c4339401..2d2f95bd767a566edfd0db331b3ece1d08c64d43 100644 (file)
@@ -20,9 +20,7 @@ import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
- * Default implementation of the ConfigParams
- *
- * If no implementation is provided for ConfigParams, then this will be used.
+ * Default implementation of the ConfigParams.
  */
 public class DefaultConfigParamsImpl implements ConfigParams {
 
@@ -33,7 +31,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
     private static final int JOURNAL_RECOVERY_LOG_BATCH_SIZE = 1000;
 
     /**
-     * The maximum election time variance
+     * The maximum election time variance.
      */
     private static final int ELECTION_TIME_MAX_VARIANCE = 100;
 
@@ -42,7 +40,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
 
     /**
      * The interval at which a heart beat message will be sent to the remote
-     * RaftActor
+     * RaftActor.
      * <p/>
      * Since this is set to 100 milliseconds the Election timeout should be
      * at least 200 milliseconds
@@ -78,7 +76,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
         this.snapshotBatchCount = snapshotBatchCount;
     }
 
-    public void setSnapshotDataThresholdPercentage(int snapshotDataThresholdPercentage){
+    public void setSnapshotDataThresholdPercentage(int snapshotDataThresholdPercentage) {
         this.snapshotDataThresholdPercentage = snapshotDataThresholdPercentage;
     }
 
@@ -94,12 +92,12 @@ public class DefaultConfigParamsImpl implements ConfigParams {
         this.isolatedLeaderCheckInterval = isolatedLeaderCheckInterval.toMillis();
     }
 
-    public void setElectionTimeoutFactor(long electionTimeoutFactor){
+    public void setElectionTimeoutFactor(long electionTimeoutFactor) {
         this.electionTimeoutFactor = electionTimeoutFactor;
         electionTimeOutInterval = null;
     }
 
-    public void setCustomRaftPolicyImplementationClass(String customRaftPolicyImplementationClass){
+    public void setCustomRaftPolicyImplementationClass(String customRaftPolicyImplementationClass) {
         this.customRaftPolicyImplementationClass = customRaftPolicyImplementationClass;
     }
 
@@ -126,7 +124,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
 
     @Override
     public FiniteDuration getElectionTimeOutInterval() {
-        if(electionTimeOutInterval == null) {
+        if (electionTimeOutInterval == null) {
             electionTimeOutInterval = getHeartBeatInterval().$times(electionTimeoutFactor);
         }
 
@@ -163,23 +161,25 @@ public class DefaultConfigParamsImpl implements ConfigParams {
         return policySupplier.get();
     }
 
-    private class PolicySupplier implements Supplier<RaftPolicy>{
+    private class PolicySupplier implements Supplier<RaftPolicy> {
         @Override
+        @SuppressWarnings("checkstyle:IllegalCatch")
         public RaftPolicy get() {
-            if(Strings.isNullOrEmpty(DefaultConfigParamsImpl.this.customRaftPolicyImplementationClass)){
+            if (Strings.isNullOrEmpty(DefaultConfigParamsImpl.this.customRaftPolicyImplementationClass)) {
                 LOG.debug("No custom RaftPolicy specified. Using DefaultRaftPolicy");
                 return DefaultRaftPolicy.INSTANCE;
             }
+
             try {
                 String className = DefaultConfigParamsImpl.this.customRaftPolicyImplementationClass;
                 LOG.info("Trying to use custom RaftPolicy {}", className);
-                Class<?> c = Class.forName(className);
-                return (RaftPolicy)c.newInstance();
+                return (RaftPolicy)Class.forName(className).newInstance();
             } catch (Exception e) {
-                if(LOG.isDebugEnabled()) {
+                if (LOG.isDebugEnabled()) {
                     LOG.error("Could not create custom raft policy, will stick with default", e);
                 } else {
-                    LOG.error("Could not create custom raft policy, will stick with default : cause = {}", e.getMessage());
+                    LOG.error("Could not create custom raft policy, will stick with default : cause = {}",
+                            e.getMessage());
                 }
             }
             return DefaultRaftPolicy.INSTANCE;
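
As a side note on the PolicySupplier change above: it loads a custom RaftPolicy class reflectively and falls back to the default on any failure. A minimal standalone sketch of that pattern follows; Policy, DEFAULT and PolicyLoader are illustrative stand-ins, not the actual raft types.

// Minimal sketch of the load-with-fallback pattern used by PolicySupplier.get() above.
// "Policy", "DEFAULT" and "PolicyLoader" are hypothetical names for illustration only.
final class PolicyLoader {

    /** Hypothetical policy interface standing in for RaftPolicy. */
    interface Policy {
    }

    /** Hypothetical default instance standing in for DefaultRaftPolicy.INSTANCE. */
    static final Policy DEFAULT = new Policy() {
    };

    @SuppressWarnings("checkstyle:IllegalCatch")
    static Policy load(String className) {
        if (className == null || className.isEmpty()) {
            // Nothing configured - use the default.
            return DEFAULT;
        }

        try {
            // Instantiate the configured implementation reflectively.
            return (Policy) Class.forName(className).newInstance();
        } catch (Exception e) {
            // Missing class, wrong type, no accessible no-arg constructor - fall back to the default.
            return DEFAULT;
        }
    }
}
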
index ff79004bcebdcaea3ff541951c3a5a170e5e74ce..e8c9b458c6e4e2434b8e9cc4461458a6a93aaf14 100644 (file)
@@ -12,10 +12,10 @@ import javax.annotation.Nullable;
 
 /**
  * ElectionTerm contains information about a RaftActors election term.
- * <p>
+ * <p/>
  * This information includes the last known current term of the RaftActor
  * and which candidate was voted for by the RaftActor in that term.
- * <p>
+ * <p/>
  * This class ensures that election term information is persisted.
  */
 public interface ElectionTerm {
index f1016357575e6ca11b76853ce7d9e3f66e68620b..b78c4fac223dcda8d98a8571dfa9cf4a33f28ba6 100644 (file)
@@ -157,7 +157,7 @@ public class FollowerLogInformationImpl implements FollowerLogInformation {
         return true;
     }
 
-    private void resetLastReplicated(){
+    private void resetLastReplicated() {
         lastReplicatedIndex = getNextIndex();
         if (lastReplicatedStopwatch.isRunning()) {
             lastReplicatedStopwatch.reset();
index cb07e59c5b9a44ad1c7fe778bf74de4743b576ea..8d29614105c5d86e4ab683f48a8b80b326cf14bd 100644 (file)
@@ -179,6 +179,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
     }
 
     @VisibleForTesting
+    @SuppressWarnings("checkstyle:IllegalCatch")
     protected void changeCurrentBehavior(RaftActorBehavior newBehavior) {
         final RaftActorBehavior currentBehavior = getCurrentBehavior();
         if (currentBehavior != null) {
@@ -206,6 +207,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
     }
 
     /**
+     * Handles a message.
+     *
      * @deprecated This method is not final for testing purposes. DO NOT OVERRIDE IT, override
      * {@link #handleNonRaftCommand(Object)} instead.
      */
@@ -461,7 +464,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
     private void handleBehaviorChange(BehaviorState oldBehaviorState, RaftActorBehavior currentBehavior) {
         RaftActorBehavior oldBehavior = oldBehaviorState.getBehavior();
 
-        if (oldBehavior != currentBehavior){
+        if (oldBehavior != currentBehavior) {
             onStateChanged();
         }
 
index f9f9478dd8afb5f4fb9d3f505d31c8a5438462c3..2896c44a2cef79ef12e002a5332b414e3368e4db 100644 (file)
@@ -58,8 +58,7 @@ public interface RaftActorContext {
     /**
      * Returns the reference to the RaftActor.
      *
-     * @return A reference to the RaftActor itself. This could be used to send messages
-     * to the RaftActor
+     * @return the reference to the RaftActor itself. This can be used to send messages to the RaftActor
      */
     ActorRef getActor();
 
@@ -173,8 +172,8 @@ public interface RaftActorContext {
     /**
      * Returns the PeerInfo for the given peer.
      *
-     * @param peerId
-     * @return the PeerInfo or null if not found.
+     * @param peerId the id of the peer
+     * @return the PeerInfo or null if not found
      */
     @Nullable
     PeerInfo getPeerInfo(String peerId);
index 65567d3ca53a713156109b91f61a1d1ad9ad22b2..dfbffb726ab4f7a079d4efc3c770f8ab3dbd5bb9 100644 (file)
@@ -95,7 +95,7 @@ public class RaftActorContextImpl implements RaftActorContext {
         this.persistenceProvider = persistenceProvider;
         this.log = logger;
 
-        for(Map.Entry<String, String> e: peerAddresses.entrySet()) {
+        for (Map.Entry<String, String> e: peerAddresses.entrySet()) {
             peerInfoMap.put(e.getKey(), new PeerInfo(e.getKey(), e.getValue(), VotingState.VOTING));
         }
     }
@@ -115,12 +115,12 @@ public class RaftActorContextImpl implements RaftActorContext {
     }
 
     @Override
-    public ActorRef actorOf(Props props){
+    public ActorRef actorOf(Props props) {
         return context.actorOf(props);
     }
 
     @Override
-    public ActorSelection actorSelection(String path){
+    public ActorSelection actorSelection(String path) {
         return context.actorSelection(path);
     }
 
@@ -135,11 +135,12 @@ public class RaftActorContextImpl implements RaftActorContext {
     }
 
     @Override
+    @SuppressWarnings("checkstyle:IllegalCatch")
     public Optional<Cluster> getCluster() {
-        if(cluster == null) {
+        if (cluster == null) {
             try {
                 cluster = Optional.of(Cluster.get(getActorSystem()));
-            } catch(Exception e) {
+            } catch (Exception e) {
                 // An exception means there's no cluster configured. This will only happen in unit tests.
                 log.debug("{}: Could not obtain Cluster: {}", getId(), e);
                 cluster = Optional.empty();
@@ -210,9 +211,9 @@ public class RaftActorContextImpl implements RaftActorContext {
     public String getPeerAddress(String peerId) {
         String peerAddress;
         PeerInfo peerInfo = peerInfoMap.get(peerId);
-        if(peerInfo != null) {
+        if (peerInfo != null) {
             peerAddress = peerInfo.getAddress();
-            if(peerAddress == null) {
+            if (peerAddress == null) {
                 peerAddress = configParams.getPeerAddressResolver().resolve(peerId);
                 peerInfo.setAddress(peerAddress);
             }
@@ -224,19 +225,19 @@ public class RaftActorContextImpl implements RaftActorContext {
     }
 
     @Override
-    public void updatePeerIds(ServerConfigurationPayload serverConfig){
+    public void updatePeerIds(ServerConfigurationPayload serverConfig) {
         votingMember = true;
         boolean foundSelf = false;
         Set<String> currentPeers = new HashSet<>(this.getPeerIds());
-        for(ServerInfo server: serverConfig.getServerConfig()) {
-            if(getId().equals(server.getId())) {
+        for (ServerInfo server : serverConfig.getServerConfig()) {
+            if (getId().equals(server.getId())) {
                 foundSelf = true;
-                if(!server.isVoting()) {
+                if (!server.isVoting()) {
                     votingMember = false;
                 }
             } else {
-                VotingState votingState = server.isVoting() ? VotingState.VOTING: VotingState.NON_VOTING;
-                if(!currentPeers.contains(server.getId())) {
+                VotingState votingState = server.isVoting() ? VotingState.VOTING : VotingState.NON_VOTING;
+                if (!currentPeers.contains(server.getId())) {
                     this.addToPeers(server.getId(), null, votingState);
                 } else {
                     this.getPeerInfo(server.getId()).setVotingState(votingState);
@@ -245,11 +246,11 @@ public class RaftActorContextImpl implements RaftActorContext {
             }
         }
 
-        for(String peerIdToRemove: currentPeers) {
+        for (String peerIdToRemove : currentPeers) {
             this.removePeer(peerIdToRemove);
         }
 
-        if(!foundSelf) {
+        if (!foundSelf) {
             votingMember = false;
         }
 
@@ -270,7 +271,7 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public void removePeer(String name) {
-        if(getId().equals(name)) {
+        if (getId().equals(name)) {
             votingMember = false;
         } else {
             peerInfoMap.remove(name);
@@ -280,7 +281,7 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override public ActorSelection getPeerActorSelection(String peerId) {
         String peerAddress = getPeerAddress(peerId);
-        if(peerAddress != null){
+        if (peerAddress != null) {
             return actorSelection(peerAddress);
         }
         return null;
@@ -289,7 +290,7 @@ public class RaftActorContextImpl implements RaftActorContext {
     @Override
     public void setPeerAddress(String peerId, String peerAddress) {
         PeerInfo peerInfo = peerInfoMap.get(peerId);
-        if(peerInfo != null) {
+        if (peerInfo != null) {
             log.info("Peer address for peer {} set to {}", peerId, peerAddress);
             peerInfo.setAddress(peerAddress);
         }
@@ -297,7 +298,7 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public SnapshotManager getSnapshotManager() {
-        if(snapshotManager == null){
+        if (snapshotManager == null) {
             snapshotManager = new SnapshotManager(this, log);
         }
         return snapshotManager;
@@ -346,11 +347,11 @@ public class RaftActorContextImpl implements RaftActorContext {
         }
         Collection<PeerInfo> peers = getPeers();
         List<ServerInfo> newConfig = new ArrayList<>(peers.size() + 1);
-        for(PeerInfo peer: peers) {
+        for (PeerInfo peer: peers) {
             newConfig.add(new ServerInfo(peer.getId(), peer.isVoting()));
         }
 
-        if(includeSelf) {
+        if (includeSelf) {
             newConfig.add(new ServerInfo(getId(), votingMember));
         }
 
@@ -364,10 +365,10 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public boolean anyVotingPeers() {
-        if(numVotingPeers < 0) {
+        if (numVotingPeers < 0) {
             numVotingPeers = 0;
-            for(PeerInfo info: getPeers()) {
-                if(info.isVoting()) {
+            for (PeerInfo info: getPeers()) {
+                if (info.isVoting()) {
                     numVotingPeers++;
                 }
             }
@@ -385,6 +386,7 @@ public class RaftActorContextImpl implements RaftActorContext {
         this.currentBehavior = Preconditions.checkNotNull(behavior);
     }
 
+    @SuppressWarnings("checkstyle:IllegalCatch")
     void close() {
         if (currentBehavior != null) {
             try {
index 7efc7586d8c196ea9750afde1169729125c0e5f4..f3ffe139471b3dd034047fb461bd896eb8c861b1 100644 (file)
@@ -26,7 +26,7 @@ import scala.concurrent.duration.FiniteDuration;
 /**
  * A raft actor support class that participates in leadership transfer. An instance is created upon
  * initialization of leadership transfer.
- * <p>
+ * <p/>
  * The transfer process is as follows:
  * <ol>
  * <li>Send a LeaderStateChanged message with a null leader Id to the local RoleChangeNotifier to notify
@@ -42,7 +42,7 @@ import scala.concurrent.duration.FiniteDuration;
  *     possibly complete work that was suspended while we were transferring.</li>
  * <li>On notification of the new leader from the RaftActor or on time out, notify {@link OnComplete} callbacks.</li>
  * </ol>
- * <p>
+ * <p/>
  * NOTE: All methods on this class must be called on the actor's thread dispatcher as they may access/modify
  * internal state.
  *
@@ -69,14 +69,14 @@ public class RaftActorLeadershipTransferCohort {
         transferTimer.start();
 
         Optional<ActorRef> roleChangeNotifier = raftActor.getRoleChangeNotifier();
-        if(roleChangeNotifier.isPresent()) {
+        if (roleChangeNotifier.isPresent()) {
             roleChangeNotifier.get().tell(raftActor.newLeaderStateChanged(context.getId(), null,
                     currentBehavior.getLeaderPayloadVersion()), raftActor.self());
         }
 
-        for(String peerId: context.getPeerIds()) {
+        for (String peerId: context.getPeerIds()) {
             ActorSelection followerActor = context.getPeerActorSelection(peerId);
-            if(followerActor != null) {
+            if (followerActor != null) {
                 followerActor.tell(LeaderTransitioning.INSTANCE, context.getActor());
             }
         }
@@ -102,7 +102,7 @@ public class RaftActorLeadershipTransferCohort {
     void doTransfer() {
         RaftActorBehavior behavior = raftActor.getCurrentBehavior();
         // Sanity check...
-        if(behavior instanceof Leader) {
+        if (behavior instanceof Leader) {
             isTransferring = true;
             ((Leader)behavior).transferLeadership(this);
         } else {
@@ -136,14 +136,14 @@ public class RaftActorLeadershipTransferCohort {
         // safely run on the actor's thread dispatcher.
         FiniteDuration timeout = FiniteDuration.create(newLeaderTimeoutInMillis, TimeUnit.MILLISECONDS);
         newLeaderTimer = raftActor.getContext().system().scheduler().scheduleOnce(timeout, raftActor.self(),
-                (Runnable) () -> {
-                    LOG.debug("{}: leader not elected in time", raftActor.persistenceId());
-                    finish(true);
-                }, raftActor.getContext().system().dispatcher(), raftActor.self());
+            (Runnable) () -> {
+                LOG.debug("{}: leader not elected in time", raftActor.persistenceId());
+                finish(true);
+            }, raftActor.getContext().system().dispatcher(), raftActor.self());
     }
 
     void onNewLeader(String newLeader) {
-        if(newLeader != null && newLeaderTimer != null) {
+        if (newLeader != null && newLeaderTimer != null) {
             LOG.debug("{}: leader changed to {}", raftActor.persistenceId(), newLeader);
             newLeaderTimer.cancel();
             finish(true);
@@ -152,9 +152,9 @@ public class RaftActorLeadershipTransferCohort {
 
     private void finish(boolean success) {
         isTransferring = false;
-        if(transferTimer.isRunning()) {
+        if (transferTimer.isRunning()) {
             transferTimer.stop();
-            if(success) {
+            if (success) {
                 LOG.info("{}: Successfully transferred leadership to {} in {}", raftActor.persistenceId(),
                         raftActor.getLeaderId(), transferTimer);
             } else {
@@ -162,8 +162,8 @@ public class RaftActorLeadershipTransferCohort {
             }
         }
 
-        for(OnComplete onComplete: onCompleteCallbacks) {
-            if(success) {
+        for (OnComplete onComplete: onCompleteCallbacks) {
+            if (success) {
                 onComplete.onSuccess(raftActor.self());
             } else {
                 onComplete.onFailure(raftActor.self());
@@ -186,6 +186,7 @@ public class RaftActorLeadershipTransferCohort {
 
     interface OnComplete {
         void onSuccess(ActorRef raftActorRef);
+
         void onFailure(ActorRef raftActorRef);
     }
 }
index d0217a6bc0d7223c0976609189a939050bae5e49..f0a7066d85eaba13ddd11d6f79876858c0395f58 100644 (file)
@@ -22,12 +22,12 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPay
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
 import org.slf4j.Logger;
+
 /**
  * Support class that handles persistence recovery for a RaftActor.
  *
  * @author Thomas Pantelis
  */
-
 class RaftActorRecoverySupport {
     private final RaftActorContext context;
     private final RaftActorRecoveryCohort cohort;
@@ -77,6 +77,7 @@ class RaftActorRecoverySupport {
         return recoveryComplete;
     }
 
+    @SuppressWarnings("checkstyle:IllegalCatch")
     private void possiblyRestoreFromSnapshot() {
         byte[] restoreFromSnapshot = cohort.getRestoreFromSnapshot();
         if (restoreFromSnapshot == null) {
index 16c091a4c3490c56ff7e10c3038d32f86cca2f84..782ecc06a87ac11b19c6ef6b8335e6a9aa820b1a 100644 (file)
@@ -50,6 +50,7 @@ import scala.concurrent.duration.FiniteDuration;
 class RaftActorServerConfigurationSupport {
     private static final Logger LOG = LoggerFactory.getLogger(RaftActorServerConfigurationSupport.class);
 
+    @SuppressWarnings("checkstyle:MemberName")
     private final OperationState IDLE = new Idle();
 
     private final RaftActor raftActor;
@@ -66,13 +67,13 @@ class RaftActorServerConfigurationSupport {
     }
 
     boolean handleMessage(Object message, ActorRef sender) {
-        if(message instanceof AddServer) {
+        if (message instanceof AddServer) {
             onAddServer((AddServer) message, sender);
             return true;
-        } else if(message instanceof RemoveServer) {
+        } else if (message instanceof RemoveServer) {
             onRemoveServer((RemoveServer) message, sender);
             return true;
-        } else if(message instanceof ChangeServersVotingStatus) {
+        } else if (message instanceof ChangeServersVotingStatus) {
             onChangeServersVotingStatus((ChangeServersVotingStatus) message, sender);
             return true;
         } else if (message instanceof ServerOperationTimeout) {
@@ -81,9 +82,9 @@ class RaftActorServerConfigurationSupport {
         } else if (message instanceof UnInitializedFollowerSnapshotReply) {
             currentOperationState.onUnInitializedFollowerSnapshotReply((UnInitializedFollowerSnapshotReply) message);
             return true;
-        } else if(message instanceof ApplyState) {
+        } else if (message instanceof ApplyState) {
             return onApplyState((ApplyState) message);
-        } else if(message instanceof SnapshotComplete) {
+        } else if (message instanceof SnapshotComplete) {
             currentOperationState.onSnapshotComplete();
             return false;
         } else {
@@ -111,10 +112,10 @@ class RaftActorServerConfigurationSupport {
         // Therefore, if the local server is currently non-voting and is to be changed to voting and there is
         // no current leader, we will try to elect a leader using the new server config in order to replicate
         // the change and progress.
-        boolean localServerChangingToVoting = Boolean.TRUE.equals(message.
-                getServerVotingStatusMap().get(raftActor.getRaftActorContext().getId()));
+        boolean localServerChangingToVoting = Boolean.TRUE.equals(message
+                .getServerVotingStatusMap().get(raftActor.getRaftActorContext().getId()));
         boolean hasNoLeader = raftActor.getLeaderId() == null;
-        if(localServerChangingToVoting && !raftContext.isVotingMember() && hasNoLeader) {
+        if (localServerChangingToVoting && !raftContext.isVotingMember() && hasNoLeader) {
             currentOperationState.onNewOperation(new ChangeServersVotingStatusContext(message, sender, true));
         } else {
             onNewOperation(new ChangeServersVotingStatusContext(message, sender, false));
@@ -124,10 +125,10 @@ class RaftActorServerConfigurationSupport {
     private void onRemoveServer(RemoveServer removeServer, ActorRef sender) {
         LOG.debug("{}: onRemoveServer: {}, state: {}", raftContext.getId(), removeServer, currentOperationState);
         boolean isSelf = removeServer.getServerId().equals(raftContext.getId());
-        if(isSelf && !raftContext.hasFollowers()) {
+        if (isSelf && !raftContext.hasFollowers()) {
             sender.tell(new RemoveServerReply(ServerChangeStatus.NOT_SUPPORTED, raftActor.getLeaderId()),
                     raftActor.getSelf());
-        } else if(!isSelf && !raftContext.getPeerIds().contains(removeServer.getServerId())) {
+        } else if (!isSelf && !raftContext.getPeerIds().contains(removeServer.getServerId())) {
             sender.tell(new RemoveServerReply(ServerChangeStatus.DOES_NOT_EXIST, raftActor.getLeaderId()),
                     raftActor.getSelf());
         } else {
@@ -139,7 +140,7 @@ class RaftActorServerConfigurationSupport {
 
     private boolean onApplyState(ApplyState applyState) {
         Payload data = applyState.getReplicatedLogEntry().getData();
-        if(data instanceof ServerConfigurationPayload) {
+        if (data instanceof ServerConfigurationPayload) {
             currentOperationState.onApplyState(applyState);
             return true;
         }
@@ -229,7 +230,7 @@ class RaftActorServerConfigurationSupport {
         void onNewLeader(String newLeader) {
         }
 
-        protected void persistNewServerConfiguration(ServerOperationContext<?> operationContext){
+        protected void persistNewServerConfiguration(ServerOperationContext<?> operationContext) {
             raftContext.setDynamicServerConfigurationInUse();
 
             ServerConfigurationPayload payload = raftContext.getPeerServerInfo(
@@ -244,8 +245,9 @@ class RaftActorServerConfigurationSupport {
             sendReply(operationContext, ServerChangeStatus.OK);
         }
 
-        protected void operationComplete(ServerOperationContext<?> operationContext, @Nullable ServerChangeStatus replyStatus) {
-            if(replyStatus != null) {
+        protected void operationComplete(ServerOperationContext<?> operationContext,
+                @Nullable ServerChangeStatus replyStatus) {
+            if (replyStatus != null) {
                 sendReply(operationContext, replyStatus);
             }
 
@@ -258,13 +260,14 @@ class RaftActorServerConfigurationSupport {
             currentOperationState = IDLE;
 
             ServerOperationContext<?> nextOperation = pendingOperationsQueue.poll();
-            if(nextOperation != null) {
+            if (nextOperation != null) {
                 RaftActorServerConfigurationSupport.this.onNewOperation(nextOperation);
             }
         }
 
         protected void sendReply(ServerOperationContext<?> operationContext, ServerChangeStatus status) {
-            LOG.debug("{}: Returning {} for operation {}", raftContext.getId(), status, operationContext.getOperation());
+            LOG.debug("{}: Returning {} for operation {}", raftContext.getId(), status,
+                    operationContext.getOperation());
 
             operationContext.getClientRequestor().tell(operationContext.newReply(status, raftActor.getLeaderId()),
                     raftActor.self());
@@ -318,7 +321,7 @@ class RaftActorServerConfigurationSupport {
         public void onApplyState(ApplyState applyState) {
             // Sanity check - we could get an ApplyState from a previous operation that timed out so make
             // sure it's meant for us.
-            if(operationContext.getContextId().equals(applyState.getIdentifier())) {
+            if (operationContext.getContextId().equals(applyState.getIdentifier())) {
                 LOG.info("{}: {} has been successfully replicated to a majority of followers", raftContext.getId(),
                         applyState.getReplicatedLogEntry().getData());
 
@@ -336,7 +339,7 @@ class RaftActorServerConfigurationSupport {
 
             // Fail any pending operations
             ServerOperationContext<?> nextOperation = pendingOperationsQueue.poll();
-            while(nextOperation != null) {
+            while (nextOperation != null) {
                 sendReply(nextOperation, ServerChangeStatus.PRIOR_REQUEST_CONSENSUS_TIMEOUT);
                 nextOperation = pendingOperationsQueue.poll();
             }
@@ -344,7 +347,7 @@ class RaftActorServerConfigurationSupport {
 
         @Override
         public void onNewOperation(ServerOperationContext<?> newOperationContext) {
-            if(timedOut) {
+            if (timedOut) {
                 sendReply(newOperationContext, ServerChangeStatus.PRIOR_REQUEST_CONSENSUS_TIMEOUT);
             } else {
                 super.onNewOperation(newOperationContext);
@@ -379,12 +382,13 @@ class RaftActorServerConfigurationSupport {
             raftContext.removePeer(serverId);
 
             boolean isLeader = raftActor.isLeader();
-            if(isLeader) {
+            if (isLeader) {
                 AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
                 leader.removeFollower(serverId);
             }
 
-            operationComplete(getAddServerContext(), isLeader ? ServerChangeStatus.TIMEOUT : ServerChangeStatus.NO_LEADER);
+            operationComplete(getAddServerContext(), isLeader ? ServerChangeStatus.TIMEOUT
+                    : ServerChangeStatus.NO_LEADER);
         }
 
     }
@@ -400,12 +404,12 @@ class RaftActorServerConfigurationSupport {
 
         @Override
         public void initiate() {
-            AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
+            final AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
             AddServer addServer = getAddServerContext().getOperation();
 
             LOG.debug("{}: Initiating {}", raftContext.getId(), addServer);
 
-            if(raftContext.getPeerInfo(addServer.getNewServerId()) != null) {
+            if (raftContext.getPeerInfo(addServer.getNewServerId()) != null) {
                 operationComplete(getAddServerContext(), ServerChangeStatus.ALREADY_EXISTS);
                 return;
             }
@@ -416,10 +420,10 @@ class RaftActorServerConfigurationSupport {
 
             leader.addFollower(addServer.getNewServerId());
 
-            if(votingState == VotingState.VOTING_NOT_INITIALIZED){
+            if (votingState == VotingState.VOTING_NOT_INITIALIZED) {
                 // schedule the install snapshot timeout timer
                 Cancellable installSnapshotTimer = newInstallSnapshotTimer();
-                if(leader.initiateCaptureSnapshot(addServer.getNewServerId())) {
+                if (leader.initiateCaptureSnapshot(addServer.getNewServerId())) {
                     LOG.debug("{}: Initiating capture snapshot for new server {}", raftContext.getId(),
                             addServer.getNewServerId());
 
@@ -467,7 +471,7 @@ class RaftActorServerConfigurationSupport {
 
             // Sanity check to guard against receiving an UnInitializedFollowerSnapshotReply from a prior
             // add server operation that timed out.
-            if(getAddServerContext().getOperation().getNewServerId().equals(followerId) && raftActor.isLeader()) {
+            if (getAddServerContext().getOperation().getNewServerId().equals(followerId) && raftActor.isLeader()) {
                 AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
                 raftContext.getPeerInfo(followerId).setVotingState(VotingState.VOTING);
                 leader.updateMinReplicaCount();
@@ -499,13 +503,13 @@ class RaftActorServerConfigurationSupport {
         public void onSnapshotComplete() {
             LOG.debug("{}: onSnapshotComplete", raftContext.getId());
 
-            if(!raftActor.isLeader()) {
+            if (!raftActor.isLeader()) {
                 LOG.debug("{}: No longer the leader", raftContext.getId());
                 return;
             }
 
             AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
-            if(leader.initiateCaptureSnapshot(getAddServerContext().getOperation().getNewServerId())) {
+            if (leader.initiateCaptureSnapshot(getAddServerContext().getOperation().getNewServerId())) {
                 LOG.debug("{}: Initiating capture snapshot for new server {}", raftContext.getId(),
                         getAddServerContext().getOperation().getNewServerId());
 
@@ -525,7 +529,8 @@ class RaftActorServerConfigurationSupport {
         }
     }
 
-    private static final class ServerOperationContextIdentifier extends AbstractUUIDIdentifier<ServerOperationContextIdentifier> {
+    private static final class ServerOperationContextIdentifier
+            extends AbstractUUIDIdentifier<ServerOperationContextIdentifier> {
         private static final long serialVersionUID = 1L;
 
         ServerOperationContextIdentifier() {
@@ -543,7 +548,7 @@ class RaftActorServerConfigurationSupport {
         private final ActorRef clientRequestor;
         private final Identifier contextId;
 
-        ServerOperationContext(T operation, ActorRef clientRequestor){
+        ServerOperationContext(T operation, ActorRef clientRequestor) {
             this.operation = operation;
             this.clientRequestor = clientRequestor;
             contextId = new ServerOperationContextIdentifier();
@@ -612,7 +617,7 @@ class RaftActorServerConfigurationSupport {
         }
     }
 
-    private final class InitialRemoveServerState extends RemoveServerState implements InitialOperationState{
+    private final class InitialRemoveServerState extends RemoveServerState implements InitialOperationState {
 
         protected InitialRemoveServerState(RemoveServerContext removeServerContext) {
             super(removeServerContext);
@@ -648,8 +653,9 @@ class RaftActorServerConfigurationSupport {
 
         @Override
         void operationComplete(RaftActor raftActor, boolean succeeded) {
-            if(peerAddress != null) {
-                raftActor.context().actorSelection(peerAddress).tell(new ServerRemoved(getOperation().getServerId()), raftActor.getSelf());
+            if (peerAddress != null) {
+                raftActor.context().actorSelection(peerAddress).tell(
+                        new ServerRemoved(getOperation().getServerId()), raftActor.getSelf());
             }
         }
 
@@ -687,8 +693,8 @@ class RaftActorServerConfigurationSupport {
         void operationComplete(final RaftActor raftActor, boolean succeeded) {
             // If this leader changed to non-voting we need to step down as leader so we'll try to transfer
             // leadership.
-            boolean localServerChangedToNonVoting = Boolean.FALSE.equals(getOperation().
-                    getServerVotingStatusMap().get(raftActor.getRaftActorContext().getId()));
+            boolean localServerChangedToNonVoting = Boolean.FALSE.equals(getOperation()
+                    .getServerVotingStatusMap().get(raftActor.getRaftActorContext().getId()));
             if (succeeded && localServerChangedToNonVoting) {
                 LOG.debug("Leader changed to non-voting - trying leadership transfer");
                 raftActor.becomeNonVoting();
@@ -715,9 +721,9 @@ class RaftActorServerConfigurationSupport {
         public void initiate() {
             LOG.debug("Initiating ChangeServersVotingStatusState");
 
-            if(tryToElectLeader) {
+            if (tryToElectLeader) {
                 initiateLocalLeaderElection();
-            } else if(updateLocalPeerInfo()) {
+            } else if (updateLocalPeerInfo()) {
                 persistNewServerConfiguration(changeVotingStatusContext);
             }
         }
@@ -726,7 +732,7 @@ class RaftActorServerConfigurationSupport {
             LOG.debug("{}: Sending local ElectionTimeout to start leader election", raftContext.getId());
 
             ServerConfigurationPayload previousServerConfig = raftContext.getPeerServerInfo(true);
-            if(!updateLocalPeerInfo()) {
+            if (!updateLocalPeerInfo()) {
                 return;
             }
 
@@ -740,20 +746,20 @@ class RaftActorServerConfigurationSupport {
 
             // Check if new voting state would leave us with no voting members.
             boolean atLeastOneVoting = false;
-            for(ServerInfo info: newServerInfoList) {
-                if(info.isVoting()) {
+            for (ServerInfo info: newServerInfoList) {
+                if (info.isVoting()) {
                     atLeastOneVoting = true;
                     break;
                 }
             }
 
-            if(!atLeastOneVoting) {
+            if (!atLeastOneVoting) {
                 operationComplete(changeVotingStatusContext, ServerChangeStatus.INVALID_REQUEST);
                 return false;
             }
 
             raftContext.updatePeerIds(new ServerConfigurationPayload(newServerInfoList));
-            if(raftActor.getCurrentBehavior() instanceof AbstractLeader) {
+            if (raftActor.getCurrentBehavior() instanceof AbstractLeader) {
                 AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
                 leader.updateMinReplicaCount();
             }
@@ -762,15 +768,17 @@ class RaftActorServerConfigurationSupport {
         }
 
         private List<ServerInfo> newServerInfoList() {
-            Map<String, Boolean> serverVotingStatusMap = changeVotingStatusContext.getOperation().getServerVotingStatusMap();
+            Map<String, Boolean> serverVotingStatusMap = changeVotingStatusContext.getOperation()
+                    .getServerVotingStatusMap();
             List<ServerInfo> newServerInfoList = new ArrayList<>();
-            for(String peerId: raftContext.getPeerIds()) {
-                newServerInfoList.add(new ServerInfo(peerId, serverVotingStatusMap.containsKey(peerId) ?
-                        serverVotingStatusMap.get(peerId) : raftContext.getPeerInfo(peerId).isVoting()));
+            for (String peerId: raftContext.getPeerIds()) {
+                newServerInfoList.add(new ServerInfo(peerId, serverVotingStatusMap.containsKey(peerId)
+                        ? serverVotingStatusMap.get(peerId) : raftContext.getPeerInfo(peerId).isVoting()));
             }
 
             newServerInfoList.add(new ServerInfo(raftContext.getId(), serverVotingStatusMap.containsKey(
-                    raftContext.getId()) ? serverVotingStatusMap.get(raftContext.getId()) : raftContext.isVotingMember()));
+                    raftContext.getId()) ? serverVotingStatusMap.get(raftContext.getId())
+                            : raftContext.isVotingMember()));
 
             return newServerInfoList;
         }
@@ -792,7 +800,7 @@ class RaftActorServerConfigurationSupport {
 
         @Override
         void onNewLeader(String newLeader) {
-            if(newLeader == null) {
+            if (newLeader == null) {
                 return;
             }
 
@@ -800,7 +808,7 @@ class RaftActorServerConfigurationSupport {
 
             timer.cancel();
 
-            if(raftActor.isLeader()) {
+            if (raftActor.isLeader()) {
                 persistNewServerConfiguration(operationContext);
             } else {
                 // Edge case - some other node became leader so forward the operation.
@@ -838,20 +846,20 @@ class RaftActorServerConfigurationSupport {
             // tried yet.
             Map<String, Boolean> serverVotingStatusMap = operationContext.getOperation().getServerVotingStatusMap();
             ActorSelection forwardToPeerActor = null;
-            for(Map.Entry<String, Boolean> e: serverVotingStatusMap.entrySet()) {
+            for (Map.Entry<String, Boolean> e: serverVotingStatusMap.entrySet()) {
                 Boolean isVoting = e.getValue();
                 String serverId = e.getKey();
                 PeerInfo peerInfo = raftContext.getPeerInfo(serverId);
-                if(isVoting && peerInfo != null && !peerInfo.isVoting() && !serversVisited.contains(serverId)) {
+                if (isVoting && peerInfo != null && !peerInfo.isVoting() && !serversVisited.contains(serverId)) {
                     ActorSelection actor = raftContext.getPeerActorSelection(serverId);
-                    if(actor != null) {
+                    if (actor != null) {
                         forwardToPeerActor = actor;
                         break;
                     }
                 }
             }
 
-            if(forwardToPeerActor != null) {
+            if (forwardToPeerActor != null) {
                 LOG.debug("{}: Found server {} to forward to", raftContext.getId(), forwardToPeerActor);
 
                 forwardToPeerActor.tell(new ChangeServersVotingStatus(serverVotingStatusMap, serversVisited),
@@ -866,8 +874,8 @@ class RaftActorServerConfigurationSupport {
     static class ServerOperationTimeout {
         private final String loggingContext;
 
-        ServerOperationTimeout(String loggingContext){
-           this.loggingContext = Preconditions.checkNotNull(loggingContext, "loggingContext should not be null");
+        ServerOperationTimeout(String loggingContext) {
+            this.loggingContext = Preconditions.checkNotNull(loggingContext, "loggingContext should not be null");
         }
 
         String getLoggingContext() {
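
The reformatted newServerInfoList() above keeps the same computation: for each peer, take the voting status from the requested override map if present, otherwise keep the peer's current status. A simplified sketch of that merge, using plain String/Boolean maps instead of the actual ServerInfo/PeerInfo types:

// Simplified sketch of the newServerInfoList() merge above: use the voting flag from the
// override map when present, otherwise keep the current flag. Types are illustrative only.
import java.util.LinkedHashMap;
import java.util.Map;

final class VotingStatusMerge {
    static Map<String, Boolean> merge(Map<String, Boolean> currentVoting, Map<String, Boolean> overrides) {
        Map<String, Boolean> merged = new LinkedHashMap<>();
        for (Map.Entry<String, Boolean> entry : currentVoting.entrySet()) {
            // Equivalent to: overrides.containsKey(id) ? overrides.get(id) : current value
            merged.put(entry.getKey(), overrides.getOrDefault(entry.getKey(), entry.getValue()));
        }
        return merged;
    }
}
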
index 2b527db174300d593126c112b6ce491b06da8235..73ab6ea66cfcbd04e835edf18f7e2210c5795b65 100644 (file)
@@ -14,7 +14,7 @@ import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
 /**
- * Represents the ReplicatedLog that needs to be kept in sync by the RaftActor
+ * Represents the ReplicatedLog that needs to be kept in sync by the RaftActor.
  */
 public interface ReplicatedLog {
     long NO_MAX_SIZE = -1;
@@ -24,7 +24,7 @@ public interface ReplicatedLog {
      *
      * @param index the index of the log entry
      * @return the ReplicatedLogEntry if found, otherwise null if the adjusted index less than 0 or
-     * greater than the size of the in-memory journal.
+     *         greater than the size of the in-memory journal
      */
     @Nullable
     ReplicatedLogEntry get(long index);
@@ -60,13 +60,13 @@ public interface ReplicatedLog {
     long removeFrom(long index);
 
     /**
-     * Removes entries from the in-memory log a nd the persisted log starting at the given index.
-     * <p>
+     * Removes entries from the in-memory log and the persisted log starting at the given index.
+     * <p/>
      * The persisted information would then be used during recovery to properly
      * reconstruct the state of the in-memory replicated log
      *
      * @param index the index of the first log entry to remove
-     * @return
+     * @return true if entries were removed, false otherwise
      */
     boolean removeFromAndPersist(long index);
 
@@ -121,13 +121,14 @@ public interface ReplicatedLog {
     @Nonnull List<ReplicatedLogEntry> getFrom(long index, int maxEntries, long maxDataSize);
 
     /**
+     * Returns the number of entries in the journal.
      *
-     * @return the number of entries in the journal
+     * @return the number of entries
      */
     long size();
 
     /**
-     * Checks if the entry at the specified index is present or not
+     * Checks if the entry at the specified index is present or not.
      *
      * @param index the index of the log entry
      * @return true if the entry is present in the in-memory journal
@@ -135,53 +136,55 @@ public interface ReplicatedLog {
     boolean isPresent(long index);
 
     /**
-     * Checks if the entry is present in a snapshot
+     * Checks if the entry is present in a snapshot.
      *
      * @param index the index of the log entry
-     * @return true if the entry is in the snapshot. false if the entry is not
-     * in the snapshot even if the entry may be present in the replicated log
+     * @return true if the entry is in the snapshot. false if the entry is not in the snapshot even if the entry may
+     *         be present in the replicated log
      */
     boolean isInSnapshot(long index);
 
     /**
-     * Get the index of the snapshot
+     * Returns the index of the snapshot.
      *
      * @return the index from which the snapshot was created. -1 otherwise.
      */
     long getSnapshotIndex();
 
     /**
-     * Get the term of the snapshot
+     * Returns the term of the snapshot.
      *
-     * @return the term of the index from which the snapshot was created. -1
-     * otherwise
+     * @return the term of the index from which the snapshot was created. -1 otherwise
      */
     long getSnapshotTerm();
 
     /**
-     * sets the snapshot index in the replicated log
-     * @param snapshotIndex
+     * Sets the snapshot index in the replicated log.
+     *
+     * @param snapshotIndex the index to set
      */
     void setSnapshotIndex(long snapshotIndex);
 
     /**
-     * sets snapshot term
-     * @param snapshotTerm
+     * Sets snapshot term.
+     *
+     * @param snapshotTerm the term to set
      */
     void setSnapshotTerm(long snapshotTerm);
 
     /**
-     * Clears the journal entries with startIndex(inclusive) and endIndex (exclusive)
-     * @param startIndex
-     * @param endIndex
+     * Clears the journal entries with startIndex (inclusive) and endIndex (exclusive).
+     *
+     * @param startIndex the start index (inclusive)
+     * @param endIndex the end index (exclusive)
      */
     void clear(int startIndex, int endIndex);
 
     /**
-     * Handles all the bookkeeping in order to perform a rollback in the
-     * event of SaveSnapshotFailure
-     * @param snapshotCapturedIndex
-     * @param snapshotCapturedTerm
+     * Handles all the bookkeeping in order to perform a rollback in the event of SaveSnapshotFailure.
+     *
+     * @param snapshotCapturedIndex the new snapshot index
+     * @param snapshotCapturedTerm the new snapshot term
      */
     void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm);
 
@@ -191,14 +194,14 @@ public interface ReplicatedLog {
     void snapshotCommit();
 
     /**
-     * Restores the replicated log to a state in the event of a save snapshot failure
+     * Restores the replicated log to a state in the event of a save snapshot failure.
      */
     void snapshotRollback();
 
     /**
-     * Returns the size of the data in the log (in bytes)
+     * Returns the size of the data in the log (in bytes).
      *
-     * @return the size of the data in the log (in bytes).
+     * @return the size of the data in the log (in bytes)
      */
     int dataSize();
 
index 4295633c0a9cd9e098740d0904b3fa307fbeeea3..9123c14d5d615553a2abf58d19e9a1d89ad63f39 100644 (file)
@@ -24,7 +24,8 @@ class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
     private final RaftActorContext context;
     private long dataSizeSinceLastSnapshot = 0L;
 
-    private ReplicatedLogImpl(final long snapshotIndex, final long snapshotTerm, final List<ReplicatedLogEntry> unAppliedEntries,
+    private ReplicatedLogImpl(final long snapshotIndex, final long snapshotTerm,
+            final List<ReplicatedLogEntry> unAppliedEntries,
             final RaftActorContext context) {
         super(snapshotIndex, snapshotTerm, unAppliedEntries, context.getId());
         this.context = Preconditions.checkNotNull(context);
@@ -43,7 +44,7 @@ class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
     public boolean removeFromAndPersist(final long logEntryIndex) {
         // FIXME: Maybe this should be done after the command is saved
         long adjustedIndex = removeFrom(logEntryIndex);
-        if(adjustedIndex >= 0) {
+        if (adjustedIndex >= 0) {
             context.getPersistenceProvider().persist(new DeleteEntries(adjustedIndex), deleteProcedure);
             return true;
         }
@@ -51,11 +52,6 @@ class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
         return false;
     }
 
-    @Override
-    public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry) {
-        appendAndPersist(replicatedLogEntry, null);
-    }
-
     @Override
     public void captureSnapshotIfReady(final ReplicatedLogEntry replicatedLogEntry) {
         final ConfigParams config = context.getConfigParams();
@@ -90,14 +86,20 @@ class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
         }
     }
 
+    @Override
+    public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry) {
+        appendAndPersist(replicatedLogEntry, null);
+    }
+
     @Override
     public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
             final Procedure<ReplicatedLogEntry> callback)  {
 
         context.getLogger().debug("{}: Append log entry and persist {} ", context.getId(), replicatedLogEntry);
 
-        // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
-        if(!append(replicatedLogEntry)) {
+        // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability
+        // of the logs
+        if (!append(replicatedLogEntry)) {
             return;
         }
 
index 8a836bffb9962b760f1bd45362b8555fc9c5f3f8..24132ea99979ca5b3ad45bdc0951c0195c30890b 100644 (file)
@@ -103,7 +103,7 @@ public class Snapshot implements Serializable {
     public String toString() {
         return "Snapshot [lastIndex=" + lastIndex + ", lastTerm=" + lastTerm + ", lastAppliedIndex=" + lastAppliedIndex
                 + ", lastAppliedTerm=" + lastAppliedTerm + ", unAppliedEntries size=" + unAppliedEntries.size()
-                + ", state size=" + state.length + ", electionTerm=" + electionTerm + ", electionVotedFor=" + electionVotedFor
-                + ", ServerConfigPayload="  + serverConfig + "]";
+                + ", state size=" + state.length + ", electionTerm=" + electionTerm + ", electionVotedFor="
+                + electionVotedFor + ", ServerConfigPayload="  + serverConfig + "]";
     }
 }
index 5db4706c623e3a8219554bcb40f08e584e2bda71..79f2ce9b4ca0fb2b0deca496eed67e069bc1a547 100644 (file)
@@ -27,8 +27,13 @@ import org.slf4j.Logger;
  */
 public class SnapshotManager implements SnapshotState {
 
+    @SuppressWarnings("checkstyle:MemberName")
     private final SnapshotState IDLE = new Idle();
+
+    @SuppressWarnings({"checkstyle:MemberName", "checkstyle:AbbreviationAsWordInName"})
     private final SnapshotState PERSISTING = new Persisting();
+
+    @SuppressWarnings({"checkstyle:MemberName", "checkstyle:AbbreviationAsWordInName"})
     private final SnapshotState CREATING = new Creating();
 
     private final Logger log;
@@ -159,8 +164,8 @@ public class SnapshotManager implements SnapshotState {
             lastLogEntryIndex = lastLogEntry.getIndex();
             lastLogEntryTerm = lastLogEntry.getTerm();
         } else {
-            log.debug("{}: Capturing Snapshot : lastLogEntry is null. Using lastAppliedIndex {} and lastAppliedTerm {} instead.",
-                    persistenceId(), lastAppliedIndex, lastAppliedTerm);
+            log.debug("{}: Capturing Snapshot : lastLogEntry is null. Using lastAppliedIndex {} and "
+                    + "lastAppliedTerm {} instead.", persistenceId(), lastAppliedIndex, lastAppliedTerm);
         }
 
         return new CaptureSnapshot(lastLogEntryIndex, lastLogEntryTerm, lastAppliedIndex, lastAppliedTerm,
@@ -254,6 +259,7 @@ public class SnapshotManager implements SnapshotState {
             return false;
         }
 
+        @SuppressWarnings("checkstyle:IllegalCatch")
         private boolean capture(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower) {
             captureSnapshot = newCaptureSnapshot(lastLogEntry, replicatedToAllIndex, targetFollower != null);
 
@@ -344,12 +350,12 @@ public class SnapshotManager implements SnapshotState {
             if (dataSizeThresholdExceeded || logSizeExceededSnapshotBatchCount) {
                 if (log.isDebugEnabled()) {
                     if (dataSizeThresholdExceeded) {
-                        log.debug("{}: log data size {} exceeds the memory threshold {} - doing snapshotPreCommit with index {}",
-                                context.getId(), context.getReplicatedLog().dataSize(), dataThreshold,
-                                captureSnapshot.getLastAppliedIndex());
+                        log.debug("{}: log data size {} exceeds the memory threshold {} - doing snapshotPreCommit "
+                                + "with index {}", context.getId(), context.getReplicatedLog().dataSize(),
+                                dataThreshold, captureSnapshot.getLastAppliedIndex());
                     } else {
-                        log.debug("{}: log size {} exceeds the snapshot batch count {} - doing snapshotPreCommit with index {}",
-                                context.getId(), context.getReplicatedLog().size(),
+                        log.debug("{}: log size {} exceeds the snapshot batch count {} - doing snapshotPreCommit with "
+                                + "index {}", context.getId(), context.getReplicatedLog().size(),
                                 context.getConfigParams().getSnapshotBatchCount(),
                                 captureSnapshot.getLastAppliedIndex());
                     }
@@ -407,6 +413,7 @@ public class SnapshotManager implements SnapshotState {
     private class Persisting extends AbstractSnapshotState {
 
         @Override
+        @SuppressWarnings("checkstyle:IllegalCatch")
         public void commit(final long sequenceNumber, long timeStamp) {
             log.debug("{}: Snapshot success -  sequence number: {}", persistenceId(), sequenceNumber);
 
index 639fdee15599de2e4c30b2edf14c3e0e09709c15..e423737706bc0d42055d388bb72b7f4a7890256b 100644 (file)
@@ -18,27 +18,27 @@ import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
  */
 public interface SnapshotState {
     /**
-     * @return true when a snapshot is being captured
+     * Returns whether or not a capture is in progress.
+     *
+     * @return true when a snapshot is being captured, false otherwise
      */
     boolean isCapturing();
 
     /**
-     * Initiate capture snapshot
+     * Initiates a capture snapshot.
      *
      * @param lastLogEntry the last entry in the replicated log
      * @param replicatedToAllIndex the current replicatedToAllIndex
-     *
      * @return true if capture was started
      */
     boolean capture(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex);
 
     /**
-     * Initiate capture snapshot for the purposing of installing that snapshot
-     *
-     * @param lastLogEntry
-     * @param replicatedToAllIndex
-     * @param targetFollower
+     * Initiates a capture snapshot for the purpose of installing the snapshot on a follower.
      *
+     * @param lastLogEntry the last entry in the replicated log
+     * @param replicatedToAllIndex the current replicatedToAllIndex
+     * @param targetFollower the id of the follower on which to install
      * @return true if capture was started
      */
     boolean captureToInstall(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower);
@@ -51,31 +51,30 @@ public interface SnapshotState {
     void apply(ApplySnapshot snapshot);
 
     /**
-     * Persist the snapshot
+     * Persists a snapshot.
      *
-     * @param snapshotBytes
-     * @param currentBehavior
-     * @param totalMemory
+     * @param snapshotBytes the snapshot bytes
+     * @param totalMemory the total memory threshold
      */
     void persist(byte[] snapshotBytes, long totalMemory);
 
     /**
-     * Commit the snapshot by trimming the log
+     * Commit the snapshot by trimming the log.
      *
-     * @param sequenceNumber
-     * @param timeStamp
+     * @param sequenceNumber the sequence number of the persisted snapshot
+     * @param timeStamp the time stamp of the persisted snapshot
      */
     void commit(long sequenceNumber, long timeStamp);
 
     /**
-     * Rollback the snapshot
+     * Rolls back the snapshot on failure.
      */
     void rollback();
 
     /**
-     * Trim the log
+     * Trims the in-memory log.
      *
-     * @param desiredTrimIndex
+     * @param desiredTrimIndex the desired index to trim from
      * @return the actual trim index
      */
     long trimLog(long desiredTrimIndex);
index 8000217c9e695fd08830acfbfeaa43bee5b0e718..e0d76a54a41b2a5bcd2afd14f2e1855b7595b163 100644 (file)
@@ -50,13 +50,13 @@ public final class LeaderInstallSnapshotState {
     }
 
     void setSnapshotBytes(ByteString snapshotBytes) {
-        if(this.snapshotBytes != null) {
+        if (this.snapshotBytes != null) {
             return;
         }
 
         this.snapshotBytes = snapshotBytes;
         int size = snapshotBytes.size();
-        totalChunks = (size / snapshotChunkSize) + (size % snapshotChunkSize > 0 ? 1 : 0);
+        totalChunks = size / snapshotChunkSize + (size % snapshotChunkSize > 0 ? 1 : 0);
 
         LOG.debug("{}: Snapshot {} bytes, total chunks to send: {}", logName, size, totalChunks);
 
@@ -65,7 +65,7 @@ public final class LeaderInstallSnapshotState {
     }
 
     int incrementOffset() {
-        if(replyStatus) {
+        if (replyStatus) {
             // if prev chunk failed, we would want to sent the same chunk again
             offset = offset + snapshotChunkSize;
         }
@@ -116,7 +116,7 @@ public final class LeaderInstallSnapshotState {
         int size = snapshotChunkSize;
         if (snapshotChunkSize > snapshotLength) {
             size = snapshotLength;
-        } else if ((start + snapshotChunkSize) > snapshotLength) {
+        } else if (start + snapshotChunkSize > snapshotLength) {
             size = snapshotLength - start;
         }
 
@@ -130,9 +130,9 @@ public final class LeaderInstallSnapshotState {
     }
 
     /**
-     * reset should be called when the Follower needs to be sent the snapshot from the beginning
+     * Reset should be called when the Follower needs to be sent the snapshot from the beginning.
      */
-    void reset(){
+    void reset() {
         offset = 0;
         replyStatus = false;
         replyReceivedForOffset = offset;
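
For reference, the chunking arithmetic touched above in LeaderInstallSnapshotState (total chunk count and the size of the chunk at a given offset) boils down to the following standalone sketch; the class name and the 2500/1000 example values are illustrative only.

// Standalone sketch of the chunking arithmetic in LeaderInstallSnapshotState above.
final class ChunkMath {

    /** Total number of chunks needed for a snapshot of the given length (integer ceiling division). */
    static int totalChunks(int snapshotLength, int chunkSize) {
        return snapshotLength / chunkSize + (snapshotLength % chunkSize > 0 ? 1 : 0);
    }

    /** Size of the chunk that starts at the given offset; the last chunk may be shorter. */
    static int chunkSizeAt(int start, int snapshotLength, int chunkSize) {
        if (chunkSize > snapshotLength) {
            return snapshotLength;
        } else if (start + chunkSize > snapshotLength) {
            return snapshotLength - start;
        }
        return chunkSize;
    }

    public static void main(String[] args) {
        // A 2500-byte snapshot with 1000-byte chunks: 3 chunks of sizes 1000, 1000 and 500.
        System.out.println(totalChunks(2500, 1000));       // 3
        System.out.println(chunkSizeAt(2000, 2500, 1000)); // 500
    }
}
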
index e5f627c857d09efe89548ca4a07d24a95ce4e6dd..b7bb741e7335f58450f03387b7f1b0b496fdd3c5 100644 (file)
@@ -50,7 +50,7 @@ public class AppendEntriesTest {
 
         assertEquals("getEntries size", expected.getEntries().size(), actual.getEntries().size());
         Iterator<ReplicatedLogEntry> iter = expected.getEntries().iterator();
-        for(ReplicatedLogEntry e: actual.getEntries()) {
+        for (ReplicatedLogEntry e: actual.getEntries()) {
             verifyReplicatedLogEntry(iter.next(), e);
         }
     }