Bug 7805: Add make-leader-local rpc for module based shard.
[controller.git] / opendaylight / md-sal / sal-cluster-admin-impl / src / test / java / org / opendaylight / controller / cluster / datastore / admin / ClusterAdminRpcServiceTest.java
index 07acc18dc5069626d20c9fe4c87406097797cb61..90c6cd1b58aec2b77f51df7db50736b72e9f6d99 100644 (file)
@@ -7,9 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore.admin;
 
+import static java.lang.Boolean.FALSE;
+import static java.lang.Boolean.TRUE;
 import static org.hamcrest.CoreMatchers.anyOf;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertThat;
@@ -35,6 +38,7 @@ import java.net.URI;
 import java.util.AbstractMap.SimpleEntry;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -46,16 +50,19 @@ import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
 import org.opendaylight.controller.cluster.datastore.MemberNode;
 import org.opendaylight.controller.cluster.datastore.MemberNode.RaftStateVerifier;
 import org.opendaylight.controller.cluster.datastore.Shard;
 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
+import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
-import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
+import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
@@ -63,9 +70,16 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
@@ -74,12 +88,16 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.RpcError;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
@@ -118,7 +136,7 @@ public class ClusterAdminRpcServiceTest {
         String fileName = "target/testBackupDatastore";
         new File(fileName).delete();
 
-        ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore());
+        ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), null);
 
         RpcResult<Void> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder()
                 .setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
@@ -148,7 +166,7 @@ public class ClusterAdminRpcServiceTest {
 
         rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build())
                 .get(5, TimeUnit.SECONDS);
-        assertEquals("isSuccessful", false, rpcResult.isSuccessful());
+        assertFalse("isSuccessful", rpcResult.isSuccessful());
         assertEquals("getErrors", 1, rpcResult.getErrors().size());
     }
 
@@ -163,11 +181,97 @@ public class ClusterAdminRpcServiceTest {
         assertEquals("DatastoreSnapshot shard names", Sets.newHashSet(expShardNames), shardNames);
     }
 
+    @Test
+    public void testAddRemovePrefixShardReplica() throws Exception {
+        String name = "testAddPrefixShardReplica";
+        String moduleShardsConfig = "module-shards-default.conf";
+
+        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+                .moduleShardsConfig(moduleShardsConfig).build();
+        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+                .moduleShardsConfig(moduleShardsConfig).build();
+        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+                .moduleShardsConfig(moduleShardsConfig).build();
+
+        member1.waitForMembersUp("member-2", "member-3");
+        replicaNode2.kit().waitForMembersUp("member-1", "member-3");
+        replicaNode3.kit().waitForMembersUp("member-1", "member-2");
+
+        final ActorRef shardManager1 = member1.configDataStore().getActorContext().getShardManager();
+
+        shardManager1.tell(new PrefixShardCreated(new PrefixShardConfiguration(
+                        new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH),
+                        "prefix", Collections.singleton(MEMBER_1))),
+                ActorRef.noSender());
+
+        member1.kit().waitUntilLeader(member1.configDataStore().getActorContext(),
+                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
+
+        final InstanceIdentifier<Cars> identifier = InstanceIdentifier.create(Cars.class);
+        final BindingNormalizedNodeSerializer serializer = Mockito.mock(BindingNormalizedNodeSerializer.class);
+        Mockito.doReturn(CarsModel.BASE_PATH).when(serializer).toYangInstanceIdentifier(identifier);
+
+        addPrefixShardReplica(replicaNode2, identifier, serializer,
+                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-1");
+
+        addPrefixShardReplica(replicaNode3, identifier, serializer,
+                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-1", "member-2");
+
+        verifyRaftPeersPresent(member1.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH),
+                "member-2", "member-3");
+
+        removePrefixShardReplica(member1, identifier, "member-3", serializer,
+                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-2");
+
+        verifyNoShardPresent(replicaNode3.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
+        verifyRaftPeersPresent(replicaNode2.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH),
+                "member-1");
+
+        removePrefixShardReplica(member1, identifier, "member-2", serializer,
+                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
+
+        verifyNoShardPresent(replicaNode2.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
+    }
+
+    @Test
+    public void testModuleShardLeaderMovement() throws Exception {
+        String name = "testModuleShardLeaderMovement";
+        String moduleShardsConfig = "module-shards-member1.conf";
+
+        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+                .waitForShardLeader("cars").moduleShardsConfig(moduleShardsConfig).build();
+        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+                .moduleShardsConfig(moduleShardsConfig).build();
+        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+                .moduleShardsConfig(moduleShardsConfig).build();
+
+        member1.waitForMembersUp("member-2", "member-3");
+
+        doAddShardReplica(replicaNode2, "cars", "member-1");
+        doAddShardReplica(replicaNode3, "cars", "member-1", "member-2");
+
+        verifyRaftPeersPresent(member1.configDataStore(), "cars", "member-2", "member-3");
+
+        verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
+
+        verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
+
+        doMakeShardLeaderLocal(member1, "cars", "member-1");
+        replicaNode2.kit().waitUntilLeader(replicaNode2.configDataStore().getActorContext(), "cars");
+        replicaNode3.kit().waitUntilLeader(replicaNode3.configDataStore().getActorContext(), "cars");
+
+        doMakeShardLeaderLocal(replicaNode2, "cars", "member-2");
+        member1.kit().waitUntilLeader(member1.configDataStore().getActorContext(), "cars");
+        replicaNode3.kit().waitUntilLeader(replicaNode3.configDataStore().getActorContext(), "cars");
+
+        doMakeShardLeaderLocal(replicaNode3, "cars", "member-3");
+    }
+
     @Test
     public void testAddShardReplica() throws Exception {
         String name = "testAddShardReplica";
         String moduleShardsConfig = "module-shards-cars-member-1.conf";
-        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name )
+        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build();
 
         MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
@@ -231,7 +335,7 @@ public class ClusterAdminRpcServiceTest {
                 .moduleShardsConfig("module-shards-cars-member-1.conf").build();
 
         ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore());
+                memberNode.operDataStore(), null);
 
         RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
                 .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
@@ -246,15 +350,15 @@ public class ClusterAdminRpcServiceTest {
         verifyFailedRpcResult(rpcResult);
     }
 
-    private static NormalizedNode<?, ?> writeCarsNodeAndVerify(DistributedDataStore writeToStore,
-            DistributedDataStore readFromStore) throws Exception {
+    private static NormalizedNode<?, ?> writeCarsNodeAndVerify(AbstractDataStore writeToStore,
+            AbstractDataStore readFromStore) throws Exception {
         DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
         NormalizedNode<?, ?> carsNode = CarsModel.create();
         writeTx.write(CarsModel.BASE_PATH, carsNode);
 
         DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-        Boolean canCommit = cohort .canCommit().get(7, TimeUnit.SECONDS);
-        assertEquals("canCommit", true, canCommit);
+        Boolean canCommit = cohort.canCommit().get(7, TimeUnit.SECONDS);
+        assertEquals("canCommit", TRUE, canCommit);
         cohort.preCommit().get(5, TimeUnit.SECONDS);
         cohort.commit().get(5, TimeUnit.SECONDS);
 
@@ -262,20 +366,61 @@ public class ClusterAdminRpcServiceTest {
         return carsNode;
     }
 
-    private static void readCarsNodeAndVerify(DistributedDataStore readFromStore,
+    private static void readCarsNodeAndVerify(AbstractDataStore readFromStore,
             NormalizedNode<?, ?> expCarsNode) throws Exception {
         Optional<NormalizedNode<?, ?>> optional = readFromStore.newReadOnlyTransaction()
                 .read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS);
-        assertEquals("isPresent", true, optional.isPresent());
+        assertTrue("isPresent", optional.isPresent());
         assertEquals("Data node", expCarsNode, optional.get());
     }
 
+    private void addPrefixShardReplica(final MemberNode memberNode,
+                                       final InstanceIdentifier<?> identifier,
+                                       final BindingNormalizedNodeSerializer serializer,
+                                       final String shardName,
+                                       final String... peerMemberNames) throws Exception {
+
+        final AddPrefixShardReplicaInput input = new AddPrefixShardReplicaInputBuilder()
+                .setShardPrefix(identifier)
+                .setDataStoreType(DataStoreType.Config).build();
+
+        final ClusterAdminRpcService service =
+                new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
+
+        final RpcResult<Void> rpcResult = service.addPrefixShardReplica(input).get(10, TimeUnit.SECONDS);
+        verifySuccessfulRpcResult(rpcResult);
+
+        verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
+        Optional<ActorRef> optional = memberNode.configDataStore().getActorContext().findLocalShard(shardName);
+        assertTrue("Replica shard not present", optional.isPresent());
+    }
+
+    private void removePrefixShardReplica(final MemberNode memberNode,
+                                          final InstanceIdentifier<?> identifier,
+                                          final String removeFromMember,
+                                          final BindingNormalizedNodeSerializer serializer,
+                                          final String shardName,
+                                          final String... peerMemberNames) throws Exception {
+        final RemovePrefixShardReplicaInput input = new RemovePrefixShardReplicaInputBuilder()
+                .setDataStoreType(DataStoreType.Config)
+                .setShardPrefix(identifier)
+                .setMemberName(removeFromMember).build();
+
+        final ClusterAdminRpcService service =
+                new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
+
+        final RpcResult<Void> rpcResult = service.removePrefixShardReplica(input).get(10, TimeUnit.SECONDS);
+        verifySuccessfulRpcResult(rpcResult);
+
+        verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
+    }
+
     private static void doAddShardReplica(MemberNode memberNode, String shardName, String... peerMemberNames)
             throws Exception {
         memberNode.waitForMembersUp(peerMemberNames);
 
         ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore());
+                memberNode.operDataStore(), null);
 
         RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
                 .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
@@ -284,7 +429,7 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
 
         Optional<ActorRef> optional = memberNode.operDataStore().getActorContext().findLocalShard(shardName);
-        assertEquals("Oper shard present", false, optional.isPresent());
+        assertFalse("Oper shard present", optional.isPresent());
 
         rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
                 .setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS);
@@ -293,6 +438,22 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames);
     }
 
+    private static void doMakeShardLeaderLocal(final MemberNode memberNode, final String shardName,
+            final String newLeader) throws Exception {
+        ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
+                memberNode.operDataStore(), null);
+
+        final RpcResult<Void> rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder()
+                .setDataStoreType(DataStoreType.Config).setShardName(shardName).build())
+                .get(10, TimeUnit.SECONDS);
+
+        verifySuccessfulRpcResult(rpcResult);
+
+        verifyRaftState(memberNode.configDataStore(), shardName,
+                raftState -> assertThat(raftState.getLeader(),
+                        containsString(newLeader)));
+    }
+
     private static <T> T verifySuccessfulRpcResult(RpcResult<T> rpcResult) {
         if (!rpcResult.isSuccessful()) {
             if (rpcResult.getErrors().size() > 0) {
@@ -307,7 +468,7 @@ public class ClusterAdminRpcServiceTest {
     }
 
     private static void verifyFailedRpcResult(RpcResult<Void> rpcResult) {
-        assertEquals("RpcResult", false, rpcResult.isSuccessful());
+        assertFalse("RpcResult", rpcResult.isSuccessful());
         assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size());
         RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
         assertNotNull("RpcResult error message null", error.getMessage());
@@ -337,7 +498,7 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-3 to remove it's local shard
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
         RpcResult<Void> rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
                 .setShardName("cars").setMemberName("member-3").setDataStoreType(DataStoreType.Config).build())
@@ -362,7 +523,7 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-1 to remove member-2
 
         ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore());
+                leaderNode1.operDataStore(), null);
 
         rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().setShardName("cars")
                 .setMemberName("member-2").setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
@@ -398,7 +559,7 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on leader member-1 to remove it's local shard
 
         ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore());
+                leaderNode1.operDataStore(), null);
 
         RpcResult<Void> rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
                 .setShardName("cars").setMemberName("member-1").setDataStoreType(DataStoreType.Config).build())
@@ -418,7 +579,7 @@ public class ClusterAdminRpcServiceTest {
     public void testAddReplicasForAllShards() throws Exception {
         String name = "testAddReplicasForAllShards";
         String moduleShardsConfig = "module-shards-member1.conf";
-        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name )
+        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars", "people").build();
 
         ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
@@ -445,7 +606,7 @@ public class ClusterAdminRpcServiceTest {
         newReplicaNode2.kit().expectMsgClass(Success.class);
 
         ClusterAdminRpcService service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
-                newReplicaNode2.operDataStore());
+                newReplicaNode2.operDataStore(), null);
 
         RpcResult<AddReplicasForAllShardsOutput> rpcResult =
                 service.addReplicasForAllShards().get(10, TimeUnit.SECONDS);
@@ -503,7 +664,7 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(replicaNode3.configDataStore(), "pets", "member-1", "member-2");
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
         RpcResult<RemoveAllShardReplicasOutput> rpcResult = service3.removeAllShardReplicas(
                 new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()).get(10, TimeUnit.SECONDS);
@@ -549,24 +710,24 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-3 to change voting status
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
         RpcResult<Void> rpcResult = service3
                 .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
                         .setShardName("cars").setDataStoreType(DataStoreType.Config)
                         .setMemberVotingState(ImmutableList.of(
-                                new MemberVotingStateBuilder().setMemberName("member-2").setVoting(false).build(),
-                                new MemberVotingStateBuilder().setMemberName("member-3").setVoting(false).build()))
+                                new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
+                                new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
                         .build())
                 .get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
-        verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
-        verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
     }
 
     @Test
@@ -583,18 +744,18 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-3 to change voting status
 
         ClusterAdminRpcService service = new ClusterAdminRpcService(leaderNode.configDataStore(),
-                leaderNode.operDataStore());
+                leaderNode.operDataStore(), null);
 
         RpcResult<Void> rpcResult = service
                 .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
                         .setShardName("cars").setDataStoreType(DataStoreType.Config)
                         .setMemberVotingState(ImmutableList
-                                .of(new MemberVotingStateBuilder().setMemberName("member-1").setVoting(false).build()))
+                                .of(new MemberVotingStateBuilder().setMemberName("member-1").setVoting(FALSE).build()))
                         .build())
                 .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
-        verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", true));
+        verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE));
     }
 
     @Test
@@ -623,12 +784,12 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-3 to change voting status
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
         RpcResult<ChangeMemberVotingStatesForAllShardsOutput> rpcResult = service3.changeMemberVotingStatesForAllShards(
                 new ChangeMemberVotingStatesForAllShardsInputBuilder().setMemberVotingState(ImmutableList.of(
-                        new MemberVotingStateBuilder().setMemberName("member-2").setVoting(false).build(),
-                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(false).build())).build())
+                        new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
+                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())).build())
                 .get(10, TimeUnit.SECONDS);
         ChangeMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
@@ -636,11 +797,11 @@ public class ClusterAdminRpcServiceTest {
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"}, new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
+                new String[]{"cars", "people"}, new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
     }
 
     @Test
@@ -671,11 +832,11 @@ public class ClusterAdminRpcServiceTest {
         leaderNode1.operDataStore().waitTillReady();
         replicaNode3.configDataStore().waitTillReady();
         replicaNode3.operDataStore().waitTillReady();
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", true), new SimpleEntry<>("member-3", false));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE));
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
         RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards()
                 .get(10, TimeUnit.SECONDS);
@@ -685,12 +846,12 @@ public class ClusterAdminRpcServiceTest {
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                 new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", false), new SimpleEntry<>("member-2", false),
-                new SimpleEntry<>("member-3", true));
+                new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
+                new SimpleEntry<>("member-3", TRUE));
 
         // Leadership should have transferred to member 3 since it is the only remaining voting member.
         verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
@@ -714,12 +875,12 @@ public class ClusterAdminRpcServiceTest {
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                 new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", true),
-                new SimpleEntry<>("member-3", false));
+                new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
+                new SimpleEntry<>("member-3", FALSE));
 
         // Leadership should have transferred to member 1 or 2.
         verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
@@ -760,16 +921,16 @@ public class ClusterAdminRpcServiceTest {
 
         replicaNode1.waitForMembersUp("member-2", "member-3");
 
-        verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", false),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false),
-                new SimpleEntry<>("member-4", true), new SimpleEntry<>("member-5", true),
-                new SimpleEntry<>("member-6", true));
+        verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", FALSE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE),
+                new SimpleEntry<>("member-4", TRUE), new SimpleEntry<>("member-5", TRUE),
+                new SimpleEntry<>("member-6", TRUE));
 
         verifyRaftState(replicaNode1.configDataStore(), "cars", raftState ->
             assertEquals("Expected raft state", RaftState.Follower.toString(), raftState.getRaftState()));
 
         ClusterAdminRpcService service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(),
-                replicaNode1.operDataStore());
+                replicaNode1.operDataStore(), null);
 
         RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards()
                 .get(10, TimeUnit.SECONDS);
@@ -779,13 +940,13 @@ public class ClusterAdminRpcServiceTest {
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new DistributedDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                 new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", true),
-                new SimpleEntry<>("member-3", true), new SimpleEntry<>("member-4", false),
-                new SimpleEntry<>("member-5", false), new SimpleEntry<>("member-6", false));
+                new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
+                new SimpleEntry<>("member-3", TRUE), new SimpleEntry<>("member-4", FALSE),
+                new SimpleEntry<>("member-5", FALSE), new SimpleEntry<>("member-6", FALSE));
 
         // Since member 1 was changed to voting and there was no leader, it should've started an election
         // and become leader
@@ -830,13 +991,13 @@ public class ClusterAdminRpcServiceTest {
 
         leaderNode1.configDataStore().waitTillReady();
         leaderNode1.operDataStore().waitTillReady();
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", true), new SimpleEntry<>("member-3", true),
-                new SimpleEntry<>("member-4", false), new SimpleEntry<>("member-5", false),
-                new SimpleEntry<>("member-6", false));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", TRUE),
+                new SimpleEntry<>("member-4", FALSE), new SimpleEntry<>("member-5", FALSE),
+                new SimpleEntry<>("member-6", FALSE));
 
         ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore());
+                leaderNode1.operDataStore(), null);
 
         RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards()
                 .get(10, TimeUnit.SECONDS);
@@ -847,13 +1008,13 @@ public class ClusterAdminRpcServiceTest {
                 successShardResult("people", DataStoreType.Operational));
 
         // Members 2 and 3 are now non-voting but should get replicated with the new server config.
-        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                 new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", false), new SimpleEntry<>("member-2", false),
-                new SimpleEntry<>("member-3", false), new SimpleEntry<>("member-4", true),
-                new SimpleEntry<>("member-5", true), new SimpleEntry<>("member-6", true));
+                new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
+                new SimpleEntry<>("member-3", FALSE), new SimpleEntry<>("member-4", TRUE),
+                new SimpleEntry<>("member-5", TRUE), new SimpleEntry<>("member-6", TRUE));
 
         // The leader (member 1) was changed to non-voting but it shouldn't be able to step down as leader yet
         // b/c it can't get a majority consensus with all voting members down. So verify it remains the leader.
@@ -863,7 +1024,7 @@ public class ClusterAdminRpcServiceTest {
         });
     }
 
-    private void setupPersistedServerConfigPayload(ServerConfigurationPayload serverConfig,
+    private static void setupPersistedServerConfigPayload(ServerConfigurationPayload serverConfig,
             String member, String datastoreTypeSuffix, String... shards) {
         String[] datastoreTypes = {"config_", "oper_"};
         for (String type : datastoreTypes) {
@@ -884,9 +1045,9 @@ public class ClusterAdminRpcServiceTest {
     }
 
     @SafeVarargs
-    private static void verifyVotingStates(DistributedDataStore[] datastores, String[] shards,
+    private static void verifyVotingStates(AbstractDataStore[] datastores, String[] shards,
             SimpleEntry<String, Boolean>... expStates) throws Exception {
-        for (DistributedDataStore datastore: datastores) {
+        for (AbstractDataStore datastore: datastores) {
             for (String shard: shards) {
                 verifyVotingStates(datastore, shard, expStates);
             }
@@ -894,7 +1055,7 @@ public class ClusterAdminRpcServiceTest {
     }
 
     @SafeVarargs
-    private static void verifyVotingStates(DistributedDataStore datastore, String shardName,
+    private static void verifyVotingStates(AbstractDataStore datastore, String shardName,
             SimpleEntry<String, Boolean>... expStates) throws Exception {
         String localMemberName = datastore.getActorContext().getCurrentMemberName().getName();
         Map<String, Boolean> expStateMap = new HashMap<>();
@@ -937,10 +1098,10 @@ public class ClusterAdminRpcServiceTest {
     }
 
     private static ShardResult successShardResult(String shardName, DataStoreType type) {
-        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(true).build();
+        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(TRUE).build();
     }
 
     private static ShardResult failedShardResult(String shardName, DataStoreType type) {
-        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(false).build();
+        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(FALSE).build();
     }
 }