Remove DOMDataTreeProducer-related classes
diff --git a/opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java b/opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java
index 07acc18dc5069626d20c9fe4c87406097797cb61..1362db752e123014c7a6e832262f7bda4a6f00ac 100644
--- a/opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java
+++ b/opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java
@@ -7,12 +7,15 @@
  */
 package org.opendaylight.controller.cluster.datastore.admin;
 
+import static java.lang.Boolean.FALSE;
+import static java.lang.Boolean.TRUE;
 import static org.hamcrest.CoreMatchers.anyOf;
 import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyNoShardPresent;
@@ -23,7 +26,6 @@ import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
 import akka.actor.Status.Success;
 import akka.cluster.Cluster;
-import com.google.common.base.Optional;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
@@ -35,11 +37,13 @@ import java.net.URI;
 import java.util.AbstractMap.SimpleEntry;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.SerializationUtils;
@@ -47,39 +51,48 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
 import org.opendaylight.controller.cluster.datastore.MemberNode;
 import org.opendaylight.controller.cluster.datastore.MemberNode.RaftStateVerifier;
 import org.opendaylight.controller.cluster.datastore.Shard;
 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
-import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
+import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey;
 import org.opendaylight.yangtools.yang.common.RpcError;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
@@ -118,9 +131,9 @@ public class ClusterAdminRpcServiceTest {
         String fileName = "target/testBackupDatastore";
         new File(fileName).delete();
 
-        ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore());
+        ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), null);
 
-        RpcResult<Void> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder()
+        RpcResult<BackupDatastoreOutput> rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder()
                 .setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
@@ -130,30 +143,30 @@ public class ClusterAdminRpcServiceTest {
 
             ImmutableMap<String, DatastoreSnapshot> map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0),
                     snapshots.get(1).getType(), snapshots.get(1));
-            verifyDatastoreSnapshot(node.configDataStore().getActorContext().getDataStoreName(),
-                    map.get(node.configDataStore().getActorContext().getDataStoreName()), "cars", "people");
+            verifyDatastoreSnapshot(node.configDataStore().getActorUtils().getDataStoreName(),
+                    map.get(node.configDataStore().getActorUtils().getDataStoreName()), "cars", "people");
         } finally {
             new File(fileName).delete();
         }
 
         // Test failure by killing a shard.
 
-        node.configDataStore().getActorContext().getShardManager().tell(node.datastoreContextBuilder()
+        node.configDataStore().getActorUtils().getShardManager().tell(node.datastoreContextBuilder()
                 .shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender());
 
-        ActorRef carsShardActor = node.configDataStore().getActorContext().findLocalShard("cars").get();
+        ActorRef carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").get();
         node.kit().watch(carsShardActor);
         carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
         node.kit().expectTerminated(carsShardActor);
 
         rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build())
                 .get(5, TimeUnit.SECONDS);
-        assertEquals("isSuccessful", false, rpcResult.isSuccessful());
+        assertFalse("isSuccessful", rpcResult.isSuccessful());
         assertEquals("getErrors", 1, rpcResult.getErrors().size());
     }
 
-    private static void verifyDatastoreSnapshot(String type, DatastoreSnapshot datastoreSnapshot,
-            String... expShardNames) {
+    private static void verifyDatastoreSnapshot(final String type, final DatastoreSnapshot datastoreSnapshot,
+            final String... expShardNames) {
         assertNotNull("Missing DatastoreSnapshot for type " + type, datastoreSnapshot);
         Set<String> shardNames = new HashSet<>();
         for (DatastoreSnapshot.ShardSnapshot s: datastoreSnapshot.getShardSnapshots()) {
@@ -163,11 +176,63 @@ public class ClusterAdminRpcServiceTest {
         assertEquals("DatastoreSnapshot shard names", Sets.newHashSet(expShardNames), shardNames);
     }
 
+    @Test
+    public void testGetPrefixShardRole() throws Exception {
+        String name = "testGetPrefixShardRole";
+        String moduleShardsConfig = "module-shards-default-member-1.conf";
+
+        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+                .moduleShardsConfig(moduleShardsConfig).build();
+
+        member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default");
+    }
+
+    @Test
+    public void testModuleShardLeaderMovement() throws Exception {
+        String name = "testModuleShardLeaderMovement";
+        String moduleShardsConfig = "module-shards-member1.conf";
+
+        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+                .waitForShardLeader("cars").moduleShardsConfig(moduleShardsConfig).build();
+        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+                .moduleShardsConfig(moduleShardsConfig).build();
+        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+                .moduleShardsConfig(moduleShardsConfig).build();
+
+        member1.waitForMembersUp("member-2", "member-3");
+        replicaNode2.waitForMembersUp("member-1");
+        replicaNode3.waitForMembersUp("member-1", "member-2");
+
+        doAddShardReplica(replicaNode2, "cars", "member-1");
+        doAddShardReplica(replicaNode3, "cars", "member-1", "member-2");
+
+        verifyRaftPeersPresent(member1.configDataStore(), "cars", "member-2", "member-3");
+
+        verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
+
+        verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
+
+        doMakeShardLeaderLocal(member1, "cars", "member-1");
+        verifyRaftState(replicaNode2.configDataStore(), "cars",
+            raftState -> assertThat(raftState.getLeader(), containsString("member-1")));
+        verifyRaftState(replicaNode3.configDataStore(), "cars",
+            raftState -> assertThat(raftState.getLeader(), containsString("member-1")));
+
+        doMakeShardLeaderLocal(replicaNode2, "cars", "member-2");
+        verifyRaftState(member1.configDataStore(), "cars",
+            raftState -> assertThat(raftState.getLeader(), containsString("member-2")));
+        verifyRaftState(replicaNode3.configDataStore(), "cars",
+            raftState -> assertThat(raftState.getLeader(), containsString("member-2")));
+
+        replicaNode2.waitForMembersUp("member-3");
+        doMakeShardLeaderLocal(replicaNode3, "cars", "member-3");
+    }
+
     @Test
     public void testAddShardReplica() throws Exception {
         String name = "testAddShardReplica";
         String moduleShardsConfig = "module-shards-cars-member-1.conf";
-        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name )
+        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build();
 
         MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
@@ -195,12 +260,12 @@ public class ClusterAdminRpcServiceTest {
         // Write data to member-3's oper datastore and read/verify via member-2
         writeCarsNodeAndVerify(newReplicaNode3.operDataStore(), newReplicaNode2.operDataStore());
 
-        // Verify all data has been replicated. We expect 3 log entries and thus last applied index of 2 -
-        // 2 ServerConfigurationPayload entries and the transaction payload entry.
+        // Verify all data has been replicated. We expect a last applied index of 4 -
+        // 2 ServerConfigurationPayload entries plus the transaction payload and purge payload entries.
 
         RaftStateVerifier verifier = raftState -> {
-            assertEquals("Commit index", 2, raftState.getCommitIndex());
-            assertEquals("Last applied index", 2, raftState.getLastApplied());
+            assertEquals("Commit index", 4, raftState.getCommitIndex());
+            assertEquals("Last applied index", 4, raftState.getLastApplied());
         };
 
         verifyRaftState(leaderNode1.configDataStore(), "cars", verifier);
@@ -231,9 +296,9 @@ public class ClusterAdminRpcServiceTest {
                 .moduleShardsConfig("module-shards-cars-member-1.conf").build();
 
         ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore());
+                memberNode.operDataStore(), null);
 
-        RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+        RpcResult<AddShardReplicaOutput> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
                 .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
@@ -246,15 +311,15 @@ public class ClusterAdminRpcServiceTest {
         verifyFailedRpcResult(rpcResult);
     }
 
-    private static NormalizedNode<?, ?> writeCarsNodeAndVerify(DistributedDataStore writeToStore,
-            DistributedDataStore readFromStore) throws Exception {
+    private static NormalizedNode<?, ?> writeCarsNodeAndVerify(final AbstractDataStore writeToStore,
+            final AbstractDataStore readFromStore) throws Exception {
         DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
         NormalizedNode<?, ?> carsNode = CarsModel.create();
         writeTx.write(CarsModel.BASE_PATH, carsNode);
 
         DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-        Boolean canCommit = cohort .canCommit().get(7, TimeUnit.SECONDS);
-        assertEquals("canCommit", true, canCommit);
+        Boolean canCommit = cohort.canCommit().get(7, TimeUnit.SECONDS);
+        assertEquals("canCommit", TRUE, canCommit);
         cohort.preCommit().get(5, TimeUnit.SECONDS);
         cohort.commit().get(5, TimeUnit.SECONDS);
 
@@ -262,29 +327,29 @@ public class ClusterAdminRpcServiceTest {
         return carsNode;
     }
 
-    private static void readCarsNodeAndVerify(DistributedDataStore readFromStore,
-            NormalizedNode<?, ?> expCarsNode) throws Exception {
-        Optional<NormalizedNode<?, ?>> optional = readFromStore.newReadOnlyTransaction()
-                .read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS);
-        assertEquals("isPresent", true, optional.isPresent());
+    private static void readCarsNodeAndVerify(final AbstractDataStore readFromStore,
+            final NormalizedNode<?, ?> expCarsNode) throws Exception {
+        Optional<NormalizedNode<?, ?>> optional = readFromStore.newReadOnlyTransaction().read(CarsModel.BASE_PATH)
+                .get(15, TimeUnit.SECONDS);
+        assertTrue("isPresent", optional.isPresent());
         assertEquals("Data node", expCarsNode, optional.get());
     }
 
-    private static void doAddShardReplica(MemberNode memberNode, String shardName, String... peerMemberNames)
-            throws Exception {
+    private static void doAddShardReplica(final MemberNode memberNode, final String shardName,
+            final String... peerMemberNames) throws Exception {
         memberNode.waitForMembersUp(peerMemberNames);
 
         ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore());
+                memberNode.operDataStore(), null);
 
-        RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
-                .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+        RpcResult<AddShardReplicaOutput> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+            .setShardName(shardName).setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
 
-        Optional<ActorRef> optional = memberNode.operDataStore().getActorContext().findLocalShard(shardName);
-        assertEquals("Oper shard present", false, optional.isPresent());
+        Optional<ActorRef> optional = memberNode.operDataStore().getActorUtils().findLocalShard(shardName);
+        assertFalse("Oper shard present", optional.isPresent());
 
         rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
                 .setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS);
@@ -293,7 +358,22 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames);
     }
 
-    private static <T> T verifySuccessfulRpcResult(RpcResult<T> rpcResult) {
+    private static void doMakeShardLeaderLocal(final MemberNode memberNode, final String shardName,
+            final String newLeader) throws Exception {
+        ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
+                memberNode.operDataStore(), null);
+
+        final RpcResult<MakeLeaderLocalOutput> rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder()
+                .setDataStoreType(DataStoreType.Config).setShardName(shardName).build())
+                .get(10, TimeUnit.SECONDS);
+
+        verifySuccessfulRpcResult(rpcResult);
+
+        verifyRaftState(memberNode.configDataStore(), shardName, raftState -> assertThat(raftState.getLeader(),
+                containsString(newLeader)));
+    }
+
+    private static <T> T verifySuccessfulRpcResult(final RpcResult<T> rpcResult) {
         if (!rpcResult.isSuccessful()) {
             if (rpcResult.getErrors().size() > 0) {
                 RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
@@ -306,8 +386,8 @@ public class ClusterAdminRpcServiceTest {
         return rpcResult.getResult();
     }
 
-    private static void verifyFailedRpcResult(RpcResult<Void> rpcResult) {
-        assertEquals("RpcResult", false, rpcResult.isSuccessful());
+    private static void verifyFailedRpcResult(final RpcResult<?> rpcResult) {
+        assertFalse("RpcResult", rpcResult.isSuccessful());
         assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size());
         RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
         assertNotNull("RpcResult error message null", error.getMessage());
@@ -337,9 +417,9 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-3 to remove its local shard
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
-        RpcResult<Void> rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
+        RpcResult<RemoveShardReplicaOutput> rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
                 .setShardName("cars").setMemberName("member-3").setDataStoreType(DataStoreType.Config).build())
                 .get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
@@ -362,7 +442,7 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-1 to remove member-2
 
         ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore());
+                leaderNode1.operDataStore(), null);
 
         rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().setShardName("cars")
                 .setMemberName("member-2").setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
@@ -398,9 +478,9 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on leader member-1 to remove its local shard
 
         ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore());
+                leaderNode1.operDataStore(), null);
 
-        RpcResult<Void> rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
+        RpcResult<RemoveShardReplicaOutput> rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
                 .setShardName("cars").setMemberName("member-1").setDataStoreType(DataStoreType.Config).build())
                 .get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
@@ -418,15 +498,16 @@ public class ClusterAdminRpcServiceTest {
     public void testAddReplicasForAllShards() throws Exception {
         String name = "testAddReplicasForAllShards";
         String moduleShardsConfig = "module-shards-member1.conf";
-        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name )
+        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars", "people").build();
 
         ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
-                "pets", null, Arrays.asList(MEMBER_1));
-        leaderNode1.configDataStore().getActorContext().getShardManager().tell(
+                                                                                 "pets", null,
+                                                                                 Collections.singletonList(MEMBER_1));
+        leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
         leaderNode1.kit().expectMsgClass(Success.class);
-        leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorContext(), "pets");
+        leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorUtils(), "pets");
 
         MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
@@ -434,21 +515,23 @@ public class ClusterAdminRpcServiceTest {
         leaderNode1.waitForMembersUp("member-2");
         newReplicaNode2.waitForMembersUp("member-1");
 
-        newReplicaNode2.configDataStore().getActorContext().getShardManager().tell(
+        newReplicaNode2.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), newReplicaNode2.kit().getRef());
         newReplicaNode2.kit().expectMsgClass(Success.class);
 
-        newReplicaNode2.operDataStore().getActorContext().getShardManager().tell(
+        newReplicaNode2.operDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(new ModuleShardConfiguration(URI.create("no-leader-ns"), "no-leader-module",
-                        "no-leader", null, Arrays.asList(MEMBER_1)), Shard.builder(), null),
+                                                             "no-leader", null,
+                                                             Collections.singletonList(MEMBER_1)),
+                                Shard.builder(), null),
                                 newReplicaNode2.kit().getRef());
         newReplicaNode2.kit().expectMsgClass(Success.class);
 
         ClusterAdminRpcService service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
-                newReplicaNode2.operDataStore());
+                newReplicaNode2.operDataStore(), null);
 
-        RpcResult<AddReplicasForAllShardsOutput> rpcResult =
-                service.addReplicasForAllShards().get(10, TimeUnit.SECONDS);
+        RpcResult<AddReplicasForAllShardsOutput> rpcResult = service.addReplicasForAllShards(
+            new AddReplicasForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
         AddReplicasForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
@@ -486,15 +569,15 @@ public class ClusterAdminRpcServiceTest {
 
         ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
                 "pets", null, Arrays.asList(MEMBER_1, MEMBER_2, MEMBER_3));
-        leaderNode1.configDataStore().getActorContext().getShardManager().tell(
+        leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
         leaderNode1.kit().expectMsgClass(Success.class);
 
-        replicaNode2.configDataStore().getActorContext().getShardManager().tell(
+        replicaNode2.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), replicaNode2.kit().getRef());
         replicaNode2.kit().expectMsgClass(Success.class);
 
-        replicaNode3.configDataStore().getActorContext().getShardManager().tell(
+        replicaNode3.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), replicaNode3.kit().getRef());
         replicaNode3.kit().expectMsgClass(Success.class);
 
@@ -503,7 +586,7 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(replicaNode3.configDataStore(), "pets", "member-1", "member-2");
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
         RpcResult<RemoveAllShardReplicasOutput> rpcResult = service3.removeAllShardReplicas(
                 new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()).get(10, TimeUnit.SECONDS);
@@ -549,24 +632,24 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-3 to change voting status
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
-        RpcResult<Void> rpcResult = service3
+        RpcResult<ChangeMemberVotingStatesForShardOutput> rpcResult = service3
                 .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
                         .setShardName("cars").setDataStoreType(DataStoreType.Config)
                         .setMemberVotingState(ImmutableList.of(
-                                new MemberVotingStateBuilder().setMemberName("member-2").setVoting(false).build(),
-                                new MemberVotingStateBuilder().setMemberName("member-3").setVoting(false).build()))
+                                new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
+                                new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
                         .build())
                 .get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
-        verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
-        verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
     }
 
     @Test
@@ -583,18 +666,18 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on the single node to change voting status
 
         ClusterAdminRpcService service = new ClusterAdminRpcService(leaderNode.configDataStore(),
-                leaderNode.operDataStore());
+                leaderNode.operDataStore(), null);
 
-        RpcResult<Void> rpcResult = service
+        RpcResult<ChangeMemberVotingStatesForShardOutput> rpcResult = service
                 .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
                         .setShardName("cars").setDataStoreType(DataStoreType.Config)
                         .setMemberVotingState(ImmutableList
-                                .of(new MemberVotingStateBuilder().setMemberName("member-1").setVoting(false).build()))
+                                .of(new MemberVotingStateBuilder().setMemberName("member-1").setVoting(FALSE).build()))
                         .build())
                 .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
-        verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", true));
+        verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE));
     }
 
     @Test
@@ -623,12 +706,12 @@ public class ClusterAdminRpcServiceTest {
         // Invoke RPC service on member-3 to change voting status
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
         RpcResult<ChangeMemberVotingStatesForAllShardsOutput> rpcResult = service3.changeMemberVotingStatesForAllShards(
                 new ChangeMemberVotingStatesForAllShardsInputBuilder().setMemberVotingState(ImmutableList.of(
-                        new MemberVotingStateBuilder().setMemberName("member-2").setVoting(false).build(),
-                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(false).build())).build())
+                        new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
+                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())).build())
                 .get(10, TimeUnit.SECONDS);
         ChangeMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
@@ -636,11 +719,11 @@ public class ClusterAdminRpcServiceTest {
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"}, new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
+                new String[]{"cars", "people"}, new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
     }
 
     @Test
@@ -657,8 +740,8 @@ public class ClusterAdminRpcServiceTest {
 
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
         final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
-                        DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
+                .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder()
+                        .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10))
                 .build();
 
         final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
@@ -671,55 +754,56 @@ public class ClusterAdminRpcServiceTest {
         leaderNode1.operDataStore().waitTillReady();
         replicaNode3.configDataStore().waitTillReady();
         replicaNode3.operDataStore().waitTillReady();
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", true), new SimpleEntry<>("member-3", false));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE));
 
         ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore());
+                replicaNode3.operDataStore(), null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards()
-                .get(10, TimeUnit.SECONDS);
+        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
         FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                 new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", false), new SimpleEntry<>("member-2", false),
-                new SimpleEntry<>("member-3", true));
+                new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
+                new SimpleEntry<>("member-3", TRUE));
 
         // Leadership should have transferred to member 3 since it is the only remaining voting member.
         verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
             assertNotNull("Expected non-null leader Id", raftState.getLeader());
-            assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
+            assertTrue("Expected leader member-3. Actual: " + raftState.getLeader(),
                     raftState.getLeader().contains("member-3"));
         });
 
         verifyRaftState(leaderNode1.operDataStore(), "cars", raftState -> {
             assertNotNull("Expected non-null leader Id", raftState.getLeader());
-            assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
+            assertTrue("Expected leader member-3. Actual: " + raftState.getLeader(),
                     raftState.getLeader().contains("member-3"));
         });
 
         // Flip the voting states back to the original states.
 
-        rpcResult = service3.flipMemberVotingStatesForAllShards(). get(10, TimeUnit.SECONDS);
+        rpcResult = service3.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
         result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                 new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", true),
-                new SimpleEntry<>("member-3", false));
+                new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
+                new SimpleEntry<>("member-3", FALSE));
 
         // Leadership should have transferred to member 1 or 2.
         verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
@@ -760,32 +844,32 @@ public class ClusterAdminRpcServiceTest {
 
         replicaNode1.waitForMembersUp("member-2", "member-3");
 
-        verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", false),
-                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false),
-                new SimpleEntry<>("member-4", true), new SimpleEntry<>("member-5", true),
-                new SimpleEntry<>("member-6", true));
+        verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", FALSE),
+                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE),
+                new SimpleEntry<>("member-4", TRUE), new SimpleEntry<>("member-5", TRUE),
+                new SimpleEntry<>("member-6", TRUE));
 
         verifyRaftState(replicaNode1.configDataStore(), "cars", raftState ->
             assertEquals("Expected raft state", RaftState.Follower.toString(), raftState.getRaftState()));
 
         ClusterAdminRpcService service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(),
-                replicaNode1.operDataStore());
+                replicaNode1.operDataStore(), null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards()
-                .get(10, TimeUnit.SECONDS);
+        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
         FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new DistributedDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                 new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", true),
-                new SimpleEntry<>("member-3", true), new SimpleEntry<>("member-4", false),
-                new SimpleEntry<>("member-5", false), new SimpleEntry<>("member-6", false));
+                new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
+                new SimpleEntry<>("member-3", TRUE), new SimpleEntry<>("member-4", FALSE),
+                new SimpleEntry<>("member-5", FALSE), new SimpleEntry<>("member-6", FALSE));
 
         // Since member 1 was changed to voting and there was no leader, it should've started an election
         // and become leader
@@ -830,16 +914,16 @@ public class ClusterAdminRpcServiceTest {
 
         leaderNode1.configDataStore().waitTillReady();
         leaderNode1.operDataStore().waitTillReady();
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
-                new SimpleEntry<>("member-2", true), new SimpleEntry<>("member-3", true),
-                new SimpleEntry<>("member-4", false), new SimpleEntry<>("member-5", false),
-                new SimpleEntry<>("member-6", false));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
+                new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", TRUE),
+                new SimpleEntry<>("member-4", FALSE), new SimpleEntry<>("member-5", FALSE),
+                new SimpleEntry<>("member-6", FALSE));
 
         ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore());
+                leaderNode1.operDataStore(), null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards()
-                .get(10, TimeUnit.SECONDS);
+        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
         FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
@@ -847,13 +931,13 @@ public class ClusterAdminRpcServiceTest {
                 successShardResult("people", DataStoreType.Operational));
 
         // Members 2 and 3 are now non-voting but should get replicated with the new server config.
-        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                 new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", false), new SimpleEntry<>("member-2", false),
-                new SimpleEntry<>("member-3", false), new SimpleEntry<>("member-4", true),
-                new SimpleEntry<>("member-5", true), new SimpleEntry<>("member-6", true));
+                new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
+                new SimpleEntry<>("member-3", FALSE), new SimpleEntry<>("member-4", TRUE),
+                new SimpleEntry<>("member-5", TRUE), new SimpleEntry<>("member-6", TRUE));
 
         // The leader (member 1) was changed to non-voting but it shouldn't be able to step down as leader yet
         // b/c it can't get a majority consensus with all voting members down. So verify it remains the leader.
@@ -863,8 +947,8 @@ public class ClusterAdminRpcServiceTest {
         });
     }
 
-    private void setupPersistedServerConfigPayload(ServerConfigurationPayload serverConfig,
-            String member, String datastoreTypeSuffix, String... shards) {
+    private static void setupPersistedServerConfigPayload(final ServerConfigurationPayload serverConfig,
+            final String member, final String datastoreTypeSuffix, final String... shards) {
         String[] datastoreTypes = {"config_", "oper_"};
         for (String type : datastoreTypes) {
             for (String shard : shards) {
@@ -877,16 +961,16 @@ public class ClusterAdminRpcServiceTest {
                 String shardID = ShardIdentifier.create(shard, MemberName.forName(member),
                         type + datastoreTypeSuffix).toString();
                 InMemoryJournal.addEntry(shardID, 1, new UpdateElectionTerm(1, null));
-                InMemoryJournal.addEntry(shardID, 2, new ReplicatedLogImplEntry(0, 1,
+                InMemoryJournal.addEntry(shardID, 2, new SimpleReplicatedLogEntry(0, 1,
                         new ServerConfigurationPayload(newServerInfo)));
             }
         }
     }
 
     @SafeVarargs
-    private static void verifyVotingStates(DistributedDataStore[] datastores, String[] shards,
-            SimpleEntry<String, Boolean>... expStates) throws Exception {
-        for (DistributedDataStore datastore: datastores) {
+    private static void verifyVotingStates(final AbstractDataStore[] datastores, final String[] shards,
+            final SimpleEntry<String, Boolean>... expStates) throws Exception {
+        for (AbstractDataStore datastore: datastores) {
             for (String shard: shards) {
                 verifyVotingStates(datastore, shard, expStates);
             }
@@ -894,18 +978,18 @@ public class ClusterAdminRpcServiceTest {
     }
 
     @SafeVarargs
-    private static void verifyVotingStates(DistributedDataStore datastore, String shardName,
-            SimpleEntry<String, Boolean>... expStates) throws Exception {
-        String localMemberName = datastore.getActorContext().getCurrentMemberName().getName();
+    private static void verifyVotingStates(final AbstractDataStore datastore, final String shardName,
+            final SimpleEntry<String, Boolean>... expStates) throws Exception {
+        String localMemberName = datastore.getActorUtils().getCurrentMemberName().getName();
         Map<String, Boolean> expStateMap = new HashMap<>();
         for (Entry<String, Boolean> e: expStates) {
             expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(e.getKey()),
-                    datastore.getActorContext().getDataStoreName()).toString(), e.getValue());
+                    datastore.getActorUtils().getDataStoreName()).toString(), e.getValue());
         }
 
         verifyRaftState(datastore, shardName, raftState -> {
             String localPeerId = ShardIdentifier.create(shardName, MemberName.forName(localMemberName),
-                    datastore.getActorContext().getDataStoreName()).toString();
+                    datastore.getActorUtils().getDataStoreName()).toString();
             assertEquals("Voting state for " + localPeerId, expStateMap.get(localPeerId), raftState.isVoting());
             for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
                 assertEquals("Voting state for " + e.getKey(), expStateMap.get(e.getKey()), e.getValue());
@@ -913,18 +997,19 @@ public class ClusterAdminRpcServiceTest {
         });
     }
 
-    private static void verifyShardResults(List<ShardResult> shardResults, ShardResult... expShardResults) {
+    private static void verifyShardResults(final Map<ShardResultKey, ShardResult> shardResults,
+            final ShardResult... expShardResults) {
         Map<String, ShardResult> expResultsMap = new HashMap<>();
         for (ShardResult r: expShardResults) {
             expResultsMap.put(r.getShardName() + "-" + r.getDataStoreType(), r);
         }
 
-        for (ShardResult result: shardResults) {
+        for (ShardResult result: shardResults.values()) {
             ShardResult exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
             assertNotNull(String.format("Unexpected result for shard %s, type %s", result.getShardName(),
                     result.getDataStoreType()), exp);
-            assertEquals("isSucceeded", exp.isSucceeded(), result.isSucceeded());
-            if (exp.isSucceeded()) {
+            assertEquals("isSucceeded", exp.getSucceeded(), result.getSucceeded());
+            if (exp.getSucceeded()) {
                 assertNull("Expected null error message", result.getErrorMessage());
             } else {
                 assertNotNull("Expected error message", result.getErrorMessage());
@@ -936,11 +1021,11 @@ public class ClusterAdminRpcServiceTest {
         }
     }
 
-    private static ShardResult successShardResult(String shardName, DataStoreType type) {
-        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(true).build();
+    private static ShardResult successShardResult(final String shardName, final DataStoreType type) {
+        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(TRUE).build();
     }
 
-    private static ShardResult failedShardResult(String shardName, DataStoreType type) {
-        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(false).build();
+    private static ShardResult failedShardResult(final String shardName, final DataStoreType type) {
+        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(FALSE).build();
     }
 }
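
For reference, a minimal sketch (not part of the commit) of the call pattern the updated tests rely on: the three-argument ClusterAdminRpcService constructor, whose third argument these tests simply pass as null, and RPC methods that take explicit input builders and return typed output objects from the org.opendaylight.mdsal-based bindings. The "node" variable and the "target/backup" path are placeholders; the imports are the same ones added by this patch.

    // Sketch only: assumes it sits in a test method declared "throws Exception",
    // with "node" being a MemberNode built the same way as in the tests above.
    ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(),
            node.operDataStore(), null);

    // Each RPC now takes a generated *Input and yields an RpcResult wrapping a generated *Output.
    RpcResult<BackupDatastoreOutput> backupResult = service.backupDatastore(
            new BackupDatastoreInputBuilder().setFilePath("target/backup").build())
            .get(5, TimeUnit.SECONDS);
    assertTrue(backupResult.isSuccessful());

    RpcResult<AddShardReplicaOutput> addResult = service.addShardReplica(
            new AddShardReplicaInputBuilder().setShardName("cars")
                    .setDataStoreType(DataStoreType.Config).build())
            .get(10, TimeUnit.SECONDS);
    assertTrue(addResult.isSuccessful());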