Rename ActorContext to ActorUtils 94/80394/4
author Robert Varga <robert.varga@pantheon.tech>
Tue, 19 Feb 2019 15:43:22 +0000 (16:43 +0100)
committer Tom Pantelis <tompantelis@gmail.com>
Mon, 25 Feb 2019 14:38:47 +0000 (14:38 +0000)
ActorContext is an overloaded name even within Akka, without us adding
to it. Furthermore, Akka's AbstractActor defines ActorContext as its
nested class, which thoroughly breaks resolution priorities.

Rename ActorContext to ActorUtils, reducing confusion about which
class a reference resolves to.
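
For illustration only (this snippet is not part of the change, and the
actor name is hypothetical), the shadowing at play looks roughly like
this: inside any actor extending Akka's AbstractActor, the inherited
nested ActorContext type shadows a same-named single-type import within
the class body, so a plain "ActorContext" reference stops pointing at
the datastore utility class.

    import akka.actor.AbstractActor;
    import org.opendaylight.controller.cluster.datastore.utils.ActorContext;

    // Hypothetical actor, sketching the pre-rename name clash.
    public class ExampleActor extends AbstractActor {
        // Resolves to akka.actor.AbstractActor.ActorContext, not the import
        // above; the datastore class would need its fully qualified name here.
        private ActorContext datastoreContext;

        @Override
        public Receive createReceive() {
            return receiveBuilder().build();
        }
    }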

Change-Id: I140239a8f74ee7deecf9ee848df0cfbbb72f3c4d
Signed-off-by: Robert Varga <robert.varga@pantheon.tech>
68 files changed:
opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcService.java
opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ClientBackedDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortRegistrationProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreInterface.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextWrapper.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallback.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipService.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBeanImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorUtils.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java with 97% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/TransactionRateLimiter.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/CDSShardAccessImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardChangePublisher.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardedDataTreeActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/resources/OSGI-INF/blueprint/clustered-datastore.xml
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedDataStoreTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandleTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistoryTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehaviorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientLocalHistoryTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehaviorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolverTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientBehaviorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/SingleClientHistoryTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/IntegrationTestKit.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionContextWrapperTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallbackTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipServiceTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorUtilsTest.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorContextTest.java with 74% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/TransactionRateLimiterTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/CDSShardAccessImplTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardFrontendTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeTest.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/MdsalLowLevelTestProvider.java

index 1bff714..4012f2c 100644 (file)
@@ -45,7 +45,7 @@ import org.opendaylight.controller.cluster.datastore.messages.RemovePrefixShardR
 import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshotList;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
@@ -124,7 +124,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         this.serializer = serializer;
 
         this.makeLeaderLocalTimeout =
-                new Timeout(configDataStore.getActorContext().getDatastoreContext()
+                new Timeout(configDataStore.getActorUtils().getDatastoreContext()
                         .getShardLeaderElectionTimeout().duration().$times(2));
     }
 
@@ -213,15 +213,14 @@ public class ClusterAdminRpcService implements ClusterAdminService {
             return newFailedRpcResultFuture("A valid DataStoreType must be specified");
         }
 
-        ActorContext actorContext = dataStoreType == DataStoreType.Config
-                ? configDataStore.getActorContext()
-                : operDataStore.getActorContext();
+        ActorUtils actorUtils = dataStoreType == DataStoreType.Config
+                ? configDataStore.getActorUtils() : operDataStore.getActorUtils();
 
         LOG.info("Moving leader to local node {} for shard {}, datastoreType {}",
-                actorContext.getCurrentMemberName().getName(), shardName, dataStoreType);
+                actorUtils.getCurrentMemberName().getName(), shardName, dataStoreType);
 
         final scala.concurrent.Future<ActorRef> localShardReply =
-                actorContext.findLocalShardAsync(shardName);
+                actorUtils.findLocalShardAsync(shardName);
 
         final scala.concurrent.Promise<Object> makeLeaderLocalAsk = akka.dispatch.Futures.promise();
         localShardReply.onComplete(new OnComplete<ActorRef>() {
@@ -233,11 +232,11 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                     makeLeaderLocalAsk.failure(failure);
                 } else {
                     makeLeaderLocalAsk
-                            .completeWith(actorContext
+                            .completeWith(actorUtils
                                     .executeOperationAsync(actorRef, MakeLeaderLocal.INSTANCE, makeLeaderLocalTimeout));
                 }
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
 
         final SettableFuture<RpcResult<MakeLeaderLocalOutput>> future = SettableFuture.create();
         makeLeaderLocalAsk.future().onComplete(new OnComplete<Object>() {
@@ -253,7 +252,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                 LOG.debug("Leadership transfer complete");
                 future.set(RpcResultBuilder.success(new MakeLeaderLocalOutputBuilder().build()).build());
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
 
         return future;
     }
@@ -626,14 +625,14 @@ public class ClusterAdminRpcService implements ClusterAdminService {
     private <T> void sendMessageToManagerForConfiguredShards(final DataStoreType dataStoreType,
             final List<Entry<ListenableFuture<T>, ShardResultBuilder>> shardResultData,
             final Function<String, Object> messageSupplier) {
-        ActorContext actorContext = dataStoreType == DataStoreType.Config ? configDataStore.getActorContext()
-                : operDataStore.getActorContext();
-        Set<String> allShardNames = actorContext.getConfiguration().getAllShardNames();
+        ActorUtils actorUtils = dataStoreType == DataStoreType.Config ? configDataStore.getActorUtils()
+                : operDataStore.getActorUtils();
+        Set<String> allShardNames = actorUtils.getConfiguration().getAllShardNames();
 
-        LOG.debug("Sending message to all shards {} for data store {}", allShardNames, actorContext.getDataStoreName());
+        LOG.debug("Sending message to all shards {} for data store {}", allShardNames, actorUtils.getDataStoreName());
 
         for (String shardName: allShardNames) {
-            ListenableFuture<T> future = this.ask(actorContext.getShardManager(), messageSupplier.apply(shardName),
+            ListenableFuture<T> future = this.ask(actorUtils.getShardManager(), messageSupplier.apply(shardName),
                                                   SHARD_MGR_TIMEOUT);
             shardResultData.add(new SimpleEntry<>(future,
                     new ShardResultBuilder().setShardName(shardName).setDataStoreType(dataStoreType)));
@@ -642,16 +641,16 @@ public class ClusterAdminRpcService implements ClusterAdminService {
 
     private <T> ListenableFuture<List<T>> sendMessageToShardManagers(final Object message) {
         Timeout timeout = SHARD_MGR_TIMEOUT;
-        ListenableFuture<T> configFuture = ask(configDataStore.getActorContext().getShardManager(), message, timeout);
-        ListenableFuture<T> operFuture = ask(operDataStore.getActorContext().getShardManager(), message, timeout);
+        ListenableFuture<T> configFuture = ask(configDataStore.getActorUtils().getShardManager(), message, timeout);
+        ListenableFuture<T> operFuture = ask(operDataStore.getActorUtils().getShardManager(), message, timeout);
 
         return Futures.allAsList(configFuture, operFuture);
     }
 
     private <T> ListenableFuture<T> sendMessageToShardManager(final DataStoreType dataStoreType, final Object message) {
         ActorRef shardManager = dataStoreType == DataStoreType.Config
-                ? configDataStore.getActorContext().getShardManager()
-                        : operDataStore.getActorContext().getShardManager();
+                ? configDataStore.getActorUtils().getShardManager()
+                        : operDataStore.getActorUtils().getShardManager();
         return ask(shardManager, message, SHARD_MGR_TIMEOUT);
     }
 
@@ -695,7 +694,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                     returnFuture.set(resp);
                 }
             }
-        }, configDataStore.getActorContext().getClientDispatcher());
+        }, configDataStore.getActorUtils().getClientDispatcher());
 
         return returnFuture;
     }
index 8f9f2b2..b87c07e 100644 (file)
@@ -165,18 +165,18 @@ public class ClusterAdminRpcServiceTest {
 
             ImmutableMap<String, DatastoreSnapshot> map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0),
                     snapshots.get(1).getType(), snapshots.get(1));
-            verifyDatastoreSnapshot(node.configDataStore().getActorContext().getDataStoreName(),
-                    map.get(node.configDataStore().getActorContext().getDataStoreName()), "cars", "people");
+            verifyDatastoreSnapshot(node.configDataStore().getActorUtils().getDataStoreName(),
+                    map.get(node.configDataStore().getActorUtils().getDataStoreName()), "cars", "people");
         } finally {
             new File(fileName).delete();
         }
 
         // Test failure by killing a shard.
 
-        node.configDataStore().getActorContext().getShardManager().tell(node.datastoreContextBuilder()
+        node.configDataStore().getActorUtils().getShardManager().tell(node.datastoreContextBuilder()
                 .shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender());
 
-        ActorRef carsShardActor = node.configDataStore().getActorContext().findLocalShard("cars").get();
+        ActorRef carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").get();
         node.kit().watch(carsShardActor);
         carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
         node.kit().expectTerminated(carsShardActor);
@@ -214,14 +214,14 @@ public class ClusterAdminRpcServiceTest {
         replicaNode2.kit().waitForMembersUp("member-1", "member-3");
         replicaNode3.kit().waitForMembersUp("member-1", "member-2");
 
-        final ActorRef shardManager1 = member1.configDataStore().getActorContext().getShardManager();
+        final ActorRef shardManager1 = member1.configDataStore().getActorUtils().getShardManager();
 
         shardManager1.tell(new PrefixShardCreated(new PrefixShardConfiguration(
                         new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH),
                         "prefix", Collections.singleton(MEMBER_1))),
                 ActorRef.noSender());
 
-        member1.kit().waitUntilLeader(member1.configDataStore().getActorContext(),
+        member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(),
                 ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
 
         final InstanceIdentifier<Cars> identifier = InstanceIdentifier.create(Cars.class);
@@ -258,7 +258,7 @@ public class ClusterAdminRpcServiceTest {
         final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        member1.kit().waitUntilLeader(member1.configDataStore().getActorContext(), "default");
+        member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default");
 
         final RpcResult<GetShardRoleOutput> successResult =
                 getShardRole(member1, Mockito.mock(BindingNormalizedNodeSerializer.class), "default");
@@ -270,14 +270,14 @@ public class ClusterAdminRpcServiceTest {
 
         verifyFailedRpcResult(failedResult);
 
-        final ActorRef shardManager1 = member1.configDataStore().getActorContext().getShardManager();
+        final ActorRef shardManager1 = member1.configDataStore().getActorUtils().getShardManager();
 
         shardManager1.tell(new PrefixShardCreated(new PrefixShardConfiguration(
                         new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH),
                         "prefix", Collections.singleton(MEMBER_1))),
                 ActorRef.noSender());
 
-        member1.kit().waitUntilLeader(member1.configDataStore().getActorContext(),
+        member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(),
                 ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
 
         final InstanceIdentifier<Cars> identifier = InstanceIdentifier.create(Cars.class);
@@ -307,7 +307,7 @@ public class ClusterAdminRpcServiceTest {
         final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        member1.kit().waitUntilLeader(member1.configDataStore().getActorContext(), "default");
+        member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default");
 
 
     }
@@ -506,7 +506,7 @@ public class ClusterAdminRpcServiceTest {
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
-        Optional<ActorRef> optional = memberNode.configDataStore().getActorContext().findLocalShard(shardName);
+        Optional<ActorRef> optional = memberNode.configDataStore().getActorUtils().findLocalShard(shardName);
         assertTrue("Replica shard not present", optional.isPresent());
     }
 
@@ -541,7 +541,7 @@ public class ClusterAdminRpcServiceTest {
 
         verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
 
-        Optional<ActorRef> optional = memberNode.operDataStore().getActorContext().findLocalShard(shardName);
+        Optional<ActorRef> optional = memberNode.operDataStore().getActorUtils().findLocalShard(shardName);
         assertFalse("Oper shard present", optional.isPresent());
 
         rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
@@ -697,10 +697,10 @@ public class ClusterAdminRpcServiceTest {
         ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
                                                                                  "pets", null,
                                                                                  Collections.singletonList(MEMBER_1));
-        leaderNode1.configDataStore().getActorContext().getShardManager().tell(
+        leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
         leaderNode1.kit().expectMsgClass(Success.class);
-        leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorContext(), "pets");
+        leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorUtils(), "pets");
 
         MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
@@ -708,11 +708,11 @@ public class ClusterAdminRpcServiceTest {
         leaderNode1.waitForMembersUp("member-2");
         newReplicaNode2.waitForMembersUp("member-1");
 
-        newReplicaNode2.configDataStore().getActorContext().getShardManager().tell(
+        newReplicaNode2.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), newReplicaNode2.kit().getRef());
         newReplicaNode2.kit().expectMsgClass(Success.class);
 
-        newReplicaNode2.operDataStore().getActorContext().getShardManager().tell(
+        newReplicaNode2.operDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(new ModuleShardConfiguration(URI.create("no-leader-ns"), "no-leader-module",
                                                              "no-leader", null,
                                                              Collections.singletonList(MEMBER_1)),
@@ -762,15 +762,15 @@ public class ClusterAdminRpcServiceTest {
 
         ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
                 "pets", null, Arrays.asList(MEMBER_1, MEMBER_2, MEMBER_3));
-        leaderNode1.configDataStore().getActorContext().getShardManager().tell(
+        leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
         leaderNode1.kit().expectMsgClass(Success.class);
 
-        replicaNode2.configDataStore().getActorContext().getShardManager().tell(
+        replicaNode2.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), replicaNode2.kit().getRef());
         replicaNode2.kit().expectMsgClass(Success.class);
 
-        replicaNode3.configDataStore().getActorContext().getShardManager().tell(
+        replicaNode3.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), replicaNode3.kit().getRef());
         replicaNode3.kit().expectMsgClass(Success.class);
 
@@ -1173,16 +1173,16 @@ public class ClusterAdminRpcServiceTest {
     @SafeVarargs
     private static void verifyVotingStates(final AbstractDataStore datastore, final String shardName,
             final SimpleEntry<String, Boolean>... expStates) throws Exception {
-        String localMemberName = datastore.getActorContext().getCurrentMemberName().getName();
+        String localMemberName = datastore.getActorUtils().getCurrentMemberName().getName();
         Map<String, Boolean> expStateMap = new HashMap<>();
         for (Entry<String, Boolean> e: expStates) {
             expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(e.getKey()),
-                    datastore.getActorContext().getDataStoreName()).toString(), e.getValue());
+                    datastore.getActorUtils().getDataStoreName()).toString(), e.getValue());
         }
 
         verifyRaftState(datastore, shardName, raftState -> {
             String localPeerId = ShardIdentifier.create(shardName, MemberName.forName(localMemberName),
-                    datastore.getActorContext().getDataStoreName()).toString();
+                    datastore.getActorUtils().getDataStoreName()).toString();
             assertEquals("Voting state for " + localPeerId, expStateMap.get(localPeerId), raftState.isVoting());
             for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
                 assertEquals("Voting state for " + e.getKey(), expStateMap.get(e.getKey()), e.getValue());
index 73622a0..3a529a1 100644 (file)
@@ -16,7 +16,7 @@ import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
 import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
@@ -34,9 +34,9 @@ public class ClientBackedDataStore extends AbstractDataStore {
     }
 
     @VisibleForTesting
-    ClientBackedDataStore(final ActorContext actorContext, final ClientIdentifier identifier,
+    ClientBackedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier,
                           final DataStoreClient clientActor) {
-        super(actorContext, identifier, clientActor);
+        super(actorUtils, identifier, clientActor);
     }
 
     @Override
@@ -60,7 +60,7 @@ public class ClientBackedDataStore extends AbstractDataStore {
     }
 
     private boolean debugAllocation() {
-        return getActorContext().getDatastoreContext().isTransactionDebugContextEnabled();
+        return getActorUtils().getDatastoreContext().isTransactionDebugContextEnabled();
     }
 
     private Throwable allocationContext() {
index 9207118..12858b9 100644 (file)
@@ -7,10 +7,11 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.util.Timeout;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import java.util.concurrent.TimeUnit;
 import javax.annotation.Nonnull;
 import org.opendaylight.controller.cluster.access.client.AbstractClientActor;
@@ -18,7 +19,7 @@ import org.opendaylight.controller.cluster.access.client.ClientActorConfig;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.common.actor.ExplicitAsk;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import scala.Function1;
 import scala.concurrent.Await;
 import scala.concurrent.duration.Duration;
@@ -26,25 +27,25 @@ import scala.concurrent.duration.Duration;
 public abstract class AbstractDataStoreClientActor extends AbstractClientActor {
     private static final Function1<ActorRef, ?> GET_CLIENT_FACTORY = ExplicitAsk.toScala(GetClientRequest::new);
 
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
 
-    AbstractDataStoreClientActor(final FrontendIdentifier frontendId, final ActorContext actorContext) {
+    AbstractDataStoreClientActor(final FrontendIdentifier frontendId, final ActorUtils actorUtils) {
         super(frontendId);
-        this.actorContext = Preconditions.checkNotNull(actorContext);
+        this.actorUtils = requireNonNull(actorUtils);
     }
 
     @Override
     protected ClientActorConfig getClientActorConfig() {
-        return actorContext.getDatastoreContext();
+        return actorUtils.getDatastoreContext();
     }
 
     @Override
     protected final AbstractDataStoreClientBehavior initialBehavior(final ClientActorContext context) {
-        return Verify.verifyNotNull(initialBehavior(context, actorContext));
+        return verifyNotNull(initialBehavior(context, actorUtils));
     }
 
     @SuppressWarnings("checkstyle:hiddenField")
-    abstract AbstractDataStoreClientBehavior initialBehavior(ClientActorContext context, ActorContext actorContext);
+    abstract AbstractDataStoreClientBehavior initialBehavior(ClientActorContext context, ActorUtils actorUtils);
 
     @SuppressWarnings("checkstyle:IllegalCatch")
     public static DataStoreClient getDistributedDataStoreClient(@Nonnull final ActorRef actor,
index d2b10c1..eddbba6 100644 (file)
@@ -7,9 +7,11 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.util.Timeout;
-import com.google.common.base.Preconditions;
 import com.google.common.primitives.UnsignedLong;
 import java.util.Set;
 import java.util.concurrent.CompletableFuture;
@@ -35,7 +37,7 @@ import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderExc
 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -45,7 +47,7 @@ import scala.compat.java8.FutureConverters;
 /**
  * {@link BackendInfoResolver} implementation for static shard configuration based on ShardManager. Each string-named
  * shard is assigned a single cookie and this mapping is stored in a bidirectional map. Information about corresponding
- * shard leader is resolved via {@link ActorContext}. The product of resolution is {@link ShardBackendInfo}.
+ * shard leader is resolved via {@link ActorUtils}. The product of resolution is {@link ShardBackendInfo}.
  *
  * @author Robert Varga
  */
@@ -57,7 +59,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
         private ShardBackendInfo result;
 
         ShardState(final CompletionStage<ShardBackendInfo> stage) {
-            this.stage = Preconditions.checkNotNull(stage);
+            this.stage = requireNonNull(stage);
             stage.whenComplete(this::onStageResolved);
         }
 
@@ -71,7 +73,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
 
         private synchronized void onStageResolved(final ShardBackendInfo info, final Throwable failure) {
             if (failure == null) {
-                this.result = Preconditions.checkNotNull(info);
+                this.result = requireNonNull(info);
             } else {
                 LOG.warn("Failed to resolve shard", failure);
             }
@@ -88,12 +90,12 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
 
     private final AtomicLong nextSessionId = new AtomicLong();
     private final Function1<ActorRef, ?> connectFunction;
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final Set<Consumer<Long>> staleBackendInfoCallbacks = ConcurrentHashMap.newKeySet();
 
     // FIXME: we really need just ActorContext.findPrimaryShardAsync()
-    AbstractShardBackendResolver(final ClientIdentifier clientId, final ActorContext actorContext) {
-        this.actorContext = Preconditions.checkNotNull(actorContext);
+    AbstractShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
+        this.actorUtils = requireNonNull(actorUtils);
         this.connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.BORON,
             ABIVersion.current()));
     }
@@ -108,19 +110,19 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
         staleBackendInfoCallbacks.forEach(callback -> callback.accept(cookie));
     }
 
-    protected ActorContext actorContext() {
-        return actorContext;
+    protected ActorUtils actorUtils() {
+        return actorUtils;
     }
 
     protected final void flushCache(final String shardName) {
-        actorContext.getPrimaryShardInfoCache().remove(shardName);
+        actorUtils.getPrimaryShardInfoCache().remove(shardName);
     }
 
     protected final ShardState resolveBackendInfo(final String shardName, final long cookie) {
         LOG.debug("Resolving cookie {} to shard {}", cookie, shardName);
 
         final CompletableFuture<ShardBackendInfo> future = new CompletableFuture<>();
-        FutureConverters.toJava(actorContext.findPrimaryShardAsync(shardName)).whenComplete((info, failure) -> {
+        FutureConverters.toJava(actorUtils.findPrimaryShardAsync(shardName)).whenComplete((info, failure) -> {
             if (failure == null) {
                 connectShard(shardName, cookie, info, future);
                 return;
@@ -146,7 +148,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
 
     private static TimeoutException wrap(final String message, final Throwable cause) {
         final TimeoutException ret = new TimeoutException(message);
-        ret.initCause(Preconditions.checkNotNull(cause));
+        ret.initCause(requireNonNull(cause));
         return ret;
     }
 
@@ -175,8 +177,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
         }
 
         LOG.debug("Resolved backend information to {}", response);
-        Preconditions.checkArgument(response instanceof ConnectClientSuccess, "Unhandled response %s",
-            response);
+        checkArgument(response instanceof ConnectClientSuccess, "Unhandled response %s", response);
         final ConnectClientSuccess success = (ConnectClientSuccess) response;
         future.complete(new ShardBackendInfo(success.getBackend(), nextSessionId.getAndIncrement(),
             success.getVersion(), shardName, UnsignedLong.fromLongBits(cookie), success.getDataTree(),
index 69e0d5d..719a3b1 100644 (file)
@@ -14,7 +14,7 @@ import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 /**
  * A {@link AbstractClientActor} which acts as the point of contact for DistributedDataStore.
@@ -22,17 +22,17 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
  * @author Robert Varga
  */
 public final class DistributedDataStoreClientActor extends AbstractDataStoreClientActor {
-    private DistributedDataStoreClientActor(final FrontendIdentifier frontendId, final ActorContext actorContext) {
-        super(frontendId, actorContext);
+    private DistributedDataStoreClientActor(final FrontendIdentifier frontendId, final ActorUtils actorUtils) {
+        super(frontendId, actorUtils);
     }
 
     @Override
-    AbstractDataStoreClientBehavior initialBehavior(final ClientActorContext context, final ActorContext actorContext) {
-        return new DistributedDataStoreClientBehavior(context, actorContext);
+    AbstractDataStoreClientBehavior initialBehavior(final ClientActorContext context, final ActorUtils actorUtils) {
+        return new DistributedDataStoreClientBehavior(context, actorUtils);
     }
 
     public static Props props(@Nonnull final MemberName memberName, @Nonnull final String storeName,
-            final ActorContext ctx) {
+            final ActorUtils ctx) {
         final String name = "datastore-" + storeName;
         final FrontendIdentifier frontendId = FrontendIdentifier.create(memberName, FrontendType.forName(name));
         return Props.create(DistributedDataStoreClientActor.class,
index 792b5b3..e40da21 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import java.util.function.Function;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
@@ -26,8 +26,8 @@ final class DistributedDataStoreClientBehavior extends AbstractDataStoreClientBe
         pathToShard = resolver::resolveShardForPath;
     }
 
-    DistributedDataStoreClientBehavior(final ClientActorContext context, final ActorContext actorContext) {
-        this(context, new ModuleShardBackendResolver(context.getIdentifier(), actorContext));
+    DistributedDataStoreClientBehavior(final ClientActorContext context, final ActorUtils actorUtils) {
+        this(context, new ModuleShardBackendResolver(context.getIdentifier(), actorUtils));
     }
 
     @Override
index f6452a1..d360508 100644 (file)
@@ -26,7 +26,7 @@ import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.datastore.shardmanager.RegisterForShardAvailabilityChanges;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
@@ -36,7 +36,7 @@ import scala.concurrent.Future;
 /**
  * {@link BackendInfoResolver} implementation for static shard configuration based on ShardManager. Each string-named
  * shard is assigned a single cookie and this mapping is stored in a bidirectional map. Information about corresponding
- * shard leader is resolved via {@link ActorContext}. The product of resolution is {@link ShardBackendInfo}.
+ * shard leader is resolved via {@link ActorUtils}. The product of resolution is {@link ShardBackendInfo}.
  *
  * @author Robert Varga
  */
@@ -54,10 +54,10 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     private volatile BiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
 
     // FIXME: we really need just ActorContext.findPrimaryShardAsync()
-    ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorContext actorContext) {
-        super(clientId, actorContext);
+    ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
+        super(clientId, actorUtils);
 
-        shardAvailabilityChangesRegFuture = ask(actorContext.getShardManager(), new RegisterForShardAvailabilityChanges(
+        shardAvailabilityChangesRegFuture = ask(actorUtils.getShardManager(), new RegisterForShardAvailabilityChanges(
             this::onShardAvailabilityChange), Timeout.apply(60, TimeUnit.MINUTES))
                 .map(reply -> (Registration)reply, ExecutionContexts.global());
 
@@ -84,7 +84,7 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     }
 
     Long resolveShardForPath(final YangInstanceIdentifier path) {
-        final String shardName = actorContext().getShardStrategyFactory().getStrategy(path).findShard(path);
+        final String shardName = actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
         Long cookie = shards.get(shardName);
         if (cookie == null) {
             synchronized (this) {
index 7068b27..de67400 100644 (file)
@@ -7,15 +7,16 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.Props;
-import com.google.common.base.Preconditions;
 import javax.annotation.Nonnull;
 import org.opendaylight.controller.cluster.access.client.AbstractClientActor;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 /**
  * A {@link AbstractClientActor} which acts as the point of contact for DistributedDataStore.
@@ -25,22 +26,22 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 public final class SimpleDataStoreClientActor extends AbstractDataStoreClientActor {
     private final String shardName;
 
-    private SimpleDataStoreClientActor(final FrontendIdentifier frontendId, final ActorContext actorContext,
+    private SimpleDataStoreClientActor(final FrontendIdentifier frontendId, final ActorUtils actorUtils,
             final String shardName) {
-        super(frontendId, actorContext);
-        this.shardName = Preconditions.checkNotNull(shardName);
+        super(frontendId, actorUtils);
+        this.shardName = requireNonNull(shardName);
     }
 
     @Override
-    AbstractDataStoreClientBehavior initialBehavior(final ClientActorContext context, final ActorContext actorContext) {
-        return new SimpleDataStoreClientBehavior(context, actorContext, shardName);
+    AbstractDataStoreClientBehavior initialBehavior(final ClientActorContext context, final ActorUtils actorUtils) {
+        return new SimpleDataStoreClientBehavior(context, actorUtils, shardName);
     }
 
     public static Props props(@Nonnull final MemberName memberName, @Nonnull final String storeName,
-            final ActorContext ctx, final String shardName) {
+            final ActorUtils actorUtils, final String shardName) {
         final String name = "datastore-" + storeName;
         final FrontendIdentifier frontendId = FrontendIdentifier.create(memberName, FrontendType.forName(name));
         return Props.create(SimpleDataStoreClientActor.class,
-            () -> new SimpleDataStoreClientActor(frontendId, ctx, shardName));
+            () -> new SimpleDataStoreClientActor(frontendId, actorUtils, shardName));
     }
 }
index d6818d3..aaaa88e 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
@@ -25,9 +25,9 @@ final class SimpleDataStoreClientBehavior extends AbstractDataStoreClientBehavio
         super(context, resolver);
     }
 
-    SimpleDataStoreClientBehavior(final ClientActorContext context, final ActorContext actorContext,
+    SimpleDataStoreClientBehavior(final ClientActorContext context, final ActorUtils actorUtils,
             final String shardName) {
-        this(context, new SimpleShardBackendResolver(context.getIdentifier(), actorContext, shardName));
+        this(context, new SimpleShardBackendResolver(context.getIdentifier(), actorUtils, shardName));
     }
 
     @Override
index 7c301e7..e086f08 100644 (file)
@@ -7,12 +7,14 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import java.util.concurrent.CompletionStage;
 import javax.annotation.concurrent.ThreadSafe;
 import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -32,14 +34,14 @@ final class SimpleShardBackendResolver extends AbstractShardBackendResolver {
     private volatile ShardState state;
 
     // FIXME: we really need just ActorContext.findPrimaryShardAsync()
-    SimpleShardBackendResolver(final ClientIdentifier clientId, final ActorContext actorContext,
+    SimpleShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils,
             final String shardName) {
-        super(clientId, actorContext);
-        this.shardName = Preconditions.checkNotNull(shardName);
+        super(clientId, actorUtils);
+        this.shardName = requireNonNull(shardName);
     }
 
     private CompletionStage<ShardBackendInfo> getBackendInfo(final long cookie) {
-        Preconditions.checkArgument(cookie == 0);
+        checkArgument(cookie == 0);
 
         final ShardState existing = state;
         if (existing != null) {
index 7f759d1..55108e0 100644 (file)
@@ -5,15 +5,15 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.concurrent.CountDownLatch;
@@ -28,7 +28,7 @@ import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigu
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
 import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
@@ -56,7 +56,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
 
     private static final long READY_WAIT_FACTOR = 3;
 
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final long waitTillReadyTimeInMillis;
 
     private AutoCloseable closeable;
@@ -74,10 +74,10 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     protected AbstractDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
             final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
             final DatastoreSnapshot restoreFromSnapshot) {
-        Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
-        Preconditions.checkNotNull(cluster, "cluster should not be null");
-        Preconditions.checkNotNull(configuration, "configuration should not be null");
-        Preconditions.checkNotNull(datastoreContextFactory, "datastoreContextFactory should not be null");
+        requireNonNull(actorSystem, "actorSystem should not be null");
+        requireNonNull(cluster, "cluster should not be null");
+        requireNonNull(configuration, "configuration should not be null");
+        requireNonNull(datastoreContextFactory, "datastoreContextFactory should not be null");
 
         String shardManagerId = ShardManagerIdentifier.builder()
                 .type(datastoreContextFactory.getBaseDatastoreContext().getDataStoreName()).build().toString();
@@ -96,12 +96,12 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
                 .restoreFromSnapshot(restoreFromSnapshot)
                 .distributedDataStore(this);
 
-        actorContext = new ActorContext(actorSystem, createShardManager(actorSystem, creator, shardDispatcher,
+        actorUtils = new ActorUtils(actorSystem, createShardManager(actorSystem, creator, shardDispatcher,
                 shardManagerId), cluster, configuration, datastoreContextFactory.getBaseDatastoreContext(),
                 primaryShardInfoCache);
 
         final Props clientProps = DistributedDataStoreClientActor.props(cluster.getCurrentMemberName(),
-            datastoreContextFactory.getBaseDatastoreContext().getDataStoreName(), actorContext);
+            datastoreContextFactory.getBaseDatastoreContext().getDataStoreName(), actorUtils);
         final ActorRef clientActor = actorSystem.actorOf(clientProps);
         try {
             client = DistributedDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
@@ -115,7 +115,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
         identifier = client.getIdentifier();
         LOG.debug("Distributed data store client {} started", identifier);
 
-        this.waitTillReadyTimeInMillis = actorContext.getDatastoreContext().getShardLeaderElectionTimeout()
+        this.waitTillReadyTimeInMillis = actorUtils.getDatastoreContext().getShardLeaderElectionTimeout()
                 .duration().toMillis() * READY_WAIT_FACTOR;
 
         datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(
@@ -124,26 +124,26 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
         datastoreConfigMXBean.registerMBean();
 
         datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContextFactory.getBaseDatastoreContext()
-                .getDataStoreMXBeanType(), actorContext);
+                .getDataStoreMXBeanType(), actorUtils);
         datastoreInfoMXBean.registerMBean();
     }
 
     @VisibleForTesting
-    protected AbstractDataStore(final ActorContext actorContext, final ClientIdentifier identifier) {
-        this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
+    protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
+        this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
         this.client = null;
-        this.identifier = Preconditions.checkNotNull(identifier);
-        this.waitTillReadyTimeInMillis = actorContext.getDatastoreContext().getShardLeaderElectionTimeout()
+        this.identifier = requireNonNull(identifier);
+        this.waitTillReadyTimeInMillis = actorUtils.getDatastoreContext().getShardLeaderElectionTimeout()
                 .duration().toMillis() * READY_WAIT_FACTOR;
     }
 
     @VisibleForTesting
-    protected AbstractDataStore(final ActorContext actorContext, final ClientIdentifier identifier,
+    protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier,
                                 final DataStoreClient clientActor) {
-        this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
+        this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
         this.client = clientActor;
-        this.identifier = Preconditions.checkNotNull(identifier);
-        this.waitTillReadyTimeInMillis = actorContext.getDatastoreContext().getShardLeaderElectionTimeout()
+        this.identifier = requireNonNull(identifier);
+        this.waitTillReadyTimeInMillis = actorUtils.getDatastoreContext().getShardLeaderElectionTimeout()
                 .duration().toMillis() * READY_WAIT_FACTOR;
     }
 
@@ -162,14 +162,14 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     @Override
     public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
             final YangInstanceIdentifier treeId, final L listener) {
-        Preconditions.checkNotNull(treeId, "treeId should not be null");
-        Preconditions.checkNotNull(listener, "listener should not be null");
+        requireNonNull(treeId, "treeId should not be null");
+        requireNonNull(listener, "listener should not be null");
 
-        final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+        final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
         LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
 
         final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
-                new DataTreeChangeListenerProxy<>(actorContext, listener, treeId);
+                new DataTreeChangeListenerProxy<>(actorUtils, listener, treeId);
         listenerRegistrationProxy.init(shardName);
 
         return listenerRegistrationProxy;
@@ -179,30 +179,29 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     @Override
     public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
             final DOMDataTreeIdentifier subtree, final C cohort) {
-        YangInstanceIdentifier treeId =
-                Preconditions.checkNotNull(subtree, "subtree should not be null").getRootIdentifier();
-        Preconditions.checkNotNull(cohort, "listener should not be null");
+        YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").getRootIdentifier();
+        requireNonNull(cohort, "listener should not be null");
 
 
-        final String shardName = actorContext.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+        final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
         LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName);
 
         DataTreeCohortRegistrationProxy<C> cohortProxy =
-                new DataTreeCohortRegistrationProxy<>(actorContext, subtree, cohort);
+                new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort);
         cohortProxy.init(shardName);
         return cohortProxy;
     }
 
     @Override
     public void onGlobalContextUpdated(final SchemaContext schemaContext) {
-        actorContext.setSchemaContext(schemaContext);
+        actorUtils.setSchemaContext(schemaContext);
     }
 
     @Override
     public void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
-        LOG.info("DatastoreContext updated for data store {}", actorContext.getDataStoreName());
+        LOG.info("DatastoreContext updated for data store {}", actorUtils.getDataStoreName());
 
-        actorContext.setDatastoreContext(contextFactory);
+        actorUtils.setDatastoreContext(contextFactory);
         datastoreConfigMXBean.setContext(contextFactory.getBaseDatastoreContext());
     }
 
@@ -226,7 +225,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
             }
         }
 
-        actorContext.shutdown();
+        actorUtils.shutdown();
 
         if (client != null) {
             client.close();
@@ -234,8 +233,8 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     }
 
     @Override
-    public ActorContext getActorContext() {
-        return actorContext;
+    public ActorUtils getActorUtils() {
+        return actorUtils;
     }
 
     public void waitTillReady() {
@@ -283,16 +282,16 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
             final YangInstanceIdentifier insideShard,
             final org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener delegate) {
 
-        Preconditions.checkNotNull(shardLookup, "shardLookup should not be null");
-        Preconditions.checkNotNull(insideShard, "insideShard should not be null");
-        Preconditions.checkNotNull(delegate, "delegate should not be null");
+        requireNonNull(shardLookup, "shardLookup should not be null");
+        requireNonNull(insideShard, "insideShard should not be null");
+        requireNonNull(delegate, "delegate should not be null");
 
-        final String shardName = actorContext.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
+        final String shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
         LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}",
                 delegate,shardLookup, shardName, insideShard);
 
         final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> listenerRegistrationProxy =
-                new DataTreeChangeListenerProxy<>(actorContext,
+                new DataTreeChangeListenerProxy<>(actorUtils,
                         // wrap this in the ClusteredDOMDataTreeChangeLister interface
                         // since we always want clustered registration
                         (ClusteredDOMDataTreeChangeListener) delegate::onDataTreeChanged, insideShard);
@@ -305,12 +304,12 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerShardConfigListener(
             final YangInstanceIdentifier internalPath,
             final DOMDataTreeChangeListener delegate) {
-        Preconditions.checkNotNull(delegate, "delegate should not be null");
+        requireNonNull(delegate, "delegate should not be null");
 
         LOG.debug("Registering a listener for the configuration shard: {}", internalPath);
 
         final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
-                new DataTreeChangeListenerProxy<>(actorContext, delegate, internalPath);
+                new DataTreeChangeListenerProxy<>(actorUtils, delegate, internalPath);
         proxy.init(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
 
         return (ListenerRegistration<L>) proxy;
index f5a156f..a2bbc11 100644
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSelection;
 import akka.dispatch.OnComplete;
-import com.google.common.base.Preconditions;
 import java.util.Collection;
 import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
@@ -20,7 +21,7 @@ import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifie
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
@@ -42,20 +43,19 @@ abstract class AbstractTransactionContextFactory<F extends LocalTransactionFacto
 
     private final ConcurrentMap<String, F> knownLocal = new ConcurrentHashMap<>();
     private final LocalHistoryIdentifier historyId;
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
 
     // Used via TX_COUNTER_UPDATER
     @SuppressWarnings("unused")
     private volatile long nextTx;
 
-    protected AbstractTransactionContextFactory(final ActorContext actorContext,
-            final LocalHistoryIdentifier historyId) {
-        this.actorContext = Preconditions.checkNotNull(actorContext);
-        this.historyId = Preconditions.checkNotNull(historyId);
+    protected AbstractTransactionContextFactory(final ActorUtils actorUtils, final LocalHistoryIdentifier historyId) {
+        this.actorUtils = requireNonNull(actorUtils);
+        this.historyId = requireNonNull(historyId);
     }
 
-    final ActorContext getActorContext() {
-        return actorContext;
+    final ActorUtils getActorUtils() {
+        return actorUtils;
     }
 
     final LocalHistoryIdentifier getHistoryId() {
@@ -116,7 +116,7 @@ abstract class AbstractTransactionContextFactory<F extends LocalTransactionFacto
     final TransactionContextWrapper newTransactionContextWrapper(final TransactionProxy parent,
             final String shardName) {
         final TransactionContextWrapper transactionContextWrapper =
-                new TransactionContextWrapper(parent.getIdentifier(), actorContext, shardName);
+                new TransactionContextWrapper(parent.getIdentifier(), actorUtils, shardName);
 
         Future<PrimaryShardInfo> findPrimaryFuture = findPrimaryShard(shardName, parent.getIdentifier());
         if (findPrimaryFuture.isCompleted()) {
@@ -136,7 +136,7 @@ abstract class AbstractTransactionContextFactory<F extends LocalTransactionFacto
                         onFindPrimaryShardFailure(failure, parent, shardName, transactionContextWrapper);
                     }
                 }
-            }, actorContext.getClientDispatcher());
+            }, actorUtils.getClientDispatcher());
         }
 
         return transactionContextWrapper;
index a57a799..adf8b1a 100644
@@ -7,18 +7,19 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.PoisonPill;
 import akka.dispatch.OnComplete;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import javax.annotation.concurrent.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
@@ -36,22 +37,22 @@ import scala.concurrent.Future;
 final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> extends AbstractListenerRegistration<T> {
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerProxy.class);
     private final ActorRef dataChangeListenerActor;
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final YangInstanceIdentifier registeredPath;
 
     @GuardedBy("this")
     private ActorSelection listenerRegistrationActor;
 
-    DataTreeChangeListenerProxy(final ActorContext actorContext, final T listener,
+    DataTreeChangeListenerProxy(final ActorUtils actorUtils, final T listener,
             final YangInstanceIdentifier registeredPath) {
         super(listener);
-        this.actorContext = Preconditions.checkNotNull(actorContext);
-        this.registeredPath = Preconditions.checkNotNull(registeredPath);
-        this.dataChangeListenerActor = actorContext.getActorSystem().actorOf(
+        this.actorUtils = requireNonNull(actorUtils);
+        this.registeredPath = requireNonNull(registeredPath);
+        this.dataChangeListenerActor = actorUtils.getActorSystem().actorOf(
                 DataTreeChangeListenerActor.props(getInstance(), registeredPath)
-                    .withDispatcher(actorContext.getNotificationDispatcherPath()));
+                    .withDispatcher(actorUtils.getNotificationDispatcherPath()));
 
-        LOG.debug("{}: Created actor {} for DTCL {}", actorContext.getDatastoreContext().getLogicalStoreType(),
+        LOG.debug("{}: Created actor {} for DTCL {}", actorUtils.getDatastoreContext().getLogicalStoreType(),
                 dataChangeListenerActor, listener);
     }
 
@@ -67,7 +68,7 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
     }
 
     void init(final String shardName) {
-        Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
+        Future<ActorRef> findFuture = actorUtils.findLocalShardAsync(shardName);
         findFuture.onComplete(new OnComplete<ActorRef>() {
             @Override
             public void onComplete(final Throwable failure, final ActorRef shard) {
@@ -82,7 +83,7 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
                     doRegistration(shard);
                 }
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
     }
 
     private void setListenerRegistrationActor(final ActorSelection actor) {
@@ -104,10 +105,10 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
 
     private void doRegistration(final ActorRef shard) {
 
-        Future<Object> future = actorContext.executeOperationAsync(shard,
+        Future<Object> future = actorUtils.executeOperationAsync(shard,
                 new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor,
                         getInstance() instanceof ClusteredDOMDataTreeChangeListener),
-                actorContext.getDatastoreContext().getShardInitializationTimeout());
+                actorUtils.getDatastoreContext().getShardInitializationTimeout());
 
         future.onComplete(new OnComplete<Object>() {
             @Override
@@ -117,11 +118,11 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
                             getInstance(), registeredPath, failure);
                 } else {
                     RegisterDataTreeNotificationListenerReply reply = (RegisterDataTreeNotificationListenerReply)result;
-                    setListenerRegistrationActor(actorContext.actorSelection(
+                    setListenerRegistrationActor(actorUtils.actorSelection(
                             reply.getListenerRegistrationPath()));
                 }
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
     }
 
     @VisibleForTesting
@@ -135,6 +136,6 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
     }
 
     private String logContext() {
-        return actorContext.getDatastoreContext().getLogicalStoreType().toString();
+        return actorUtils.getDatastoreContext().getLogicalStoreType().toString();
     }
 }
index e14db0f..45b0f76 100644
@@ -7,15 +7,16 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.base.Preconditions;
 import java.util.concurrent.TimeUnit;
 import javax.annotation.concurrent.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
@@ -32,22 +33,22 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
     private static final Timeout TIMEOUT = new Timeout(new FiniteDuration(5, TimeUnit.SECONDS));
     private final DOMDataTreeIdentifier subtree;
     private final ActorRef actor;
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     @GuardedBy("this")
     private ActorRef cohortRegistry;
 
-    DataTreeCohortRegistrationProxy(final ActorContext actorContext, final DOMDataTreeIdentifier subtree,
+    DataTreeCohortRegistrationProxy(final ActorUtils actorUtils, final DOMDataTreeIdentifier subtree,
             final C cohort) {
         super(cohort);
-        this.subtree = Preconditions.checkNotNull(subtree);
-        this.actorContext = Preconditions.checkNotNull(actorContext);
-        this.actor = actorContext.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(),
-                subtree.getRootIdentifier()).withDispatcher(actorContext.getNotificationDispatcherPath()));
+        this.subtree = requireNonNull(subtree);
+        this.actorUtils = requireNonNull(actorUtils);
+        this.actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(),
+                subtree.getRootIdentifier()).withDispatcher(actorUtils.getNotificationDispatcherPath()));
     }
 
     public void init(final String shardName) {
         // FIXME: Add late binding to shard.
-        Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
+        Future<ActorRef> findFuture = actorUtils.findLocalShardAsync(shardName);
         findFuture.onComplete(new OnComplete<ActorRef>() {
             @Override
             public void onComplete(final Throwable failure, final ActorRef shard) {
@@ -61,7 +62,7 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
                     performRegistration(shard);
                 }
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
     }
 
     private synchronized void performRegistration(final ActorRef shard) {
@@ -83,7 +84,7 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
                 }
             }
 
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
     }
 
     @Override
index 5811506..16198ff 100644
@@ -13,7 +13,7 @@ import com.google.common.annotations.VisibleForTesting;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
@@ -30,13 +30,13 @@ public class DistributedDataStore extends AbstractDataStore {
             final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
             final DatastoreSnapshot restoreFromSnapshot) {
         super(actorSystem, cluster, configuration, datastoreContextFactory, restoreFromSnapshot);
-        this.txContextFactory = new TransactionContextFactory(getActorContext(), getIdentifier());
+        this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier());
     }
 
     @VisibleForTesting
-    DistributedDataStore(final ActorContext actorContext, final ClientIdentifier identifier) {
-        super(actorContext, identifier);
-        this.txContextFactory = new TransactionContextFactory(getActorContext(), getIdentifier());
+    DistributedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
+        super(actorUtils, identifier);
+        this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier());
     }
 
 
@@ -52,13 +52,13 @@ public class DistributedDataStore extends AbstractDataStore {
 
     @Override
     public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        getActorContext().acquireTxCreationPermit();
+        getActorUtils().acquireTxCreationPermit();
         return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY);
     }
 
     @Override
     public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        getActorContext().acquireTxCreationPermit();
+        getActorUtils().acquireTxCreationPermit();
         return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE);
     }
 
index 4cd88ca..8d696c0 100644
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 
 /**
@@ -17,5 +17,5 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStore;
  */
 public interface DistributedDataStoreInterface extends DOMStore {
 
-    ActorContext getActorContext();
+    ActorUtils getActorUtils();
 }
index db879c0..ac279b7 100644
@@ -7,17 +7,18 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSelection;
 import akka.dispatch.Futures;
 import akka.dispatch.OnComplete;
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
@@ -37,27 +38,27 @@ class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
 
     private final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction;
     private final DataTreeModification modification;
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final ActorSelection leader;
     private final Exception operationError;
 
-    protected LocalThreePhaseCommitCohort(final ActorContext actorContext, final ActorSelection leader,
+    protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader,
             final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
             final DataTreeModification modification,
             final Exception operationError) {
-        this.actorContext = Preconditions.checkNotNull(actorContext);
-        this.leader = Preconditions.checkNotNull(leader);
-        this.transaction = Preconditions.checkNotNull(transaction);
-        this.modification = Preconditions.checkNotNull(modification);
+        this.actorUtils = requireNonNull(actorUtils);
+        this.leader = requireNonNull(leader);
+        this.transaction = requireNonNull(transaction);
+        this.modification = requireNonNull(modification);
         this.operationError = operationError;
     }
 
-    protected LocalThreePhaseCommitCohort(final ActorContext actorContext, final ActorSelection leader,
+    protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader,
             final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction, final Exception operationError) {
-        this.actorContext = Preconditions.checkNotNull(actorContext);
-        this.leader = Preconditions.checkNotNull(leader);
-        this.transaction = Preconditions.checkNotNull(transaction);
-        this.operationError = Preconditions.checkNotNull(operationError);
+        this.actorUtils = requireNonNull(actorUtils);
+        this.leader = requireNonNull(leader);
+        this.transaction = requireNonNull(transaction);
+        this.operationError = requireNonNull(operationError);
         this.modification = null;
     }
 
@@ -69,12 +70,12 @@ class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
 
         final ReadyLocalTransaction message = new ReadyLocalTransaction(transaction.getIdentifier(),
                 modification, immediate, participatingShardNames);
-        return actorContext.executeOperationAsync(leader, message, actorContext.getTransactionCommitOperationTimeout());
+        return actorUtils.executeOperationAsync(leader, message, actorUtils.getTransactionCommitOperationTimeout());
     }
 
     Future<ActorSelection> initiateCoordinatedCommit(final Optional<SortedSet<String>> participatingShardNames) {
         final Future<Object> messageFuture = initiateCommit(false, participatingShardNames);
-        final Future<ActorSelection> ret = TransactionReadyReplyMapper.transform(messageFuture, actorContext,
+        final Future<ActorSelection> ret = TransactionReadyReplyMapper.transform(messageFuture, actorUtils,
                 transaction.getIdentifier());
         ret.onComplete(new OnComplete<ActorSelection>() {
             @Override
@@ -87,7 +88,7 @@ class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
 
                 LOG.debug("Transaction {} resolved to actor {}", transaction.getIdentifier(), success);
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
 
         return ret;
     }
@@ -109,7 +110,7 @@ class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
                     transactionAborted(transaction);
                 }
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
 
         return messageFuture;
     }
index 11b4513..6610983 100644
@@ -104,12 +104,12 @@ final class LocalTransactionChain extends AbstractSnapshotBackedTransactionChain
 
         protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
                 DataTreeModification modification, Exception operationError) {
-            super(parent.getActorContext(), leader, transaction, modification, operationError);
+            super(parent.getActorUtils(), leader, transaction, modification, operationError);
         }
 
         protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
                 Exception operationError) {
-            super(parent.getActorContext(), leader, transaction, operationError);
+            super(parent.getActorUtils(), leader, transaction, operationError);
         }
 
         @Override
index 59eb0a3..4834590 100644
@@ -7,12 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSelection;
-import com.google.common.base.Preconditions;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
@@ -33,12 +35,12 @@ final class LocalTransactionFactoryImpl extends TransactionReadyPrototype<Transa
 
     private final ActorSelection leader;
     private final DataTree dataTree;
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
 
-    LocalTransactionFactoryImpl(final ActorContext actorContext, final ActorSelection leader, final DataTree dataTree) {
-        this.leader = Preconditions.checkNotNull(leader);
-        this.dataTree = Preconditions.checkNotNull(dataTree);
-        this.actorContext = actorContext;
+    LocalTransactionFactoryImpl(final ActorUtils actorUtils, final ActorSelection leader, final DataTree dataTree) {
+        this.leader = requireNonNull(leader);
+        this.dataTree = requireNonNull(dataTree);
+        this.actorUtils = actorUtils;
     }
 
     DataTree getDataTree() {
@@ -70,16 +72,16 @@ final class LocalTransactionFactoryImpl extends TransactionReadyPrototype<Transa
             final SnapshotBackedWriteTransaction<TransactionIdentifier> tx,
             final DataTreeModification tree,
             final Exception readyError) {
-        return new LocalThreePhaseCommitCohort(actorContext, leader, tx, tree, readyError);
+        return new LocalThreePhaseCommitCohort(actorUtils, leader, tx, tree, readyError);
     }
 
     @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"})
     @Override
     public LocalThreePhaseCommitCohort onTransactionReady(@Nonnull DOMStoreWriteTransaction tx,
             @Nullable Exception operationError) {
-        Preconditions.checkArgument(tx instanceof SnapshotBackedWriteTransaction);
+        checkArgument(tx instanceof SnapshotBackedWriteTransaction);
         if (operationError != null) {
-            return new LocalThreePhaseCommitCohort(actorContext, leader,
+            return new LocalThreePhaseCommitCohort(actorUtils, leader,
                     (SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, operationError);
         }
 
index b7c1705..6714815 100644
@@ -8,10 +8,12 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSelection;
 import akka.dispatch.Futures;
 import akka.dispatch.OnComplete;
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.Optional;
 import java.util.SortedSet;
@@ -21,7 +23,7 @@ import org.opendaylight.controller.cluster.datastore.messages.BatchedModificatio
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
 import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
 import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.common.api.ReadFailedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -36,7 +38,7 @@ import scala.concurrent.Future;
 public class RemoteTransactionContext extends AbstractTransactionContext {
     private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContext.class);
 
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final ActorSelection actor;
     private final OperationLimiter limiter;
 
@@ -53,19 +55,19 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
     private volatile Throwable failedModification;
 
     protected RemoteTransactionContext(final TransactionIdentifier identifier, final ActorSelection actor,
-            final ActorContext actorContext, final short remoteTransactionVersion, final OperationLimiter limiter) {
+            final ActorUtils actorUtils, final short remoteTransactionVersion, final OperationLimiter limiter) {
         super(identifier, remoteTransactionVersion);
-        this.limiter = Preconditions.checkNotNull(limiter);
+        this.limiter = requireNonNull(limiter);
         this.actor = actor;
-        this.actorContext = actorContext;
+        this.actorUtils = actorUtils;
     }
 
     private ActorSelection getActor() {
         return actor;
     }
 
-    protected ActorContext getActorContext() {
-        return actorContext;
+    protected ActorUtils getActorUtils() {
+        return actorUtils;
     }
 
     @Override
@@ -73,7 +75,7 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
         LOG.debug("Tx {} closeTransaction called", getIdentifier());
         TransactionContextCleanup.untrack(this);
 
-        actorContext.sendOperationAsync(getActor(), new CloseTransaction(getTransactionVersion()).toSerializable());
+        actorUtils.sendOperationAsync(getActor(), new CloseTransaction(getTransactionVersion()).toSerializable());
     }
 
     @Override
@@ -110,7 +112,7 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
         // Transform the last reply Future into a Future that returns the cohort actor path from
         // the last reply message. That's the end result of the ready operation.
 
-        return TransactionReadyReplyMapper.transform(readyReplyFuture, actorContext, getIdentifier());
+        return TransactionReadyReplyMapper.transform(readyReplyFuture, actorUtils, getIdentifier());
     }
 
     private BatchedModifications newBatchedModifications() {
@@ -130,7 +132,7 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
         batchedModifications.addModification(modification);
 
         if (batchedModifications.getModifications().size()
-                >= actorContext.getDatastoreContext().getShardBatchedModificationCount()) {
+                >= actorUtils.getDatastoreContext().getShardBatchedModificationCount()) {
             sendBatchedModifications();
         }
     }
@@ -175,8 +177,8 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
                 }
             }
 
-            sent = actorContext.executeOperationAsync(getActor(), toSend.toSerializable(),
-                actorContext.getTransactionCommitOperationTimeout());
+            sent = actorUtils.executeOperationAsync(getActor(), toSend.toSerializable(),
+                actorUtils.getTransactionCommitOperationTimeout());
             sent.onComplete(new OnComplete<Object>() {
                 @Override
                 public void onComplete(final Throwable failure, final Object success) {
@@ -188,7 +190,7 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
                     }
                     limiter.release(permitsToRelease);
                 }
-            }, actorContext.getClientDispatcher());
+            }, actorUtils.getClientDispatcher());
         }
 
         return sent;
@@ -252,9 +254,9 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
             }
         };
 
-        final Future<Object> future = actorContext.executeOperationAsync(getActor(),
-            readCmd.asVersion(getTransactionVersion()).toSerializable(), actorContext.getOperationTimeout());
-        future.onComplete(onComplete, actorContext.getClientDispatcher());
+        final Future<Object> future = actorUtils.executeOperationAsync(getActor(),
+            readCmd.asVersion(getTransactionVersion()).toSerializable(), actorUtils.getOperationTimeout());
+        future.onComplete(onComplete, actorUtils.getClientDispatcher());
     }
 
     /**
@@ -264,7 +266,7 @@ public class RemoteTransactionContext extends AbstractTransactionContext {
      * @return True if a permit was successfully acquired, false otherwise
      */
     private boolean acquireOperation() {
-        Preconditions.checkState(isOperationHandOffComplete(),
+        checkState(isOperationHandOffComplete(),
             "Attempted to acquire execute operation permit for transaction %s on actor %s during handoff",
             getIdentifier(), actor);
 
index c1162c2..a93d46f 100644
@@ -20,7 +20,7 @@ import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRe
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -67,13 +67,13 @@ final class RemoteTransactionContextSupport {
 
         // For the total create tx timeout, use 2 times the election timeout. This should be enough time for
         // a leader re-election to occur if we happen to hit it in transition.
-        totalCreateTxTimeout = parent.getActorContext().getDatastoreContext().getShardRaftConfig()
+        totalCreateTxTimeout = parent.getActorUtils().getDatastoreContext().getShardRaftConfig()
                 .getElectionTimeOutInterval().toMillis() * 2;
 
         // We'll use the operationTimeout for the the create Tx message timeout so it can be set appropriately
         // for unit tests but cap it at MAX_CREATE_TX_MSG_TIMEOUT_IN_MS. The operationTimeout could be set
         // larger than the totalCreateTxTimeout in production which we don't want.
-        long operationTimeout = parent.getActorContext().getOperationTimeout().duration().toMillis();
+        long operationTimeout = parent.getActorUtils().getOperationTimeout().duration().toMillis();
         createTxMessageTimeout = new Timeout(Math.min(operationTimeout, MAX_CREATE_TX_MSG_TIMEOUT_IN_MS),
                 TimeUnit.MILLISECONDS);
     }
@@ -86,8 +86,8 @@ final class RemoteTransactionContextSupport {
         return parent.getType();
     }
 
-    private ActorContext getActorContext() {
-        return parent.getActorContext();
+    private ActorUtils getActorUtils() {
+        return parent.getActorUtils();
     }
 
     private TransactionIdentifier getIdentifier() {
@@ -101,7 +101,7 @@ final class RemoteTransactionContextSupport {
         this.primaryShardInfo = newPrimaryShardInfo;
 
         if (getTransactionType() == TransactionType.WRITE_ONLY
-                && getActorContext().getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
+                && getActorUtils().getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
             ActorSelection primaryShard = newPrimaryShardInfo.getPrimaryShardActor();
 
             LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
@@ -126,7 +126,7 @@ final class RemoteTransactionContextSupport {
         Object serializedCreateMessage = new CreateTransaction(getIdentifier(), getTransactionType().ordinal(),
                     primaryShardInfo.getPrimaryShardVersion()).toSerializable();
 
-        Future<Object> createTxFuture = getActorContext().executeOperationAsync(
+        Future<Object> createTxFuture = getActorUtils().executeOperationAsync(
                 primaryShardInfo.getPrimaryShardActor(), serializedCreateMessage, createTxMessageTimeout);
 
         createTxFuture.onComplete(new OnComplete<Object>() {
@@ -134,20 +134,20 @@ final class RemoteTransactionContextSupport {
             public void onComplete(final Throwable failure, final Object response) {
                 onCreateTransactionComplete(failure, response);
             }
-        }, getActorContext().getClientDispatcher());
+        }, getActorUtils().getClientDispatcher());
     }
 
     private void tryFindPrimaryShard() {
         LOG.debug("Tx {} Retrying findPrimaryShardAsync for shard {}", getIdentifier(), shardName);
 
         this.primaryShardInfo = null;
-        Future<PrimaryShardInfo> findPrimaryFuture = getActorContext().findPrimaryShardAsync(shardName);
+        Future<PrimaryShardInfo> findPrimaryFuture = getActorUtils().findPrimaryShardAsync(shardName);
         findPrimaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
             @Override
             public void onComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) {
                 onFindPrimaryShardComplete(failure, newPrimaryShardInfo);
             }
-        }, getActorContext().getClientDispatcher());
+        }, getActorUtils().getClientDispatcher());
     }
 
     private void onFindPrimaryShardComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) {
@@ -185,9 +185,9 @@ final class RemoteTransactionContextSupport {
             LOG.debug("Tx {}: create tx on shard {} failed with exception \"{}\" - scheduling retry in {} ms",
                     getIdentifier(), shardName, failure, scheduleInterval);
 
-            getActorContext().getActorSystem().scheduler().scheduleOnce(
+            getActorUtils().getActorSystem().scheduler().scheduleOnce(
                     FiniteDuration.create(scheduleInterval, TimeUnit.MILLISECONDS),
-                    this::tryFindPrimaryShard, getActorContext().getClientDispatcher());
+                    this::tryFindPrimaryShard, getActorUtils().getClientDispatcher());
             return;
         }
 
@@ -234,14 +234,14 @@ final class RemoteTransactionContextSupport {
     private TransactionContext createValidTransactionContext(final CreateTransactionReply reply) {
         LOG.debug("Tx {} Received {}", getIdentifier(), reply);
 
-        return createValidTransactionContext(getActorContext().actorSelection(reply.getTransactionPath()),
+        return createValidTransactionContext(getActorUtils().actorSelection(reply.getTransactionPath()),
                 reply.getTransactionPath(), primaryShardInfo.getPrimaryShardVersion());
     }
 
     private TransactionContext createValidTransactionContext(final ActorSelection transactionActor,
             final String transactionPath, final short remoteTransactionVersion) {
         final TransactionContext ret = new RemoteTransactionContext(transactionContextWrapper.getIdentifier(),
-                transactionActor, getActorContext(), remoteTransactionVersion, transactionContextWrapper.getLimiter());
+                transactionActor, getActorUtils(), remoteTransactionVersion, transactionContextWrapper.getLimiter());
 
         if (parent.getType() == TransactionType.READ_ONLY) {
             TransactionContextCleanup.track(parent, ret);
index a600be9..5e8a954 100644
@@ -7,14 +7,15 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.dispatch.OnComplete;
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.Arrays;
 import java.util.List;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -30,17 +31,17 @@ import scala.concurrent.Future;
 class SingleCommitCohortProxy extends AbstractThreePhaseCommitCohort<Object> {
     private static final Logger LOG = LoggerFactory.getLogger(SingleCommitCohortProxy.class);
 
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final Future<Object> cohortFuture;
     private final TransactionIdentifier transactionId;
     private volatile DOMStoreThreePhaseCommitCohort delegateCohort = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
     private final OperationCallback.Reference operationCallbackRef;
 
-    SingleCommitCohortProxy(ActorContext actorContext, Future<Object> cohortFuture, TransactionIdentifier transactionId,
+    SingleCommitCohortProxy(ActorUtils actorUtils, Future<Object> cohortFuture, TransactionIdentifier transactionId,
             OperationCallback.Reference operationCallbackRef) {
-        this.actorContext = actorContext;
+        this.actorUtils = actorUtils;
         this.cohortFuture = cohortFuture;
-        this.transactionId = Preconditions.checkNotNull(transactionId);
+        this.transactionId = requireNonNull(transactionId);
         this.operationCallbackRef = operationCallbackRef;
     }
 
@@ -70,7 +71,7 @@ class SingleCommitCohortProxy extends AbstractThreePhaseCommitCohort<Object> {
                 // immediate success, to complete the 3PC for the front-end.
                 returnFuture.set(Boolean.TRUE);
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
 
         return returnFuture;
     }
index 4d80d7f..9505894 100644
@@ -5,12 +5,13 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSelection;
 import akka.dispatch.OnComplete;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.FutureCallback;
@@ -29,7 +30,7 @@ import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransacti
 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -65,17 +66,17 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }
     };
 
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final List<CohortInfo> cohorts;
     private final SettableFuture<Void> cohortsResolvedFuture = SettableFuture.create();
     private final TransactionIdentifier transactionId;
     private volatile OperationCallback commitOperationCallback;
 
-    public ThreePhaseCommitCohortProxy(final ActorContext actorContext, final List<CohortInfo> cohorts,
+    public ThreePhaseCommitCohortProxy(final ActorUtils actorUtils, final List<CohortInfo> cohorts,
             final TransactionIdentifier transactionId) {
-        this.actorContext = actorContext;
+        this.actorUtils = actorUtils;
         this.cohorts = cohorts;
-        this.transactionId = Preconditions.checkNotNull(transactionId);
+        this.transactionId = requireNonNull(transactionId);
 
         if (cohorts.isEmpty()) {
             cohortsResolvedFuture.set(null);
@@ -109,7 +110,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                         }
                     }
                 }
-            }, actorContext.getClientDispatcher());
+            }, actorUtils.getClientDispatcher());
         }
 
         return cohortsResolvedFuture;
@@ -152,7 +153,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
             return;
         }
 
-        commitOperationCallback = new TransactionRateLimitingCallback(actorContext);
+        commitOperationCallback = new TransactionRateLimitingCallback(actorUtils);
         commitOperationCallback.run();
 
         final Iterator<CohortInfo> iterator = cohorts.iterator();
@@ -206,9 +207,9 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
 
         LOG.debug("Tx {}: sending {} to {}", transactionId, message, toCohortInfo.getResolvedActor());
 
-        Future<Object> future = actorContext.executeOperationAsync(toCohortInfo.getResolvedActor(),
-                message.toSerializable(), actorContext.getTransactionCommitOperationTimeout());
-        future.onComplete(onComplete, actorContext.getClientDispatcher());
+        Future<Object> future = actorUtils.executeOperationAsync(toCohortInfo.getResolvedActor(),
+                message.toSerializable(), actorUtils.getTransactionCommitOperationTimeout());
+        future.onComplete(onComplete, actorUtils.getClientDispatcher());
     }
 
     private Future<Iterable<Object>> invokeCohorts(final MessageSupplier messageSupplier) {
@@ -218,11 +219,11 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
 
             LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message , cohort.getResolvedActor());
 
-            futureList.add(actorContext.executeOperationAsync(cohort.getResolvedActor(), message,
-                    actorContext.getTransactionCommitOperationTimeout()));
+            futureList.add(actorUtils.executeOperationAsync(cohort.getResolvedActor(), message,
+                    actorUtils.getTransactionCommitOperationTimeout()));
         }
 
-        return akka.dispatch.Futures.sequence(futureList, actorContext.getClientDispatcher());
+        return akka.dispatch.Futures.sequence(futureList, actorUtils.getClientDispatcher());
     }
 
     @Override
@@ -350,7 +351,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                     callback.success();
                 }
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
     }
 
     @Override
@@ -386,8 +387,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }
 
         short getActorVersion() {
-            Preconditions.checkState(resolvedActor != null,
-                    "getActorVersion cannot be called until the actor is resolved");
+            checkState(resolvedActor != null, "getActorVersion cannot be called until the actor is resolved");
             return actorVersionSupplier.get();
         }
     }
index dc21545..ad6cc3e 100644
@@ -148,7 +148,7 @@ final class TransactionChainProxy extends AbstractTransactionContextFactory<Loca
             new ConcurrentHashMap<>();
 
     TransactionChainProxy(final TransactionContextFactory parent, final LocalHistoryIdentifier historyId) {
-        super(parent.getActorContext(), historyId);
+        super(parent.getActorUtils(), historyId);
         this.parent = parent;
     }
 
@@ -162,13 +162,13 @@ final class TransactionChainProxy extends AbstractTransactionContextFactory<Loca
 
     @Override
     public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        getActorContext().acquireTxCreationPermit();
+        getActorUtils().acquireTxCreationPermit();
         return allocateWriteTransaction(TransactionType.READ_WRITE);
     }
 
     @Override
     public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        getActorContext().acquireTxCreationPermit();
+        getActorUtils().acquireTxCreationPermit();
         return allocateWriteTransaction(TransactionType.WRITE_ONLY);
     }
 
@@ -178,7 +178,7 @@ final class TransactionChainProxy extends AbstractTransactionContextFactory<Loca
 
         // Send a close transaction chain request to each and every shard
 
-        getActorContext().broadcast(version -> new CloseTransactionChain(getHistoryId(), version).toSerializable(),
+        getActorUtils().broadcast(version -> new CloseTransactionChain(getHistoryId(), version).toSerializable(),
                 CloseTransactionChain.class);
     }
 
@@ -249,7 +249,7 @@ final class TransactionChainProxy extends AbstractTransactionContextFactory<Loca
             }
         };
 
-        previous.onComplete(onComplete, getActorContext().getClientDispatcher());
+        previous.onComplete(onComplete, getActorUtils().getClientDispatcher());
         return returnPromise.future();
     }
 
@@ -269,7 +269,7 @@ final class TransactionChainProxy extends AbstractTransactionContextFactory<Loca
             }
 
             Future<Iterable<Object>> combinedFutures = Futures.sequence(priorReadOnlyTxFutures,
-                    getActorContext().getClientDispatcher());
+                    getActorUtils().getClientDispatcher());
 
             final Promise<T> returnPromise = Futures.promise();
             final OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
@@ -282,7 +282,7 @@ final class TransactionChainProxy extends AbstractTransactionContextFactory<Loca
                 }
             };
 
-            combinedFutures.onComplete(onComplete, getActorContext().getClientDispatcher());
+            combinedFutures.onComplete(onComplete, getActorUtils().getClientDispatcher());
             return returnPromise.future();
         } else {
             return future;
@@ -306,7 +306,7 @@ final class TransactionChainProxy extends AbstractTransactionContextFactory<Loca
         }
 
         // Combine the ready Futures into 1
-        final Future<Iterable<T>> combined = Futures.sequence(cohortFutures, getActorContext().getClientDispatcher());
+        final Future<Iterable<T>> combined = Futures.sequence(cohortFutures, getActorUtils().getClientDispatcher());
 
         // Record the we have outstanding futures
         final State newState = new Submitted(transaction, combined);
@@ -319,7 +319,7 @@ final class TransactionChainProxy extends AbstractTransactionContextFactory<Loca
             public void onComplete(final Throwable arg0, final Iterable<T> arg1) {
                 STATE_UPDATER.compareAndSet(TransactionChainProxy.this, newState, IDLE_STATE);
             }
-        }, getActorContext().getClientDispatcher());
+        }, getActorUtils().getClientDispatcher());
     }
 
     @Override
index 56e8e6e..8655c68 100644
@@ -14,7 +14,7 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
 import scala.concurrent.Future;
@@ -26,8 +26,8 @@ import scala.concurrent.Future;
 final class TransactionContextFactory extends AbstractTransactionContextFactory<LocalTransactionFactoryImpl> {
     private final AtomicLong nextHistory = new AtomicLong(1);
 
-    TransactionContextFactory(final ActorContext actorContext, final ClientIdentifier clientId) {
-        super(actorContext, new LocalHistoryIdentifier(clientId, 0));
+    TransactionContextFactory(final ActorUtils actorUtils, final ClientIdentifier clientId) {
+        super(actorUtils, new LocalHistoryIdentifier(clientId, 0));
     }
 
     @Override
@@ -37,12 +37,12 @@ final class TransactionContextFactory extends AbstractTransactionContextFactory<
     @Override
     protected LocalTransactionFactoryImpl factoryForShard(final String shardName, final ActorSelection shardLeader,
             final DataTree dataTree) {
-        return new LocalTransactionFactoryImpl(getActorContext(), shardLeader, dataTree);
+        return new LocalTransactionFactoryImpl(getActorUtils(), shardLeader, dataTree);
     }
 
     @Override
     protected Future<PrimaryShardInfo> findPrimaryShard(final String shardName, TransactionIdentifier txId) {
-        return getActorContext().findPrimaryShardAsync(shardName);
+        return getActorUtils().findPrimaryShardAsync(shardName);
     }
 
     @Override
index 38f55f3..60628b0 100644
@@ -7,9 +7,11 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSelection;
 import akka.dispatch.Futures;
-import com.google.common.base.Preconditions;
 import java.util.AbstractMap.SimpleImmutableEntry;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -20,7 +22,7 @@ import java.util.SortedSet;
 import java.util.concurrent.TimeUnit;
 import javax.annotation.concurrent.GuardedBy;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -54,14 +56,14 @@ class TransactionContextWrapper {
     @GuardedBy("queuedTxOperations")
     private boolean pendingEnqueue;
 
-    TransactionContextWrapper(final TransactionIdentifier identifier, final ActorContext actorContext,
+    TransactionContextWrapper(final TransactionIdentifier identifier, final ActorUtils actorUtils,
             final String shardName) {
-        this.identifier = Preconditions.checkNotNull(identifier);
+        this.identifier = requireNonNull(identifier);
         this.limiter = new OperationLimiter(identifier,
                 // 1 extra permit for the ready operation
-                actorContext.getDatastoreContext().getShardBatchedModificationCount() + 1,
-                TimeUnit.MILLISECONDS.toSeconds(actorContext.getDatastoreContext().getOperationTimeoutInMillis()));
-        this.shardName = Preconditions.checkNotNull(shardName);
+                actorUtils.getDatastoreContext().getShardBatchedModificationCount() + 1,
+                TimeUnit.MILLISECONDS.toSeconds(actorUtils.getDatastoreContext().getOperationTimeoutInMillis()));
+        this.shardName = requireNonNull(shardName);
     }
 
     TransactionContext getTransactionContext() {
@@ -95,8 +97,7 @@ class TransactionContextWrapper {
         synchronized (queuedTxOperations) {
             contextOnEntry = transactionContext;
             if (contextOnEntry == null) {
-                Preconditions.checkState(pendingEnqueue == false, "Concurrent access to transaction %s detected",
-                        identifier);
+                checkState(pendingEnqueue == false, "Concurrent access to transaction %s detected", identifier);
                 pendingEnqueue = true;
             }
         }
index 78803f2..83863de 100644 (file)
@@ -36,7 +36,7 @@ import org.opendaylight.controller.cluster.datastore.modification.AbstractModifi
 import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
 import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
 import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
@@ -68,7 +68,7 @@ public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIde
 
     @VisibleForTesting
     public TransactionProxy(final AbstractTransactionContextFactory<?> txContextFactory, final TransactionType type) {
-        super(txContextFactory.nextIdentifier(), txContextFactory.getActorContext().getDatastoreContext()
+        super(txContextFactory.nextIdentifier(), txContextFactory.getActorUtils().getDatastoreContext()
                 .isTransactionDebugContextEnabled());
         this.txContextFactory = txContextFactory;
         this.type = Preconditions.checkNotNull(type);
@@ -115,7 +115,7 @@ public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIde
     }
 
     private FluentFuture<Optional<NormalizedNode<?, ?>>> readAllData() {
-        final Set<String> allShardNames = txContextFactory.getActorContext().getConfiguration().getAllShardNames();
+        final Set<String> allShardNames = txContextFactory.getActorUtils().getConfiguration().getAllShardNames();
         final Collection<FluentFuture<Optional<NormalizedNode<?, ?>>>> futures = new ArrayList<>(allShardNames.size());
 
         for (String shardName : allShardNames) {
@@ -129,8 +129,8 @@ public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIde
             (Function<List<Optional<NormalizedNode<?, ?>>>, Optional<NormalizedNode<?, ?>>>) input -> {
                 try {
                     return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.EMPTY, input,
-                            txContextFactory.getActorContext().getSchemaContext(),
-                            txContextFactory.getActorContext().getDatastoreContext().getLogicalStoreType());
+                            txContextFactory.getActorUtils().getSchemaContext(),
+                            txContextFactory.getActorUtils().getDatastoreContext().getLogicalStoreType());
                 } catch (DataValidationFailedException e) {
                     throw new IllegalArgumentException("Failed to aggregate", e);
                 }
@@ -262,14 +262,14 @@ public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIde
             future = getDirectCommitFuture(transactionContext, operationCallbackRef, null);
         }
 
-        return new SingleCommitCohortProxy(txContextFactory.getActorContext(), future, getIdentifier(),
+        return new SingleCommitCohortProxy(txContextFactory.getActorUtils(), future, getIdentifier(),
             operationCallbackRef);
     }
 
     private Future<?> getDirectCommitFuture(final TransactionContext transactionContext,
             final OperationCallback.Reference operationCallbackRef, final Boolean havePermit) {
         TransactionRateLimitingCallback rateLimitingCallback = new TransactionRateLimitingCallback(
-                txContextFactory.getActorContext());
+                txContextFactory.getActorUtils());
         operationCallbackRef.set(rateLimitingCallback);
         rateLimitingCallback.run();
         return transactionContext.directCommit(havePermit);
@@ -293,11 +293,11 @@ public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIde
                     txVersionSupplier));
         }
 
-        return new ThreePhaseCommitCohortProxy(txContextFactory.getActorContext(), cohorts, getIdentifier());
+        return new ThreePhaseCommitCohortProxy(txContextFactory.getActorUtils(), cohorts, getIdentifier());
     }
 
     private String shardNameFromIdentifier(final YangInstanceIdentifier path) {
-        return txContextFactory.getActorContext().getShardStrategyFactory().getStrategy(path).findShard(path);
+        return txContextFactory.getActorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
     }
 
     private TransactionContextWrapper getContextWrapper(final YangInstanceIdentifier path) {
@@ -323,7 +323,7 @@ public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIde
         return state != TransactionState.OPEN;
     }
 
-    ActorContext getActorContext() {
-        return txContextFactory.getActorContext();
+    ActorUtils getActorUtils() {
+        return txContextFactory.getActorUtils();
     }
 }
index f35e30a..d1c10b8 100644 (file)
@@ -13,7 +13,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Ticker;
 import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 /**
  * TransactionRateLimitingCallback computes the new transaction rate limit on the successful completion of a
@@ -33,8 +33,8 @@ public class TransactionRateLimitingCallback implements OperationCallback {
     private long elapsedTime;
     private volatile State state = State.STOPPED;
 
-    TransactionRateLimitingCallback(ActorContext actorContext) {
-        commitTimer = actorContext.getOperationTimer(ActorContext.COMMIT);
+    TransactionRateLimitingCallback(ActorUtils actorUtils) {
+        commitTimer = actorUtils.getOperationTimer(ActorUtils.COMMIT);
     }
 
     @Override
index d177498..f5eb0e4 100644 (file)
@@ -7,12 +7,13 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSelection;
 import akka.dispatch.Mapper;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -35,15 +36,15 @@ public class TransactionReadyReplyMapper extends Mapper<Object, ActorSelection>
     };
     private static final Logger LOG = LoggerFactory.getLogger(TransactionReadyReplyMapper.class);
     private final TransactionIdentifier identifier;
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
 
-    protected TransactionReadyReplyMapper(final ActorContext actorContext, final TransactionIdentifier identifier) {
-        this.actorContext = Preconditions.checkNotNull(actorContext);
-        this.identifier = Preconditions.checkNotNull(identifier);
+    protected TransactionReadyReplyMapper(final ActorUtils actorUtils, final TransactionIdentifier identifier) {
+        this.actorUtils = requireNonNull(actorUtils);
+        this.identifier = requireNonNull(identifier);
     }
 
-    protected final ActorContext getActorContext() {
-        return actorContext;
+    protected final ActorUtils getActorUtils() {
+        return actorUtils;
     }
 
     protected String extractCohortPathFrom(final ReadyTransactionReply readyTxReply) {
@@ -58,7 +59,7 @@ public class TransactionReadyReplyMapper extends Mapper<Object, ActorSelection>
         // actor path from the reply.
         if (ReadyTransactionReply.isSerializedType(serializedReadyReply)) {
             ReadyTransactionReply readyTxReply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
-            return actorContext.actorSelection(extractCohortPathFrom(readyTxReply));
+            return actorUtils.actorSelection(extractCohortPathFrom(readyTxReply));
         }
 
         // Throwing an exception here will fail the Future.
@@ -66,9 +67,9 @@ public class TransactionReadyReplyMapper extends Mapper<Object, ActorSelection>
                 identifier, serializedReadyReply.getClass()));
     }
 
-    static Future<ActorSelection> transform(final Future<Object> readyReplyFuture, final ActorContext actorContext,
+    static Future<ActorSelection> transform(final Future<Object> readyReplyFuture, final ActorUtils actorUtils,
             final TransactionIdentifier identifier) {
-        return readyReplyFuture.transform(new TransactionReadyReplyMapper(actorContext, identifier),
-            SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
+        return readyReplyFuture.transform(new TransactionReadyReplyMapper(actorUtils, identifier),
+            SAME_FAILURE_TRANSFORMER, actorUtils.getClientDispatcher());
     }
 }
index 10988a0..ab4d9b2 100644 (file)
@@ -35,7 +35,7 @@ import org.opendaylight.controller.cluster.datastore.entityownership.selectionst
 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
 import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
@@ -69,16 +69,16 @@ public class DistributedEntityOwnershipService implements DOMEntityOwnershipServ
     private static final Timeout MESSAGE_TIMEOUT = new Timeout(1, TimeUnit.MINUTES);
 
     private final ConcurrentMap<DOMEntity, DOMEntity> registeredEntities = new ConcurrentHashMap<>();
-    private final ActorContext context;
+    private final ActorUtils context;
 
     private volatile ActorRef localEntityOwnershipShard;
     private volatile DataTree localEntityOwnershipShardDataTree;
 
-    DistributedEntityOwnershipService(final ActorContext context) {
+    DistributedEntityOwnershipService(final ActorUtils context) {
         this.context = Preconditions.checkNotNull(context);
     }
 
-    public static DistributedEntityOwnershipService start(final ActorContext context,
+    public static DistributedEntityOwnershipService start(final ActorUtils context,
             final EntityOwnerSelectionStrategyConfig strategyConfig) {
         ActorRef shardManagerActor = context.getShardManager();
 
@@ -247,7 +247,7 @@ public class DistributedEntityOwnershipService implements DOMEntityOwnershipServ
     public void close() {
     }
 
-    private static EntityOwnershipShard.Builder newShardBuilder(final ActorContext context,
+    private static EntityOwnershipShard.Builder newShardBuilder(final ActorUtils context,
             final EntityOwnerSelectionStrategyConfig strategyConfig) {
         return EntityOwnershipShard.newBuilder().localMemberName(context.getCurrentMemberName())
                 .ownerSelectionStrategyConfig(strategyConfig);
index 7976344..a2bf871 100644 (file)
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
 
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 
 /**
@@ -17,16 +17,16 @@ import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
  */
 public class DatastoreInfoMXBeanImpl extends AbstractMXBean implements DatastoreInfoMXBean {
 
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
 
-    public DatastoreInfoMXBeanImpl(String mxBeanType, ActorContext actorContext) {
+    public DatastoreInfoMXBeanImpl(String mxBeanType, ActorUtils actorUtils) {
         super("GeneralRuntimeInfo", mxBeanType, null);
-        this.actorContext = actorContext;
+        this.actorUtils = actorUtils;
     }
 
 
     @Override
     public double getTransactionCreationRateLimit() {
-        return actorContext.getTxCreationLimit();
+        return actorUtils.getTxCreationLimit();
     }
 }
@@ -61,13 +61,12 @@ import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
- * The ActorContext class contains utility methods which could be used by
- * non-actors (like DistributedDataStore) to work with actors a little more
- * easily. An ActorContext can be freely passed around to local object instances
- * but should not be passed to actors especially remote actors
+ * The ActorUtils class contains utility methods which can be used by non-actors (like DistributedDataStore) to work
+ * with actors a little more easily. An ActorUtils instance can be freely passed around to local object instances but
+ * should not be passed to actors, especially remote actors.
  */
-public class ActorContext {
-    private static final Logger LOG = LoggerFactory.getLogger(ActorContext.class);
+public class ActorUtils {
+    private static final Logger LOG = LoggerFactory.getLogger(ActorUtils.class);
     private static final String DISTRIBUTED_DATA_STORE_METRIC_REGISTRY = "distributed-data-store";
     private static final String METRIC_RATE = "rate";
     private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
@@ -113,13 +112,13 @@ public class ActorContext {
     private final PrimaryShardInfoFutureCache primaryShardInfoCache;
     private final ShardStrategyFactory shardStrategyFactory;
 
-    public ActorContext(final ActorSystem actorSystem, final ActorRef shardManager,
+    public ActorUtils(final ActorSystem actorSystem, final ActorRef shardManager,
             final ClusterWrapper clusterWrapper, final Configuration configuration) {
         this(actorSystem, shardManager, clusterWrapper, configuration,
                 DatastoreContext.newBuilder().build(), new PrimaryShardInfoFutureCache());
     }
 
-    public ActorContext(final ActorSystem actorSystem, final ActorRef shardManager,
+    public ActorUtils(final ActorSystem actorSystem, final ActorRef shardManager,
             final ClusterWrapper clusterWrapper, final Configuration configuration,
             final DatastoreContext datastoreContext, final PrimaryShardInfoFutureCache primaryShardInfoCache) {
         this.actorSystem = actorSystem;
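
For orientation, a minimal usage sketch of the renamed class follows; it is not part of the patch. It only uses methods that appear in the hunks above (getActorUtils(), findPrimaryShardAsync(), getClientDispatcher()); the ActorUtilsUsageSketch class, the dataStore parameter and the "default" shard name are illustrative assumptions, not code from this change.

    import akka.dispatch.OnComplete;
    import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
    import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
    import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
    import scala.concurrent.Future;

    final class ActorUtilsUsageSketch {
        private ActorUtilsUsageSketch() {
        }

        static void resolveShardLeader(final AbstractDataStore dataStore) {
            final ActorUtils actorUtils = dataStore.getActorUtils();

            // Ask the shard manager for the primary replica of a shard.
            final Future<PrimaryShardInfo> primary = actorUtils.findPrimaryShardAsync("default");

            // Completion callbacks run on the client dispatcher, mirroring the code above.
            primary.onComplete(new OnComplete<PrimaryShardInfo>() {
                @Override
                public void onComplete(final Throwable failure, final PrimaryShardInfo info) {
                    if (failure != null) {
                        // Resolution failed; a real caller would propagate or retry.
                        return;
                    }
                    // "info" now describes the shard leader and can be used to message it.
                }
            }, actorUtils.getClientDispatcher());
        }
    }
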
index 32faf3a..f242a65 100644 (file)
@@ -21,7 +21,7 @@ import org.slf4j.LoggerFactory;
 public class TransactionRateLimiter {
     private static final Logger LOG = LoggerFactory.getLogger(TransactionRateLimiter.class);
 
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final long commitTimeoutInSeconds;
     private final String dataStoreName;
     private final RateLimiter txRateLimiter;
@@ -29,11 +29,11 @@ public class TransactionRateLimiter {
 
     private volatile long pollOnCount = 1;
 
-    public TransactionRateLimiter(ActorContext actorContext) {
-        this.actorContext = actorContext;
-        this.commitTimeoutInSeconds = actorContext.getDatastoreContext().getShardTransactionCommitTimeoutInSeconds();
-        this.dataStoreName = actorContext.getDataStoreName();
-        this.txRateLimiter = RateLimiter.create(actorContext.getDatastoreContext()
+    public TransactionRateLimiter(ActorUtils actorUtils) {
+        this.actorUtils = actorUtils;
+        this.commitTimeoutInSeconds = actorUtils.getDatastoreContext().getShardTransactionCommitTimeoutInSeconds();
+        this.dataStoreName = actorUtils.getDataStoreName();
+        this.txRateLimiter = RateLimiter.create(actorUtils.getDatastoreContext()
                 .getTransactionCreationInitialRateLimit());
     }
 
@@ -45,7 +45,7 @@ public class TransactionRateLimiter {
     private void adjustRateLimit() {
         final long count = acquireCount.incrementAndGet();
         if (count >= pollOnCount) {
-            final Timer commitTimer = actorContext.getOperationTimer(ActorContext.COMMIT);
+            final Timer commitTimer = actorUtils.getOperationTimer(ActorUtils.COMMIT);
             double newRateLimit = calculateNewRateLimit(commitTimer, commitTimeoutInSeconds);
 
             if (newRateLimit < 1.0) {
@@ -71,7 +71,7 @@ public class TransactionRateLimiter {
                 continue;
             }
 
-            double newRateLimit = calculateNewRateLimit(actorContext.getOperationTimer(name, ActorContext.COMMIT),
+            double newRateLimit = calculateNewRateLimit(actorUtils.getOperationTimer(name, ActorUtils.COMMIT),
                     this.commitTimeoutInSeconds);
             if (newRateLimit > 0.0) {
                 LOG.debug("On unused Tx - data Store {} commit rateLimit adjusted to {}",
index 4580bb9..dc0fabf 100644 (file)
@@ -24,7 +24,7 @@ import java.util.concurrent.CompletionStage;
 import java.util.concurrent.ConcurrentHashMap;
 import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
 import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
 import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
@@ -56,7 +56,7 @@ final class CDSShardAccessImpl implements CDSShardAccess, LeaderLocationListener
 
     private final Collection<LeaderLocationListener> listeners = ConcurrentHashMap.newKeySet();
     private final DOMDataTreeIdentifier prefix;
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final Timeout makeLeaderLocalTimeout;
 
     private ActorRef roleChangeListenerActor;
@@ -64,20 +64,20 @@ final class CDSShardAccessImpl implements CDSShardAccess, LeaderLocationListener
     private volatile LeaderLocation currentLeader = LeaderLocation.UNKNOWN;
     private volatile boolean closed = false;
 
-    CDSShardAccessImpl(final DOMDataTreeIdentifier prefix, final ActorContext actorContext) {
+    CDSShardAccessImpl(final DOMDataTreeIdentifier prefix, final ActorUtils actorUtils) {
         this.prefix = requireNonNull(prefix);
-        this.actorContext = requireNonNull(actorContext);
+        this.actorUtils = requireNonNull(actorUtils);
         this.makeLeaderLocalTimeout =
-                new Timeout(actorContext.getDatastoreContext().getShardLeaderElectionTimeout().duration().$times(2));
+                new Timeout(actorUtils.getDatastoreContext().getShardLeaderElectionTimeout().duration().$times(2));
 
         // register RoleChangeListenerActor
         // TODO Maybe we should do this in async
         final Optional<ActorRef> localShardReply =
-                actorContext.findLocalShard(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
+                actorUtils.findLocalShard(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
         checkState(localShardReply.isPresent(),
                 "Local shard for {} not present. Cannot register RoleChangeListenerActor", prefix);
         roleChangeListenerActor =
-                actorContext.getActorSystem().actorOf(RoleChangeListenerActor.props(localShardReply.get(), this));
+                actorUtils.getActorSystem().actorOf(RoleChangeListenerActor.props(localShardReply.get(), this));
     }
 
     private void checkNotClosed() {
@@ -106,7 +106,7 @@ final class CDSShardAccessImpl implements CDSShardAccess, LeaderLocationListener
 
         // TODO can we cache local shard actorRef?
         final Future<ActorRef> localShardReply =
-                actorContext.findLocalShardAsync(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
+                actorUtils.findLocalShardAsync(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
 
         // we have to tell local shard to make leader local
         final scala.concurrent.Promise<Object> makeLeaderLocalAsk = Futures.promise();
@@ -124,11 +124,11 @@ final class CDSShardAccessImpl implements CDSShardAccess, LeaderLocationListener
                     makeLeaderLocalAsk.failure(failure);
                 } else {
                     makeLeaderLocalAsk
-                            .completeWith(actorContext
+                            .completeWith(actorUtils
                                     .executeOperationAsync(actorRef, MakeLeaderLocal.INSTANCE, makeLeaderLocalTimeout));
                 }
             }
-        }, actorContext.getClientDispatcher());
+        }, actorUtils.getClientDispatcher());
 
         // we have to transform make leader local request result
         Future<Void> makeLeaderLocalFuture = makeLeaderLocalAsk.future()
@@ -147,7 +147,7 @@ final class CDSShardAccessImpl implements CDSShardAccess, LeaderLocationListener
                         // wrap exception in LeadershipTransferFailedEx
                         return new LeadershipTransferFailedException("Leadership transfer failed", parameter);
                     }
-                }, actorContext.getClientDispatcher());
+                }, actorUtils.getClientDispatcher());
 
         return FutureConverters.toJava(makeLeaderLocalFuture);
     }
index b4b4444..5d1e4b5 100644 (file)
@@ -90,7 +90,7 @@ public class DistributedShardChangePublisher
                 .build());
 
         // XXX: can we guarantee that the root is present in the schemacontext?
-        this.dataTree.setSchemaContext(distributedDataStore.getActorContext().getSchemaContext());
+        this.dataTree.setSchemaContext(distributedDataStore.getActorUtils().getSchemaContext());
         this.shardPath = prefix.getRootIdentifier();
         this.childShards = childShards;
     }
index 64c3f14..df21b90 100644 (file)
@@ -9,6 +9,9 @@
 package org.opendaylight.controller.cluster.sharding;
 
 import static akka.actor.ActorRef.noSender;
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
@@ -57,7 +60,7 @@ import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
 import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
@@ -145,16 +148,16 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
                 new ShardedDataTreeActorCreator()
                         .setShardingService(this)
                         .setActorSystem(actorSystem)
-                        .setClusterWrapper(distributedConfigDatastore.getActorContext().getClusterWrapper())
+                        .setClusterWrapper(distributedConfigDatastore.getActorUtils().getClusterWrapper())
                         .setDistributedConfigDatastore(distributedConfigDatastore)
                         .setDistributedOperDatastore(distributedOperDatastore)
                         .setLookupTaskMaxRetries(LOOKUP_TASK_MAX_RETRIES),
                 ACTOR_ID);
 
-        this.memberName = distributedConfigDatastore.getActorContext().getCurrentMemberName();
+        this.memberName = distributedConfigDatastore.getActorUtils().getCurrentMemberName();
 
         updateHandler = new PrefixedShardConfigUpdateHandler(shardedDataTreeActor,
-                distributedConfigDatastore.getActorContext().getCurrentMemberName());
+                distributedConfigDatastore.getActorUtils().getCurrentMemberName());
 
         LOG.debug("{} - Starting prefix configuration shards", memberName);
         createPrefixConfigShard(distributedConfigDatastore);
@@ -162,15 +165,15 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
     }
 
     private static void createPrefixConfigShard(final AbstractDataStore dataStore) {
-        Configuration configuration = dataStore.getActorContext().getConfiguration();
+        Configuration configuration = dataStore.getActorUtils().getConfiguration();
         Collection<MemberName> memberNames = configuration.getUniqueMemberNamesForAllShards();
         CreateShard createShardMessage =
                 new CreateShard(new ModuleShardConfiguration(PrefixShards.QNAME.getNamespace(),
                         "prefix-shard-configuration", ClusterUtils.PREFIX_CONFIG_SHARD_ID, ModuleShardStrategy.NAME,
                         memberNames),
-                        Shard.builder(), dataStore.getActorContext().getDatastoreContext());
+                        Shard.builder(), dataStore.getActorUtils().getDatastoreContext());
 
-        dataStore.getActorContext().getShardManager().tell(createShardMessage, noSender());
+        dataStore.getActorUtils().getShardManager().tell(createShardMessage, noSender());
     }
 
     /**
@@ -208,7 +211,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
             LOG.debug("{}: Prefix configuration shards ready - creating clients", memberName);
             configurationShardMap.put(LogicalDatastoreType.CONFIGURATION,
                     createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
-                            distributedConfigDatastore.getActorContext()));
+                            distributedConfigDatastore.getActorUtils()));
         } catch (final DOMDataTreeShardCreationFailedException e) {
             throw new IllegalStateException(
                     "Unable to create datastoreClient for config DS prefix configuration shard.", e);
@@ -217,7 +220,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         try {
             configurationShardMap.put(LogicalDatastoreType.OPERATIONAL,
                     createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
-                            distributedOperDatastore.getActorContext()));
+                            distributedOperDatastore.getActorUtils()));
 
         } catch (final DOMDataTreeShardCreationFailedException e) {
             throw new IllegalStateException(
@@ -233,8 +236,8 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         updateHandler.initListener(distributedConfigDatastore, LogicalDatastoreType.CONFIGURATION);
         updateHandler.initListener(distributedOperDatastore, LogicalDatastoreType.OPERATIONAL);
 
-        distributedConfigDatastore.getActorContext().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
-        distributedOperDatastore.getActorContext().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
+        distributedConfigDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
+        distributedOperDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
 
 
         //create shard registration for DEFAULT_SHARD
@@ -290,12 +293,12 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         LOG.debug("{} - Creating producer for {}", memberName, subtrees);
         final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(subtrees);
 
-        final Object response = distributedConfigDatastore.getActorContext()
+        final Object response = distributedConfigDatastore.getActorUtils()
                 .executeOperation(shardedDataTreeActor, new ProducerCreated(subtrees));
         if (response == null) {
             LOG.debug("{} - Received success from remote nodes, creating producer:{}", memberName, subtrees);
             return new ProxyProducer(producer, subtrees, shardedDataTreeActor,
-                    distributedConfigDatastore.getActorContext(), shards);
+                    distributedConfigDatastore.getActorUtils(), shards);
         }
 
         closeProducer(producer);
@@ -386,7 +389,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
 
         try (DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
             final Entry<DataStoreClient, ActorRef> entry =
-                    createDatastoreClient(shardName, distributedDataStore.getActorContext());
+                    createDatastoreClient(shardName, distributedDataStore.getActorUtils());
 
             final DistributedShardFrontend shard =
                     new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);
@@ -400,7 +403,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
 
         } catch (final DOMDataTreeShardingConflictException e) {
             LOG.error("{}: Prefix {} is already occupied by another shard",
-                    distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), prefix, e);
+                    distributedConfigDatastore.getActorUtils().getClusterWrapper().getCurrentMemberName(), prefix, e);
         } catch (DOMDataTreeProducerException e) {
             LOG.error("Unable to close producer", e);
         } catch (DOMDataTreeShardCreationFailedException e) {
@@ -472,13 +475,12 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private Entry<DataStoreClient, ActorRef> createDatastoreClient(
-            final String shardName, final ActorContext actorContext)
+    private Entry<DataStoreClient, ActorRef> createDatastoreClient(final String shardName, final ActorUtils actorUtils)
             throws DOMDataTreeShardCreationFailedException {
 
         LOG.debug("{}: Creating distributed datastore client for shard {}", memberName, shardName);
         final Props distributedDataStoreClientProps =
-                SimpleDataStoreClientActor.props(memberName, "Shard-" + shardName, actorContext, shardName);
+                SimpleDataStoreClientActor.props(memberName, "Shard-" + shardName, actorUtils, shardName);
 
         final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
         try {
@@ -507,11 +509,11 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
 
                 // TODO we don't have to do it for config and operational default shard separately. Just one of them
                 // should be enough
-                final ActorContext actorContext = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
-                        ? distributedConfigDatastore.getActorContext() : distributedOperDatastore.getActorContext();
+                final ActorUtils actorUtils = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
+                        ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
 
                 final Optional<ActorRef> defaultLocalShardOptional =
-                        actorContext.findLocalShard(ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
+                        actorUtils.findLocalShard(ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
 
                 if (defaultLocalShardOptional.isPresent()) {
                     LOG.debug("{}: Default shard for {} is already started, creating just frontend", memberName,
@@ -525,7 +527,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
                 // the default shard as a prefix shard is problematic in this scenario so it is commented out. Since
                 // the default shard is a module-based shard by default, it makes sense to always treat it as such,
                 // ie bootstrap it in the same manner as the special prefix-configuration and EOS shards.
-//                final Collection<MemberName> names = distributedConfigDatastore.getActorContext().getConfiguration()
+//                final Collection<MemberName> names = distributedConfigDatastore.getActorUtils().getConfiguration()
 //                        .getUniqueMemberNamesForAllShards();
 //                Await.result(FutureConverters.toScala(createDistributedShard(
 //                        new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
@@ -616,7 +618,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         private final DOMDataTreeProducer delegate;
         private final Collection<DOMDataTreeIdentifier> subtrees;
         private final ActorRef shardDataTreeActor;
-        private final ActorContext actorContext;
+        private final ActorUtils actorUtils;
         @GuardedBy("shardAccessMap")
         private final Map<DOMDataTreeIdentifier, CDSShardAccessImpl> shardAccessMap = new HashMap<>();
 
@@ -628,13 +630,13 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         ProxyProducer(final DOMDataTreeProducer delegate,
                       final Collection<DOMDataTreeIdentifier> subtrees,
                       final ActorRef shardDataTreeActor,
-                      final ActorContext actorContext,
+                      final ActorUtils actorUtils,
                       final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shardLayout) {
-            this.delegate = Preconditions.checkNotNull(delegate);
-            this.subtrees = Preconditions.checkNotNull(subtrees);
-            this.shardDataTreeActor = Preconditions.checkNotNull(shardDataTreeActor);
-            this.actorContext = Preconditions.checkNotNull(actorContext);
-            this.shardTable = Preconditions.checkNotNull(shardLayout);
+            this.delegate = requireNonNull(delegate);
+            this.subtrees = requireNonNull(subtrees);
+            this.shardDataTreeActor = requireNonNull(shardDataTreeActor);
+            this.actorUtils = requireNonNull(actorUtils);
+            this.shardTable = requireNonNull(shardLayout);
         }
 
         @Nonnull
@@ -661,7 +663,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
                 shardAccessMap.values().forEach(CDSShardAccessImpl::close);
             }
 
-            final Object o = actorContext.executeOperation(shardDataTreeActor, new ProducerRemoved(subtrees));
+            final Object o = actorUtils.executeOperation(shardDataTreeActor, new ProducerRemoved(subtrees));
             if (o instanceof DOMDataTreeProducerException) {
                 throw (DOMDataTreeProducerException) o;
             } else if (o instanceof Throwable) {
@@ -677,13 +679,12 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
         @Nonnull
         @Override
         public CDSShardAccess getShardAccess(@Nonnull final DOMDataTreeIdentifier subtree) {
-            Preconditions.checkArgument(
-                    subtrees.stream().anyMatch(dataTreeIdentifier -> dataTreeIdentifier.contains(subtree)),
-                    "Subtree %s is not controlled by this producer %s", subtree, this);
+            checkArgument(subtrees.stream().anyMatch(dataTreeIdentifier -> dataTreeIdentifier.contains(subtree)),
+                "Subtree %s is not controlled by this producer %s", subtree, this);
 
             final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
                     shardTable.lookup(subtree);
-            Preconditions.checkState(lookup != null, "Subtree %s is not contained in any registered shard.", subtree);
+            checkState(lookup != null, "Subtree %s is not contained in any registered shard.", subtree);
 
             final DOMDataTreeIdentifier lookupId = lookup.getValue().getPrefix();
 
@@ -696,7 +697,7 @@ public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDat
                 // for same subtrees. But maybe it is not needed since there can be only one
                 // producer attached to some subtree at a time. And also how we can close ShardAccess
                 // then
-                final CDSShardAccessImpl shardAccess = new CDSShardAccessImpl(lookupId, actorContext);
+                final CDSShardAccessImpl shardAccess = new CDSShardAccessImpl(lookupId, actorUtils);
                 shardAccessMap.put(lookupId, shardAccess);
                 return shardAccess;
             }
index 8e8ff49..679055f 100644 (file)
@@ -41,7 +41,7 @@ import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersisten
 import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
 import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
 import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
@@ -88,7 +88,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
     private final ClusterWrapper clusterWrapper;
     // helper actorContext used only for static calls to executeAsync etc
     // for calls that need specific actor context tied to a datastore use the one provided in the DistributedDataStore
-    private final ActorContext actorContext;
+    private final ActorUtils actorUtils;
     private final ShardingServiceAddressResolver resolver;
     private final AbstractDataStore distributedConfigDatastore;
     private final AbstractDataStore distributedOperDatastore;
@@ -105,7 +105,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
         distributedConfigDatastore = builder.getDistributedConfigDatastore();
         distributedOperDatastore = builder.getDistributedOperDatastore();
         lookupTaskMaxRetries = builder.getLookupTaskMaxRetries();
-        actorContext = distributedConfigDatastore.getActorContext();
+        actorUtils = distributedConfigDatastore.getActorUtils();
         resolver = new ShardingServiceAddressResolver(
                 DistributedShardedDOMDataTree.ACTOR_ID, clusterWrapper.getCurrentMemberName());
 
@@ -229,7 +229,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
             final ActorSelection actorSelection = actorSystem.actorSelection(address);
             futures.add(
                     FutureConverters.toJava(
-                            actorContext.executeOperationAsync(
+                            actorUtils.executeOperationAsync(
                                     actorSelection, new NotifyProducerCreated(subtrees), DEFAULT_ASK_TIMEOUT))
                     .toCompletableFuture());
         }
@@ -269,7 +269,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
             final ActorSelection selection = actorSystem.actorSelection(address);
 
             futures.add(FutureConverters.toJava(
-                    actorContext.executeOperationAsync(selection, new NotifyProducerRemoved(message.getSubtrees())))
+                    actorUtils.executeOperationAsync(selection, new NotifyProducerRemoved(message.getSubtrees())))
                     .toCompletableFuture());
         }
 
@@ -312,8 +312,8 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
 
         final DOMDataTreeIdentifier prefix = message.getPrefix();
 
-        final ActorContext context = prefix.getDatastoreType() == LogicalDatastoreType.CONFIGURATION
-                        ? distributedConfigDatastore.getActorContext() : distributedOperDatastore.getActorContext();
+        final ActorUtils context = prefix.getDatastoreType() == LogicalDatastoreType.CONFIGURATION
+                        ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
 
         // schedule a notification task for the reply
         actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
@@ -334,7 +334,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
 
         final ShardRemovalLookupTask removalTask =
                 new ShardRemovalLookupTask(actorSystem, getSender(),
-                        actorContext, message.getPrefix(), lookupTaskMaxRetries);
+                        actorUtils, message.getPrefix(), lookupTaskMaxRetries);
 
         actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL, removalTask, actorSystem.dispatcher());
     }
@@ -348,9 +348,9 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
     private void onStartConfigShardLookup(final StartConfigShardLookup message) {
         LOG.debug("Received StartConfigShardLookup: {}", message);
 
-        final ActorContext context =
+        final ActorUtils context =
                 message.getType().equals(LogicalDatastoreType.CONFIGURATION)
-                        ? distributedConfigDatastore.getActorContext() : distributedOperDatastore.getActorContext();
+                        ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
 
         // schedule a notification task for the reply
         actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
@@ -408,7 +408,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
         private final ActorSystem system;
         private final ActorRef replyTo;
         private final ClusterWrapper clusterWrapper;
-        private final ActorContext context;
+        private final ActorUtils context;
         private final DistributedShardedDOMDataTree shardingService;
         private final DOMDataTreeIdentifier toLookup;
         private final int lookupMaxRetries;
@@ -416,7 +416,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
         ShardCreationLookupTask(final ActorSystem system,
                                 final ActorRef replyTo,
                                 final ClusterWrapper clusterWrapper,
-                                final ActorContext context,
+                                final ActorUtils context,
                                 final DistributedShardedDOMDataTree shardingService,
                                 final DOMDataTreeIdentifier toLookup,
                                 final int lookupMaxRetries) {
@@ -468,7 +468,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
 
         private final ActorSystem system;
         private final ActorRef replyTo;
-        private final ActorContext context;
+        private final ActorUtils context;
         private final ClusterWrapper clusterWrapper;
         private final ActorRef shard;
         private final DistributedShardedDOMDataTree shardingService;
@@ -477,7 +477,7 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
 
         ShardLeaderLookupTask(final ActorSystem system,
                               final ActorRef replyTo,
-                              final ActorContext context,
+                              final ActorUtils context,
                               final ClusterWrapper clusterWrapper,
                               final ActorRef shard,
                               final DistributedShardedDOMDataTree shardingService,
@@ -603,12 +603,12 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
 
         private final ActorSystem system;
         private final ActorRef replyTo;
-        private final ActorContext context;
+        private final ActorUtils context;
         private final DOMDataTreeIdentifier toLookup;
 
         ShardRemovalLookupTask(final ActorSystem system,
                                final ActorRef replyTo,
-                               final ActorContext context,
+                               final ActorUtils context,
                                final DOMDataTreeIdentifier toLookup,
                                final int lookupMaxRetries) {
             super(replyTo, lookupMaxRetries);
@@ -654,11 +654,11 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
 
         private final ActorSystem system;
         private final ActorRef replyTo;
-        private final ActorContext context;
+        private final ActorUtils context;
 
         ConfigShardLookupTask(final ActorSystem system,
                               final ActorRef replyTo,
-                              final ActorContext context,
+                              final ActorUtils context,
                               final StartConfigShardLookup message,
                               final int lookupMaxRetries) {
             super(replyTo, lookupMaxRetries);
@@ -695,13 +695,13 @@ public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
 
         private final ActorSystem system;
         private final ActorRef replyTo;
-        private final ActorContext context;
+        private final ActorUtils context;
         private final ClusterWrapper clusterWrapper;
         private final ActorRef shard;
 
         ConfigShardReadinessTask(final ActorSystem system,
                                  final ActorRef replyTo,
-                                 final ActorContext context,
+                                 final ActorUtils context,
                                  final ClusterWrapper clusterWrapper,
                                  final ActorRef shard,
                                  final int lookupMaxRetries) {
index 0c3e5da..13515cd 100644 (file)
   <bean id="distributedEntityOwnershipService" class="org.opendaylight.controller.cluster.datastore.entityownership.DistributedEntityOwnershipService"
           factory-method="start" destroy-method="close">
     <argument>
-      <bean factory-ref="operDatastore" factory-method="getActorContext"/>
+      <bean factory-ref="operDatastore" factory-method="getActorUtils"/>
     </argument>
     <argument ref="selectionStrategyConfig"/>
   </bean>
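
The blueprint wiring above hands the operational datastore's ActorUtils to the entity-ownership service factory. A rough Java equivalent is sketched below; the wire() helper and its parameters are illustrative only, and the import path for EntityOwnerSelectionStrategyConfig is assumed from the truncated import shown in an earlier hunk.

    import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
    import org.opendaylight.controller.cluster.datastore.entityownership.DistributedEntityOwnershipService;
    import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;

    final class EntityOwnershipWiringSketch {
        // Mirrors <bean factory-ref="operDatastore" factory-method="getActorUtils"/> above.
        static DistributedEntityOwnershipService wire(final AbstractDataStore operDatastore,
                final EntityOwnerSelectionStrategyConfig selectionStrategyConfig) {
            return DistributedEntityOwnershipService.start(operDatastore.getActorUtils(), selectionStrategyConfig);
        }
    }
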
index f423f61..bcdbb98 100644 (file)
@@ -29,7 +29,7 @@ import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
@@ -55,7 +55,7 @@ public class ClientBackedDataStoreTest {
     private DataStoreClient clientActor;
 
     @Mock
-    private ActorContext actorContext;
+    private ActorUtils actorUtils;
 
     @Mock
     private ClientLocalHistory clientLocalHistory;
@@ -80,8 +80,8 @@ public class ClientBackedDataStoreTest {
     public void setUp() {
         MockitoAnnotations.initMocks(this);
 
-        when(actorContext.getSchemaContext()).thenReturn(SCHEMA_CONTEXT);
-        when(actorContext.getDatastoreContext()).thenReturn(DatastoreContext.newBuilder().build());
+        when(actorUtils.getSchemaContext()).thenReturn(SCHEMA_CONTEXT);
+        when(actorUtils.getDatastoreContext()).thenReturn(DatastoreContext.newBuilder().build());
         when(clientTransaction.getIdentifier()).thenReturn(TRANSACTION_IDENTIFIER);
         when(clientSnapshot.getIdentifier()).thenReturn(TRANSACTION_IDENTIFIER);
 
@@ -94,7 +94,7 @@ public class ClientBackedDataStoreTest {
     @Test
     public void testCreateTransactionChain() {
         try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorContext, UNKNOWN_ID, clientActor)) {
+                actorUtils, UNKNOWN_ID, clientActor)) {
             final DOMStoreTransactionChain txChain = clientBackedDataStore.createTransactionChain();
             assertNotNull(txChain);
             verify(clientActor, Mockito.times(1)).createLocalHistory();
@@ -104,7 +104,7 @@ public class ClientBackedDataStoreTest {
     @Test
     public void testNewReadOnlyTransaction() {
         try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorContext, UNKNOWN_ID, clientActor)) {
+                actorUtils, UNKNOWN_ID, clientActor)) {
             final DOMStoreReadTransaction tx = clientBackedDataStore.newReadOnlyTransaction();
             assertNotNull(tx);
             verify(clientActor, Mockito.times(1)).createSnapshot();
@@ -114,7 +114,7 @@ public class ClientBackedDataStoreTest {
     @Test
     public void testNewWriteOnlyTransaction() {
         try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorContext, UNKNOWN_ID, clientActor)) {
+                actorUtils, UNKNOWN_ID, clientActor)) {
             final DOMStoreWriteTransaction tx = clientBackedDataStore.newWriteOnlyTransaction();
             assertNotNull(tx);
             verify(clientActor, Mockito.times(1)).createTransaction();
@@ -124,7 +124,7 @@ public class ClientBackedDataStoreTest {
     @Test
     public void testNewReadWriteTransaction() {
         try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorContext, UNKNOWN_ID, clientActor)) {
+                actorUtils, UNKNOWN_ID, clientActor)) {
             final DOMStoreReadWriteTransaction tx = clientBackedDataStore.newReadWriteTransaction();
             assertNotNull(tx);
             verify(clientActor, Mockito.times(1)).createTransaction();
index 24ab054..a9fa06d 100644 (file)
@@ -45,7 +45,7 @@ import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.SuccessEnvelope;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
@@ -74,10 +74,10 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
         final TestProbe clientContextProbe = new TestProbe(system, "client-context");
         backendProbe = new TestProbe(system, "backend");
         //create handle dependencies
-        final ActorContext actorContext = createActorContextMock(system, contextProbe.ref());
+        final ActorUtils actorUtils = createActorContextMock(system, contextProbe.ref());
         final ClientActorContext clientContext =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        client = new SimpleDataStoreClientBehavior(clientContext, actorContext, "shard");
+        client = new SimpleDataStoreClientBehavior(clientContext, actorUtils, "shard");
         client.createLocalHistory();
         parent = new SingleClientHistory(client, HISTORY_ID);
         //connect client
@@ -200,8 +200,8 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
         return dataTreeSnapshot;
     }
 
-    private static ActorContext createActorContextMock(final ActorSystem system, final ActorRef actor) {
-        final ActorContext mock = mock(ActorContext.class);
+    private static ActorUtils createActorContextMock(final ActorSystem system, final ActorRef actor) {
+        final ActorUtils mock = mock(ActorUtils.class);
         final Promise<PrimaryShardInfo> promise = new scala.concurrent.impl.Promise.DefaultPromise<>();
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
index d655640..12f7478 100644 (file)
@@ -29,7 +29,7 @@ import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
 import scala.concurrent.Promise;
@@ -175,8 +175,8 @@ public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory>
         Assert.assertNull(reconnectCohort);
     }
 
-    protected static ActorContext createActorContextMock(final ActorSystem system, final ActorRef actor) {
-        final ActorContext mock = mock(ActorContext.class);
+    protected static ActorUtils createActorUtilsMock(final ActorSystem system, final ActorRef actor) {
+        final ActorUtils mock = mock(ActorUtils.class);
         final Promise<PrimaryShardInfo> promise = new DefaultPromise<>();
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
index b6a1156..bdba5d1 100644 (file)
@@ -31,7 +31,7 @@ import org.opendaylight.controller.cluster.access.client.InternalCommand;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
@@ -54,7 +54,7 @@ public abstract class AbstractDataStoreClientBehaviorTest {
         system = ActorSystem.apply();
         clientActorProbe = new TestProbe(system, "client");
         actorContextProbe = new TestProbe(system, "actor-context");
-        final ActorContext context = createActorContextMock(system, actorContextProbe.ref());
+        final ActorUtils context = createActorContextMock(system, actorContextProbe.ref());
         clientContext =
                 AccessClientUtil.createClientActorContext(system, clientActorProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
         behavior = createBehavior(clientContext, context);
@@ -62,7 +62,7 @@ public abstract class AbstractDataStoreClientBehaviorTest {
 
     @SuppressWarnings("checkstyle:hiddenField")
     protected abstract AbstractDataStoreClientBehavior createBehavior(ClientActorContext clientContext,
-                                                                      ActorContext context);
+                                                                      ActorUtils context);
 
     @After
     public void tearDown() {
@@ -163,8 +163,8 @@ public abstract class AbstractDataStoreClientBehaviorTest {
         verify(modification).readNode(YangInstanceIdentifier.EMPTY);
     }
 
-    private static ActorContext createActorContextMock(final ActorSystem system, final ActorRef actor) {
-        final ActorContext mock = mock(ActorContext.class);
+    private static ActorUtils createActorContextMock(final ActorSystem system, final ActorRef actor) {
+        final ActorUtils mock = mock(ActorUtils.class);
         final Promise<PrimaryShardInfo> promise = new scala.concurrent.impl.Promise.DefaultPromise<>();
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
index bc8283c..2b23430 100644
@@ -22,7 +22,7 @@ import org.opendaylight.controller.cluster.access.client.AbstractClientConnectio
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLocalHistory> {
     private ActorSystem system;
@@ -45,8 +45,8 @@ public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLoca
         final TestProbe actorContextProbe = new TestProbe(system, "actor-context");
         clientActorContext = AccessClientUtil.createClientActorContext(
                 system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        final ActorContext actorContextMock = createActorContextMock(system, actorContextProbe.ref());
-        behavior = new SimpleDataStoreClientBehavior(clientActorContext, actorContextMock, SHARD_NAME);
+        final ActorUtils actorUtilsMock = createActorUtilsMock(system, actorContextProbe.ref());
+        behavior = new SimpleDataStoreClientBehavior(clientActorContext, actorUtilsMock, SHARD_NAME);
 
         object = new ClientLocalHistory(behavior, HISTORY_ID);
     }
index 2104110..a546955 100644
@@ -14,12 +14,12 @@ import static org.mockito.Mockito.when;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 public class DistributedDataStoreClientBehaviorTest extends AbstractDataStoreClientBehaviorTest {
     @Override
     protected AbstractDataStoreClientBehavior createBehavior(final ClientActorContext clientContext,
-                                                             final ActorContext context) {
+                                                             final ActorUtils context) {
         final ShardStrategyFactory factory = mock(ShardStrategyFactory.class);
         final ShardStrategy strategy = mock(ShardStrategy.class);
         when(strategy.findShard(any())).thenReturn(SHARD);
index 5576ba7..f44cdf3 100644
@@ -45,7 +45,7 @@ import org.opendaylight.controller.cluster.datastore.shardmanager.RegisterForSha
 import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -79,14 +79,14 @@ public class ModuleShardBackendResolverTest {
 
         shardManagerProbe = new TestProbe(system, "ShardManager");
 
-        final ActorContext actorContext = createActorContextMock(system, contextProbe.ref());
-        when(actorContext.getShardManager()).thenReturn(shardManagerProbe.ref());
+        final ActorUtils actorUtils = createActorUtilsMock(system, contextProbe.ref());
+        when(actorUtils.getShardManager()).thenReturn(shardManagerProbe.ref());
 
-        moduleShardBackendResolver = new ModuleShardBackendResolver(CLIENT_ID, actorContext);
-        when(actorContext.getShardStrategyFactory()).thenReturn(shardStrategyFactory);
+        moduleShardBackendResolver = new ModuleShardBackendResolver(CLIENT_ID, actorUtils);
+        when(actorUtils.getShardStrategyFactory()).thenReturn(shardStrategyFactory);
         when(shardStrategyFactory.getStrategy(YangInstanceIdentifier.EMPTY)).thenReturn(shardStrategy);
         final PrimaryShardInfoFutureCache cache = new PrimaryShardInfoFutureCache();
-        when(actorContext.getPrimaryShardInfoCache()).thenReturn(cache);
+        when(actorUtils.getPrimaryShardInfoCache()).thenReturn(cache);
     }
 
     @After
@@ -184,8 +184,8 @@ public class ModuleShardBackendResolverTest {
         verifyNoMoreInteractions(mockCallback);
     }
 
-    private static ActorContext createActorContextMock(final ActorSystem system, final ActorRef actor) {
-        final ActorContext mock = mock(ActorContext.class);
+    private static ActorUtils createActorUtilsMock(final ActorSystem system, final ActorRef actor) {
+        final ActorUtils mock = mock(ActorUtils.class);
         final Promise<PrimaryShardInfo> promise = new scala.concurrent.impl.Promise.DefaultPromise<>();
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
index 0e67276..236c038 100644
@@ -8,13 +8,13 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 public class SimpleDataStoreClientBehaviorTest extends AbstractDataStoreClientBehaviorTest {
 
     @Override
     protected AbstractDataStoreClientBehavior createBehavior(final ClientActorContext clientContext,
-                                                             final ActorContext context) {
+                                                             final ActorUtils context) {
         return new SimpleDataStoreClientBehavior(clientContext, context, SHARD);
     }
 
index aaa1315..361c926 100644
@@ -22,7 +22,7 @@ import org.opendaylight.controller.cluster.access.client.AbstractClientConnectio
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleClientHistory> {
     private ActorSystem system;
@@ -43,8 +43,8 @@ public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleCli
         final TestProbe actorContextProbe = new TestProbe(system, "actor-context");
         clientActorContext = AccessClientUtil.createClientActorContext(
                 system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        final ActorContext actorContextMock = createActorContextMock(system, actorContextProbe.ref());
-        behavior = new SimpleDataStoreClientBehavior(clientActorContext, actorContextMock, SHARD_NAME);
+        final ActorUtils actorUtilsMock = createActorUtilsMock(system, actorContextProbe.ref());
+        behavior = new SimpleDataStoreClientBehavior(clientActorContext, actorUtilsMock, SHARD_NAME);
 
         object = new SingleClientHistory(behavior, HISTORY_ID);
     }
index 3ee49b6..3e9208e 100644
@@ -71,7 +71,7 @@ import org.opendaylight.controller.cluster.datastore.modification.WriteModificat
 import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
@@ -140,7 +140,7 @@ public abstract class AbstractTransactionProxyTest extends AbstractTest {
     };
 
     @Mock
-    protected ActorContext mockActorContext;
+    protected ActorUtils mockActorContext;
 
     protected TransactionContextFactory mockComponentFactory;
 
index 3f5f222..373d4d7 100644
@@ -39,7 +39,7 @@ import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
 import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
@@ -54,12 +54,12 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
     @Test(timeout = 10000)
     public void testSuccessfulRegistration() {
         final TestKit kit = new TestKit(getSystem());
-        ActorContext actorContext = new ActorContext(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
         final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
         final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorContext, mockListener, path);
+                actorUtils, mockListener, path);
 
         new Thread(() -> proxy.init("shard-1")).start();
 
@@ -101,7 +101,7 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
     @Test(timeout = 10000)
     public void testSuccessfulRegistrationForClusteredListener() {
         final TestKit kit = new TestKit(getSystem());
-        ActorContext actorContext = new ActorContext(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
         ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
@@ -109,7 +109,7 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
 
         final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
         final DataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> proxy =
-                new DataTreeChangeListenerProxy<>(actorContext, mockClusteredListener, path);
+                new DataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener, path);
 
         new Thread(() -> proxy.init("shard-1")).start();
 
@@ -130,12 +130,12 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
     @Test(timeout = 10000)
     public void testLocalShardNotFound() {
         final TestKit kit = new TestKit(getSystem());
-        ActorContext actorContext = new ActorContext(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
         final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
         final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorContext, mockListener, path);
+                actorUtils, mockListener, path);
 
         new Thread(() -> proxy.init("shard-1")).start();
 
@@ -153,12 +153,12 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
     @Test(timeout = 10000)
     public void testLocalShardNotInitialized() {
         final TestKit kit = new TestKit(getSystem());
-        ActorContext actorContext = new ActorContext(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
         final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
         final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorContext, mockListener, path);
+                actorUtils, mockListener, path);
 
         new Thread(() -> proxy.init("shard-1")).start();
 
@@ -185,22 +185,22 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
         doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
         ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());
 
-        ActorContext actorContext = mock(ActorContext.class);
+        ActorUtils actorUtils = mock(ActorUtils.class);
         final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
 
-        doReturn(executor).when(actorContext).getClientDispatcher();
-        doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
-        doReturn(mockActorSystem).when(actorContext).getActorSystem();
+        doReturn(executor).when(actorUtils).getClientDispatcher();
+        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
+        doReturn(mockActorSystem).when(actorUtils).getActorSystem();
 
         String shardName = "shard-1";
         final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorContext, mockListener, path);
+                actorUtils, mockListener, path);
 
-        doReturn(kit.duration("5 seconds")).when(actorContext).getOperationDuration();
-        doReturn(Futures.successful(kit.getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
-        doReturn(Futures.failed(new RuntimeException("mock"))).when(actorContext).executeOperationAsync(
+        doReturn(kit.duration("5 seconds")).when(actorUtils).getOperationDuration();
+        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync(eq(shardName));
+        doReturn(Futures.failed(new RuntimeException("mock"))).when(actorUtils).executeOperationAsync(
             any(ActorRef.class), any(Object.class), any(Timeout.class));
-        doReturn(mock(DatastoreContext.class)).when(actorContext).getDatastoreContext();
+        doReturn(mock(DatastoreContext.class)).when(actorUtils).getDatastoreContext();
 
         proxy.init("shard-1");
 
@@ -212,28 +212,28 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
     @Test
     public void testCloseBeforeRegistration() {
         final TestKit kit = new TestKit(getSystem());
-        ActorContext actorContext = mock(ActorContext.class);
+        ActorUtils actorUtils = mock(ActorUtils.class);
 
         String shardName = "shard-1";
 
-        doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
-        doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorContext).getClientDispatcher();
-        doReturn(getSystem()).when(actorContext).getActorSystem();
-        doReturn(Dispatchers.DEFAULT_DISPATCHER_PATH).when(actorContext).getNotificationDispatcherPath();
-        doReturn(getSystem().actorSelection(kit.getRef().path())).when(actorContext).actorSelection(
+        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
+        doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorUtils).getClientDispatcher();
+        doReturn(getSystem()).when(actorUtils).getActorSystem();
+        doReturn(Dispatchers.DEFAULT_DISPATCHER_PATH).when(actorUtils).getNotificationDispatcherPath();
+        doReturn(getSystem().actorSelection(kit.getRef().path())).when(actorUtils).actorSelection(
             kit.getRef().path());
-        doReturn(kit.duration("5 seconds")).when(actorContext).getOperationDuration();
-        doReturn(Futures.successful(kit.getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
+        doReturn(kit.duration("5 seconds")).when(actorUtils).getOperationDuration();
+        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync(eq(shardName));
 
         final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorContext, mockListener, YangInstanceIdentifier.of(TestModel.TEST_QNAME));
+                actorUtils, mockListener, YangInstanceIdentifier.of(TestModel.TEST_QNAME));
 
         Answer<Future<Object>> answer = invocation -> {
             proxy.close();
             return Futures.successful((Object) new RegisterDataTreeNotificationListenerReply(kit.getRef()));
         };
 
-        doAnswer(answer).when(actorContext).executeOperationAsync(any(ActorRef.class), any(Object.class),
+        doAnswer(answer).when(actorUtils).executeOperationAsync(any(ActorRef.class), any(Object.class),
             any(Timeout.class));
 
         proxy.init(shardName);
index d569f7b..a4d6d2c 100644
@@ -624,8 +624,8 @@ public class DistributedDataStoreIntegrationTest {
 
         try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName)) {
 
-            final Object result = dataStore.getActorContext().executeOperation(
-                dataStore.getActorContext().getShardManager(), new FindLocalShard(shardName, true));
+            final Object result = dataStore.getActorUtils().executeOperation(
+                dataStore.getActorUtils().getShardManager(), new FindLocalShard(shardName, true));
             assertTrue("Expected LocalShardFound. Actual: " + result, result instanceof LocalShardFound);
 
             // Create the write Tx.
index ddd43af..3943e7e 100644
@@ -209,7 +209,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
                 testParameter, type, moduleShardsConfig, false, shards);
 
-        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorContext(), shards);
+        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(), shards);
 
         leaderTestKit.waitForMembersUp("member-2");
         followerTestKit.waitForMembersUp("member-1");
@@ -602,7 +602,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         TestKit.shutdownActorSystem(leaderSystem, true);
         Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
 
-        followerTestKit.waitUntilNoLeader(followerDistributedDataStore.getActorContext(), CARS);
+        followerTestKit.waitUntilNoLeader(followerDistributedDataStore.getActorUtils(), CARS);
 
         leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
         Cluster.get(leaderSystem).join(MEMBER_2_ADDRESS);
@@ -615,7 +615,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                 newMember1TestKit.setupAbstractDataStore(
                         testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) {
 
-            followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), CARS);
+            followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), CARS);
 
             // Write a car entry to the new leader - should switch to local Tx
 
@@ -635,10 +635,10 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     @Test
     public void testReadyLocalTransactionForwardedToLeader() throws Exception {
         initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader");
-        followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), "cars");
+        followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");
 
         final com.google.common.base.Optional<ActorRef> carsFollowerShard =
-                followerDistributedDataStore.getActorContext().findLocalShard("cars");
+                followerDistributedDataStore.getActorUtils().findLocalShard("cars");
         assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
 
         final DataTree dataTree = new InMemoryDataTreeFactory().create(
@@ -683,13 +683,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());
 
-        final ActorSelection txActor = leaderDistributedDataStore.getActorContext().actorSelection(
+        final ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
                 ((ReadyTransactionReply)resp).getCohortPath());
 
         final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
         Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
         ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
-                leaderDistributedDataStore.getActorContext(), Arrays.asList(
+                leaderDistributedDataStore.getActorUtils(), Arrays.asList(
                         new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
         cohort.canCommit().get(5, TimeUnit.SECONDS);
         cohort.preCommit().get(5, TimeUnit.SECONDS);
@@ -702,10 +702,10 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     @Test
     public void testForwardedReadyTransactionForwardedToLeader() throws Exception {
         initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader");
-        followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), "cars");
+        followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");
 
         final com.google.common.base.Optional<ActorRef> carsFollowerShard =
-                followerDistributedDataStore.getActorContext().findLocalShard("cars");
+                followerDistributedDataStore.getActorUtils().findLocalShard("cars");
         assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
 
         carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
@@ -754,13 +754,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());
 
-        ActorSelection txActor = leaderDistributedDataStore.getActorContext().actorSelection(
+        ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
                 ((ReadyTransactionReply)resp).getCohortPath());
 
         final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
         Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
         final ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
-                leaderDistributedDataStore.getActorContext(), Arrays.asList(
+                leaderDistributedDataStore.getActorUtils(), Arrays.asList(
                         new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
         cohort.canCommit().get(5, TimeUnit.SECONDS);
         cohort.preCommit().get(5, TimeUnit.SECONDS);
@@ -851,7 +851,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
                 .shardElectionTimeoutFactor(10));
 
-        leaderTestKit.waitUntilNoLeader(leaderDistributedDataStore.getActorContext(), "cars");
+        leaderTestKit.waitUntilNoLeader(leaderDistributedDataStore.getActorUtils(), "cars");
 
         // Submit all tx's - the messages should get queued for retry.
 
@@ -865,9 +865,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
                 .customRaftPolicyImplementation(null).shardElectionTimeoutFactor(1));
-        IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorContext(), "cars")
+        IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorUtils(), "cars")
                 .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
-        IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorContext(), "people")
+        IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorUtils(), "people")
                 .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
 
         followerTestKit.doCommit(writeTx1CanCommit, writeTx1Cohort);
@@ -924,7 +924,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                 .shardElectionTimeoutFactor(100));
 
             final FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS);
-            final Future<ActorRef> future = leaderDistributedDataStore.getActorContext().findLocalShardAsync("cars");
+            final Future<ActorRef> future = leaderDistributedDataStore.getActorUtils().findLocalShardAsync("cars");
             final ActorRef leaderActor = Await.result(future, duration);
 
             final Future<Boolean> stopFuture = Patterns.gracefulStop(leaderActor, duration, Shutdown.INSTANCE);
@@ -968,9 +968,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         successWriteTx.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer());
 
         // Stop the follower
-        followerTestKit.watch(followerDistributedDataStore.getActorContext().getShardManager());
+        followerTestKit.watch(followerDistributedDataStore.getActorUtils().getShardManager());
         followerDistributedDataStore.close();
-        followerTestKit.expectTerminated(followerDistributedDataStore.getActorContext().getShardManager());
+        followerTestKit.expectTerminated(followerDistributedDataStore.getActorUtils().getShardManager());
 
         // Submit the preIsolatedLeaderWriteTx so it's pending
         final DOMStoreThreePhaseCommitCohort preIsolatedLeaderTxCohort = preIsolatedLeaderWriteTx.ready();
index cadec51..f608aa0 100644
@@ -27,7 +27,7 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import scala.concurrent.duration.FiniteDuration;
@@ -39,7 +39,7 @@ public class DistributedDataStoreTest extends AbstractActorTest {
     private static SchemaContext SCHEMA_CONTEXT;
 
     @Mock
-    private ActorContext actorContext;
+    private ActorUtils actorUtils;
 
     @Mock
     private DatastoreContext datastoreContext;
@@ -61,48 +61,48 @@ public class DistributedDataStoreTest extends AbstractActorTest {
     public void setUp() {
         MockitoAnnotations.initMocks(this);
 
-        doReturn(SCHEMA_CONTEXT).when(actorContext).getSchemaContext();
-        doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+        doReturn(SCHEMA_CONTEXT).when(actorUtils).getSchemaContext();
+        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
     }
 
     @Test
     public void testRateLimitingUsedInReadWriteTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext, UNKNOWN_ID)) {
+        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
 
             distributedDataStore.newReadWriteTransaction();
 
-            verify(actorContext, times(1)).acquireTxCreationPermit();
+            verify(actorUtils, times(1)).acquireTxCreationPermit();
         }
     }
 
     @Test
     public void testRateLimitingUsedInWriteOnlyTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext, UNKNOWN_ID)) {
+        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
 
             distributedDataStore.newWriteOnlyTransaction();
 
-            verify(actorContext, times(1)).acquireTxCreationPermit();
+            verify(actorUtils, times(1)).acquireTxCreationPermit();
         }
     }
 
     @Test
     public void testRateLimitingNotUsedInReadOnlyTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext, UNKNOWN_ID)) {
+        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
 
             distributedDataStore.newReadOnlyTransaction();
             distributedDataStore.newReadOnlyTransaction();
             distributedDataStore.newReadOnlyTransaction();
 
-            verify(actorContext, times(0)).acquireTxCreationPermit();
+            verify(actorUtils, times(0)).acquireTxCreationPermit();
         }
     }
 
     @Test
     public void testWaitTillReadyBlocking() {
-        doReturn(datastoreContext).when(actorContext).getDatastoreContext();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
         doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
         doReturn(FiniteDuration.apply(50, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext, UNKNOWN_ID)) {
+        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
 
             long start = System.currentTimeMillis();
 
@@ -116,8 +116,8 @@ public class DistributedDataStoreTest extends AbstractActorTest {
 
     @Test
     public void testWaitTillReadyCountDown() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorContext, UNKNOWN_ID)) {
-            doReturn(datastoreContext).when(actorContext).getDatastoreContext();
+        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
+            doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
             doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
             doReturn(FiniteDuration.apply(5000, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
 
index dc78e03..b483be8 100644
@@ -36,7 +36,7 @@ import org.opendaylight.controller.cluster.datastore.config.EmptyModuleShardConf
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
@@ -147,7 +147,7 @@ public class IntegrationTestKit extends ShardTestKit {
         dataStore.onGlobalContextUpdated(schemaContext);
 
         if (waitUntilLeader) {
-            waitUntilLeader(dataStore.getActorContext(), shardNames);
+            waitUntilLeader(dataStore.getActorUtils(), shardNames);
         }
 
         datastoreContextBuilder = DatastoreContext.newBuilderFrom(datastoreContext);
@@ -210,9 +210,9 @@ public class IntegrationTestKit extends ShardTestKit {
         return dataStore;
     }
 
-    public void waitUntilLeader(final ActorContext actorContext, final String... shardNames) {
+    public void waitUntilLeader(final ActorUtils actorUtils, final String... shardNames) {
         for (String shardName: shardNames) {
-            ActorRef shard = findLocalShard(actorContext, shardName);
+            ActorRef shard = findLocalShard(actorUtils, shardName);
 
             assertNotNull("Shard was not created for " + shardName, shard);
 
@@ -220,9 +220,9 @@ public class IntegrationTestKit extends ShardTestKit {
         }
     }
 
-    public void waitUntilNoLeader(final ActorContext actorContext, final String... shardNames) {
+    public void waitUntilNoLeader(final ActorUtils actorUtils, final String... shardNames) {
         for (String shardName: shardNames) {
-            ActorRef shard = findLocalShard(actorContext, shardName);
+            ActorRef shard = findLocalShard(actorUtils, shardName);
             assertNotNull("No local shard found for " + shardName, shard);
 
             waitUntilNoLeader(shard);
@@ -247,11 +247,11 @@ public class IntegrationTestKit extends ShardTestKit {
         fail("Member(s) " + otherMembersSet + " are not Up");
     }
 
-    public static ActorRef findLocalShard(final ActorContext actorContext, final String shardName) {
+    public static ActorRef findLocalShard(final ActorUtils actorUtils, final String shardName) {
         ActorRef shard = null;
         for (int i = 0; i < 20 * 5 && shard == null; i++) {
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
-            com.google.common.base.Optional<ActorRef> shardReply = actorContext.findLocalShard(shardName);
+            com.google.common.base.Optional<ActorRef> shardReply = actorUtils.findLocalShard(shardName);
             if (shardReply.isPresent()) {
                 shard = shardReply.get();
             }
@@ -259,11 +259,11 @@ public class IntegrationTestKit extends ShardTestKit {
         return shard;
     }
 
-    public static void waitUntilShardIsDown(final ActorContext actorContext, final String shardName) {
+    public static void waitUntilShardIsDown(final ActorUtils actorUtils, final String shardName) {
         for (int i = 0; i < 20 * 5 ; i++) {
             LOG.debug("Waiting for shard down {}", shardName);
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
-            com.google.common.base.Optional<ActorRef> shardReply = actorContext.findLocalShard(shardName);
+            com.google.common.base.Optional<ActorRef> shardReply = actorUtils.findLocalShard(shardName);
             if (!shardReply.isPresent()) {
                 return;
             }
@@ -274,15 +274,15 @@ public class IntegrationTestKit extends ShardTestKit {
 
     public static void verifyShardStats(final AbstractDataStore datastore, final String shardName,
             final ShardStatsVerifier verifier) throws Exception {
-        ActorContext actorContext = datastore.getActorContext();
+        ActorUtils actorUtils = datastore.getActorUtils();
 
-        Future<ActorRef> future = actorContext.findLocalShardAsync(shardName);
+        Future<ActorRef> future = actorUtils.findLocalShardAsync(shardName);
         ActorRef shardActor = Await.result(future, FiniteDuration.create(10, TimeUnit.SECONDS));
 
         AssertionError lastError = null;
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
-            ShardStats shardStats = (ShardStats)actorContext
+            ShardStats shardStats = (ShardStats)actorUtils
                     .executeOperation(shardActor, Shard.GET_SHARD_MBEAN_MESSAGE);
 
             try {
@@ -299,15 +299,15 @@ public class IntegrationTestKit extends ShardTestKit {
 
     public static void verifyShardState(final AbstractDataStore datastore, final String shardName,
             final Consumer<OnDemandShardState> verifier) throws Exception {
-        ActorContext actorContext = datastore.getActorContext();
+        ActorUtils actorUtils = datastore.getActorUtils();
 
-        Future<ActorRef> future = actorContext.findLocalShardAsync(shardName);
+        Future<ActorRef> future = actorUtils.findLocalShardAsync(shardName);
         ActorRef shardActor = Await.result(future, FiniteDuration.create(10, TimeUnit.SECONDS));
 
         AssertionError lastError = null;
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
-            OnDemandShardState shardState = (OnDemandShardState)actorContext
+            OnDemandShardState shardState = (OnDemandShardState)actorUtils
                     .executeOperation(shardActor, GetOnDemandRaftState.INSTANCE);
 
             try {
index faa4824..60cc13b 100644
@@ -29,7 +29,7 @@ import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
@@ -132,15 +132,15 @@ public class MemberNode {
 
     public static void verifyRaftState(final AbstractDataStore datastore, final String shardName,
             final RaftStateVerifier verifier) throws Exception {
-        ActorContext actorContext = datastore.getActorContext();
+        ActorUtils actorUtils = datastore.getActorUtils();
 
-        Future<ActorRef> future = actorContext.findLocalShardAsync(shardName);
+        Future<ActorRef> future = actorUtils.findLocalShardAsync(shardName);
         ActorRef shardActor = Await.result(future, FiniteDuration.create(10, TimeUnit.SECONDS));
 
         AssertionError lastError = null;
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
-            OnDemandRaftState raftState = (OnDemandRaftState)actorContext
+            OnDemandRaftState raftState = (OnDemandRaftState)actorUtils
                     .executeOperation(shardActor, GetOnDemandRaftState.INSTANCE);
 
             try {
@@ -160,7 +160,7 @@ public class MemberNode {
         final Set<String> peerIds = Sets.newHashSet();
         for (String p: peerMemberNames) {
             peerIds.add(ShardIdentifier.create(shardName, MemberName.forName(p),
-                datastore.getActorContext().getDataStoreName()).toString());
+                datastore.getActorUtils().getDataStoreName()).toString());
         }
 
         verifyRaftState(datastore, shardName, raftState -> assertEquals("Peers for shard " + shardName, peerIds,
@@ -170,7 +170,7 @@ public class MemberNode {
     public static void verifyNoShardPresent(final AbstractDataStore datastore, final String shardName) {
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
-            Optional<ActorRef> shardReply = datastore.getActorContext().findLocalShard(shardName);
+            Optional<ActorRef> shardReply = datastore.getActorUtils().findLocalShard(shardName);
             if (!shardReply.isPresent()) {
                 return;
             }
index 93c83a4..ead6486 100644
@@ -35,7 +35,7 @@ import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
 import org.opendaylight.controller.cluster.datastore.messages.DataExists;
 import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
@@ -51,16 +51,16 @@ public class RemoteTransactionContextTest extends AbstractActorTest {
 
     private OperationLimiter limiter;
     private RemoteTransactionContext txContext;
-    private ActorContext actorContext;
+    private ActorUtils actorUtils;
     private TestKit kit;
 
     @Before
     public void before() {
         kit = new TestKit(getSystem());
-        actorContext = Mockito.spy(new ActorContext(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        actorUtils = Mockito.spy(new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class)));
         limiter = new OperationLimiter(TX_ID, 4, 0);
-        txContext = new RemoteTransactionContext(TX_ID, actorContext.actorSelection(kit.getRef().path()), actorContext,
+        txContext = new RemoteTransactionContext(TX_ID, actorUtils.actorSelection(kit.getRef().path()), actorUtils,
             DataStoreVersions.CURRENT_VERSION, limiter);
         txContext.operationHandOffComplete();
     }
index 066be88..b7a303e 100644
@@ -45,7 +45,7 @@ import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransacti
 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
 import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
@@ -58,7 +58,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
     static class TestException extends RuntimeException {
     }
 
-    private ActorContext actorContext;
+    private ActorUtils actorUtils;
 
     @Mock
     private Timer commitTimer;
@@ -78,7 +78,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
     public void setUp() {
         MockitoAnnotations.initMocks(this);
 
-        actorContext = new ActorContext(getSystem(), actorFactory.createActor(Props.create(DoNothingActor.class)),
+        actorUtils = new ActorUtils(getSystem(), actorFactory.createActor(Props.create(DoNothingActor.class)),
                 new MockClusterWrapper(), new MockConfiguration(), DatastoreContext.newBuilder().build(),
                 new PrimaryShardInfoFutureCache()) {
             @Override
@@ -103,7 +103,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testCanCommitYesWithOneCohort() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, Arrays.asList(
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
                 newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
                         CanCommitTransactionReply.yes(CURRENT_VERSION)))), tx);
 
@@ -113,7 +113,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testCanCommitNoWithOneCohort() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, Arrays.asList(
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
                 newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
                         CanCommitTransactionReply.no(CURRENT_VERSION)))), tx);
 
@@ -128,7 +128,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
                         CanCommitTransactionReply.yes(CURRENT_VERSION))),
                 newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
                         CanCommitTransactionReply.yes(CURRENT_VERSION))));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifyCohortActors();
@@ -142,7 +142,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
                 newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
                         CanCommitTransactionReply.no(CURRENT_VERSION))),
                 newCohortInfo(new CohortActor.Builder(tx)));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
 
         verifyCanCommit(proxy.canCommit(), false);
         verifyCohortActors();
@@ -150,7 +150,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test(expected = TestException.class)
     public void testCanCommitWithExceptionFailure() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, Arrays.asList(
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
                 newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(new TestException()))), tx);
 
         propagateExecutionExceptionCause(proxy.canCommit());
@@ -158,7 +158,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test(expected = IllegalArgumentException.class)
     public void testCanCommitWithInvalidResponseType() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, Arrays.asList(
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
                 newCohortInfo(new CohortActor.Builder(tx).expectCanCommit("invalid"))), tx);
 
         propagateExecutionExceptionCause(proxy.canCommit());
@@ -170,7 +170,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
                 newCohortInfo(new CohortActor.Builder(tx)),
                 newCohortInfoWithFailedFuture(new TestException()),
                 newCohortInfo(new CohortActor.Builder(tx)));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
 
         propagateExecutionExceptionCause(proxy.canCommit());
     }
@@ -184,7 +184,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
                 newCohortInfo(
                         new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
                                 .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifySuccessfulFuture(proxy.preCommit());
@@ -201,7 +201,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
                 newCohortInfo(
                         new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
                                 .expectCommit(new TestException())));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifySuccessfulFuture(proxy.preCommit());
@@ -210,7 +210,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test(expected = IllegalArgumentException.class)
     public void testCommitWithInvalidResponseType() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext,
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
                 Arrays.asList(newCohortInfo(new CohortActor.Builder(tx)
                         .expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION)).expectCommit("invalid"))), tx);
 
@@ -221,7 +221,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testAbort() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, Arrays.asList(
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
                 newCohortInfo(new CohortActor.Builder(tx).expectAbort(
                         AbortTransactionReply.instance(CURRENT_VERSION)))), tx);
 
@@ -231,7 +231,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testAbortWithFailure() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, Arrays.asList(
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
                 newCohortInfo(new CohortActor.Builder(tx).expectAbort(new RuntimeException("mock")))), tx);
 
         // The exception should not get propagated.
@@ -243,7 +243,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
     public void testAbortWithFailedCohortFuture() throws Exception {
         List<CohortInfo> cohorts = Arrays.asList(
                 newCohortInfoWithFailedFuture(new TestException()), newCohortInfo(new CohortActor.Builder(tx)));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
 
         verifySuccessfulFuture(proxy.abort());
         verifyCohortActors();
@@ -251,7 +251,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testWithNoCohorts() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorContext,
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
                 Collections.<CohortInfo>emptyList(), tx);
 
         verifyCanCommit(proxy.canCommit(), true);
index 888a9e6..e899ad0 100644
@@ -16,11 +16,11 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 public class TransactionContextWrapperTest {
     @Mock
-    private ActorContext actorContext;
+    private ActorUtils actorUtils;
 
     @Mock
     private TransactionContext transactionContext;
@@ -30,9 +30,9 @@ public class TransactionContextWrapperTest {
     @Before
     public void setUp() {
         MockitoAnnotations.initMocks(this);
-        doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
         transactionContextWrapper = new TransactionContextWrapper(MockIdentifiers.transactionIdentifier(
-            TransactionContextWrapperTest.class, "mock"), actorContext, "mock");
+            TransactionContextWrapperTest.class, "mock"), actorUtils, "mock");
     }
 
     @Test
index eb18f21..de3a780 100644
@@ -21,7 +21,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 /**
  * Unit tests for TransactionRateLimitingCallback.
@@ -31,7 +31,7 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 public class TransactionRateLimitingCallbackTest {
 
     @Mock
-    ActorContext mockContext;
+    ActorUtils mockContext;
 
     @Mock
     Timer mockTimer;
@@ -44,7 +44,7 @@ public class TransactionRateLimitingCallbackTest {
     @Before
     public void setUp() {
         MockitoAnnotations.initMocks(this);
-        doReturn(mockTimer).when(mockContext).getOperationTimer(ActorContext.COMMIT);
+        doReturn(mockTimer).when(mockContext).getOperationTimer(ActorUtils.COMMIT);
         callback = new TransactionRateLimitingCallback(mockContext);
         TransactionRateLimitingCallback.setTicker(mockTicker);
     }
index 9d91c27..ad80b86 100644
@@ -129,7 +129,7 @@ public class DistributedEntityOwnershipIntegrationTest {
     }
 
     private static DistributedEntityOwnershipService newOwnershipService(final AbstractDataStore datastore) {
-        return DistributedEntityOwnershipService.start(datastore.getActorContext(),
+        return DistributedEntityOwnershipService.start(datastore.getActorUtils(),
                 EntityOwnerSelectionStrategyConfig.newBuilder().build());
     }
 
@@ -160,7 +160,7 @@ public class DistributedEntityOwnershipIntegrationTest {
         final DOMEntityOwnershipService follower2EntityOwnershipService =
                 newOwnershipService(follower2Node.configDataStore());
 
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorContext(), ENTITY_OWNERSHIP_SHARD_NAME);
+        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
 
         leaderEntityOwnershipService.registerListener(ENTITY_TYPE1, leaderMockListener);
         leaderEntityOwnershipService.registerListener(ENTITY_TYPE2, leaderMockListener2);
@@ -305,7 +305,7 @@ public class DistributedEntityOwnershipIntegrationTest {
         final DOMEntityOwnershipService follower2EntityOwnershipService =
                 newOwnershipService(follower2Node.configDataStore());
 
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorContext(), ENTITY_OWNERSHIP_SHARD_NAME);
+        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
 
         // Register follower1 candidate for entity1 and verify it becomes owner
 
@@ -331,12 +331,12 @@ public class DistributedEntityOwnershipIntegrationTest {
 
         // Re-enable elections on all remaining followers so one becomes the new leader
 
-        ActorRef follower1Shard = IntegrationTestKit.findLocalShard(follower1Node.configDataStore().getActorContext(),
+        ActorRef follower1Shard = IntegrationTestKit.findLocalShard(follower1Node.configDataStore().getActorUtils(),
                 ENTITY_OWNERSHIP_SHARD_NAME);
         follower1Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
                 .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
 
-        ActorRef follower2Shard = IntegrationTestKit.findLocalShard(follower2Node.configDataStore().getActorContext(),
+        ActorRef follower2Shard = IntegrationTestKit.findLocalShard(follower2Node.configDataStore().getActorUtils(),
                 ENTITY_OWNERSHIP_SHARD_NAME);
         follower2Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
                 .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
@@ -406,7 +406,7 @@ public class DistributedEntityOwnershipIntegrationTest {
                 newOwnershipService(follower3Node.configDataStore());
         newOwnershipService(follower4Node.configDataStore());
 
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorContext(), ENTITY_OWNERSHIP_SHARD_NAME);
+        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
 
         // Register follower1 candidate for entity1 and verify it becomes owner
 
@@ -438,17 +438,17 @@ public class DistributedEntityOwnershipIntegrationTest {
 
         // Re-enable elections on all remaining followers so one becomes the new leader
 
-        ActorRef follower1Shard = IntegrationTestKit.findLocalShard(follower1Node.configDataStore().getActorContext(),
+        ActorRef follower1Shard = IntegrationTestKit.findLocalShard(follower1Node.configDataStore().getActorUtils(),
                 ENTITY_OWNERSHIP_SHARD_NAME);
         follower1Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
                 .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
 
-        ActorRef follower2Shard = IntegrationTestKit.findLocalShard(follower2Node.configDataStore().getActorContext(),
+        ActorRef follower2Shard = IntegrationTestKit.findLocalShard(follower2Node.configDataStore().getActorUtils(),
                 ENTITY_OWNERSHIP_SHARD_NAME);
         follower2Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
                 .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
 
-        ActorRef follower4Shard = IntegrationTestKit.findLocalShard(follower4Node.configDataStore().getActorContext(),
+        ActorRef follower4Shard = IntegrationTestKit.findLocalShard(follower4Node.configDataStore().getActorUtils(),
                 ENTITY_OWNERSHIP_SHARD_NAME);
         follower4Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
                 .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
@@ -503,7 +503,7 @@ public class DistributedEntityOwnershipIntegrationTest {
         final DOMEntityOwnershipService follower2EntityOwnershipService =
                 newOwnershipService(follower2Node.configDataStore());
 
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorContext(), ENTITY_OWNERSHIP_SHARD_NAME);
+        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
 
         leaderEntityOwnershipService.registerListener(ENTITY_TYPE1, leaderMockListener);
         follower1EntityOwnershipService.registerListener(ENTITY_TYPE1, follower1MockListener);
@@ -587,7 +587,7 @@ public class DistributedEntityOwnershipIntegrationTest {
         AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
         final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
 
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorContext(), ENTITY_OWNERSHIP_SHARD_NAME);
+        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
 
         MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
@@ -611,7 +611,7 @@ public class DistributedEntityOwnershipIntegrationTest {
 
         // Add replica in follower1
         AddShardReplica addReplica = new AddShardReplica(ENTITY_OWNERSHIP_SHARD_NAME);
-        follower1DistributedDataStore.getActorContext().getShardManager().tell(addReplica,
+        follower1DistributedDataStore.getActorUtils().getShardManager().tell(addReplica,
                 follower1Node.kit().getRef());
         Object reply = follower1Node.kit().expectMsgAnyClassOf(follower1Node.kit().duration("5 sec"),
                 Success.class, Failure.class);
@@ -673,7 +673,7 @@ public class DistributedEntityOwnershipIntegrationTest {
                 newOwnershipService(follower1Node.configDataStore());
         newOwnershipService(follower2Node.configDataStore());
 
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorContext(), ENTITY_OWNERSHIP_SHARD_NAME);
+        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
 
         // Register leader candidate for entity1 and verify it becomes owner
 
@@ -716,7 +716,7 @@ public class DistributedEntityOwnershipIntegrationTest {
                 newOwnershipService(follower1Node.configDataStore());
         newOwnershipService(follower2Node.configDataStore());
 
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorContext(), ENTITY_OWNERSHIP_SHARD_NAME);
+        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
 
         // Register leader candidate for entity1 and verify it becomes owner
 
@@ -782,12 +782,12 @@ public class DistributedEntityOwnershipIntegrationTest {
                 newOwnershipService(member5FollowerNode.configDataStore());
 
         newOwnershipService(member1LeaderNode.configDataStore());
-        member1LeaderNode.kit().waitUntilLeader(member1LeaderNode.configDataStore().getActorContext(),
+        member1LeaderNode.kit().waitUntilLeader(member1LeaderNode.configDataStore().getActorUtils(),
                 ENTITY_OWNERSHIP_SHARD_NAME);
 
         // Make member4 and member5 non-voting
 
-        Future<Object> future = Patterns.ask(leaderDistributedDataStore.getActorContext().getShardManager(),
+        Future<Object> future = Patterns.ask(leaderDistributedDataStore.getActorUtils().getShardManager(),
                 new ChangeShardMembersVotingStatus(ENTITY_OWNERSHIP_SHARD_NAME,
                         ImmutableMap.of("member-4", Boolean.FALSE, "member-5", Boolean.FALSE)),
                 new Timeout(10, TimeUnit.SECONDS));
@@ -821,7 +821,7 @@ public class DistributedEntityOwnershipIntegrationTest {
         // Switch member4 and member5 back to voting and member3 non-voting. This should result in member4 and member5
         // to become entity owners.
 
-        future = Patterns.ask(leaderDistributedDataStore.getActorContext().getShardManager(),
+        future = Patterns.ask(leaderDistributedDataStore.getActorUtils().getShardManager(),
                 new ChangeShardMembersVotingStatus(ENTITY_OWNERSHIP_SHARD_NAME,
                         ImmutableMap.of("member-3", Boolean.FALSE, "member-4", Boolean.TRUE, "member-5", Boolean.TRUE)),
                 new Timeout(10, TimeUnit.SECONDS));
index 3ca33ef..b130134 100644
@@ -118,10 +118,10 @@ public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEnt
 
     @Test
     public void testEntityOwnershipShardCreated() throws Exception {
-        DistributedEntityOwnershipService service = DistributedEntityOwnershipService.start(dataStore.getActorContext(),
+        DistributedEntityOwnershipService service = DistributedEntityOwnershipService.start(dataStore.getActorUtils(),
                 EntityOwnerSelectionStrategyConfig.newBuilder().build());
 
-        Future<ActorRef> future = dataStore.getActorContext().findLocalShardAsync(
+        Future<ActorRef> future = dataStore.getActorUtils().findLocalShardAsync(
                 DistributedEntityOwnershipService.ENTITY_OWNERSHIP_SHARD_NAME);
         ActorRef shardActor = Await.result(future, FiniteDuration.create(10, TimeUnit.SECONDS));
         assertNotNull(DistributedEntityOwnershipService.ENTITY_OWNERSHIP_SHARD_NAME + " not found", shardActor);
@@ -132,7 +132,7 @@ public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEnt
     @Test
     public void testRegisterCandidate() throws Exception {
         DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
-            dataStore.getActorContext(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
+            dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
 
         YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
         DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
@@ -141,7 +141,7 @@ public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEnt
         verifyRegisterCandidateLocal(service, entity);
         verifyEntityOwnershipCandidateRegistration(entity, reg);
         verifyEntityCandidate(service.getLocalEntityOwnershipShard(), ENTITY_TYPE, entityId,
-                dataStore.getActorContext().getCurrentMemberName().getName());
+                dataStore.getActorUtils().getCurrentMemberName().getName());
 
         // Register the same entity - should throw exception
 
@@ -161,7 +161,7 @@ public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEnt
 
         verifyEntityOwnershipCandidateRegistration(entity2, reg2);
         verifyEntityCandidate(service.getLocalEntityOwnershipShard(), ENTITY_TYPE2, entityId,
-                dataStore.getActorContext().getCurrentMemberName().getName());
+                dataStore.getActorUtils().getCurrentMemberName().getName());
 
         service.close();
     }
@@ -169,7 +169,7 @@ public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEnt
     @Test
     public void testCloseCandidateRegistration() throws Exception {
         DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
-            dataStore.getActorContext(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
+            dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
 
         DOMEntity entity = new DOMEntity(ENTITY_TYPE, YangInstanceIdentifier.of(QNAME));
 
@@ -194,7 +194,7 @@ public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEnt
     @Test
     public void testListenerRegistration() {
         DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
-            dataStore.getActorContext(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
+            dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
 
         YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
         DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
@@ -222,7 +222,7 @@ public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEnt
     @Test
     public void testGetOwnershipState() throws Exception {
         DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
-            dataStore.getActorContext(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
+            dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
 
         final Shard mockShard = Mockito.mock(Shard.class);
         ShardDataTree shardDataTree = new ShardDataTree(mockShard, SchemaContextHelper.entityOwners(),
@@ -265,7 +265,7 @@ public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEnt
 
     @Test
     public void testIsCandidateRegistered() throws CandidateAlreadyRegisteredException {
-        DistributedEntityOwnershipService service = DistributedEntityOwnershipService.start(dataStore.getActorContext(),
+        DistributedEntityOwnershipService service = DistributedEntityOwnershipService.start(dataStore.getActorUtils(),
                 EntityOwnerSelectionStrategyConfig.newBuilder().build());
 
         final DOMEntity test = new DOMEntity("test-type", "test");
@@ -67,9 +67,9 @@ import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
-public class ActorContextTest extends AbstractActorTest {
+public class ActorUtilsTest extends AbstractActorTest {
 
-    static final Logger LOG = LoggerFactory.getLogger(ActorContextTest.class);
+    static final Logger LOG = LoggerFactory.getLogger(ActorUtilsTest.class);
 
     private static class TestMessage {
     }
@@ -148,10 +148,10 @@ public class ActorContextTest extends AbstractActorTest {
 
             ActorRef shardManagerActorRef = getSystem().actorOf(MockShardManager.props(true, shardActorRef));
 
-            ActorContext actorContext = new ActorContext(getSystem(), shardManagerActorRef,
+            ActorUtils actorUtils = new ActorUtils(getSystem(), shardManagerActorRef,
                 mock(ClusterWrapper.class), mock(Configuration.class));
 
-            Optional<ActorRef> out = actorContext.findLocalShard("default");
+            Optional<ActorRef> out = actorUtils.findLocalShard("default");
 
             assertEquals(shardActorRef, out.get());
 
@@ -164,10 +164,10 @@ public class ActorContextTest extends AbstractActorTest {
     public void testFindLocalShardWithShardNotFound() {
         ActorRef shardManagerActorRef = getSystem().actorOf(MockShardManager.props(false, null));
 
-        ActorContext actorContext = new ActorContext(getSystem(), shardManagerActorRef, mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), shardManagerActorRef, mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        Optional<ActorRef> out = actorContext.findLocalShard("default");
+        Optional<ActorRef> out = actorUtils.findLocalShard("default");
         assertFalse(out.isPresent());
     }
 
@@ -177,12 +177,12 @@ public class ActorContextTest extends AbstractActorTest {
 
         ActorRef shardManagerActorRef = getSystem().actorOf(MockShardManager.props(true, shardActorRef));
 
-        ActorContext actorContext = new ActorContext(getSystem(), shardManagerActorRef,
+        ActorUtils actorUtils = new ActorUtils(getSystem(), shardManagerActorRef,
             mock(ClusterWrapper.class), mock(Configuration.class));
 
-        ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
+        ActorSelection actor = actorUtils.actorSelection(shardActorRef.path());
 
-        Object out = actorContext.executeOperation(actor, "hello");
+        Object out = actorUtils.executeOperation(actor, "hello");
 
         assertEquals("hello", out);
     }
@@ -193,12 +193,12 @@ public class ActorContextTest extends AbstractActorTest {
 
         ActorRef shardManagerActorRef = getSystem().actorOf(MockShardManager.props(true, shardActorRef));
 
-        ActorContext actorContext = new ActorContext(getSystem(), shardManagerActorRef,
+        ActorUtils actorUtils = new ActorUtils(getSystem(), shardManagerActorRef,
             mock(ClusterWrapper.class), mock(Configuration.class));
 
-        ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
+        ActorSelection actor = actorUtils.actorSelection(shardActorRef.path());
 
-        Future<Object> future = actorContext.executeOperationAsync(actor, "hello");
+        Future<Object> future = actorUtils.executeOperationAsync(actor, "hello");
 
         Object result = Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
         assertEquals("Result", "hello", result);
@@ -207,71 +207,71 @@ public class ActorContextTest extends AbstractActorTest {
     @Test
     public void testIsPathLocal() {
         MockClusterWrapper clusterWrapper = new MockClusterWrapper();
-        ActorContext actorContext = null;
+        ActorUtils actorUtils = null;
 
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertFalse(actorContext.isPathLocal(null));
-        assertFalse(actorContext.isPathLocal(""));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertFalse(actorUtils.isPathLocal(null));
+        assertFalse(actorUtils.isPathLocal(""));
 
         clusterWrapper.setSelfAddress(null);
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertFalse(actorContext.isPathLocal(""));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertFalse(actorUtils.isPathLocal(""));
 
         // even if the path is in local format, match the primary path (first 3 elements) and return true
         clusterWrapper.setSelfAddress(new Address("akka", "test"));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertTrue(actorContext.isPathLocal("akka://test/user/$a"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertTrue(actorUtils.isPathLocal("akka://test/user/$a"));
 
         clusterWrapper.setSelfAddress(new Address("akka", "test"));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertTrue(actorContext.isPathLocal("akka://test/user/$a"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertTrue(actorUtils.isPathLocal("akka://test/user/$a"));
 
         clusterWrapper.setSelfAddress(new Address("akka", "test"));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertTrue(actorContext.isPathLocal("akka://test/user/token2/token3/$a"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertTrue(actorUtils.isPathLocal("akka://test/user/token2/token3/$a"));
 
         // self address of remote format,but Tx path local format.
         clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertTrue(actorContext.isPathLocal("akka://system/user/shardmanager/shard/transaction"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertTrue(actorUtils.isPathLocal("akka://system/user/shardmanager/shard/transaction"));
 
         // self address of local format,but Tx path remote format.
         clusterWrapper.setSelfAddress(new Address("akka", "system"));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertFalse(actorContext.isPathLocal("akka://system@127.0.0.1:2550/user/shardmanager/shard/transaction"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertFalse(actorUtils.isPathLocal("akka://system@127.0.0.1:2550/user/shardmanager/shard/transaction"));
 
         //local path but not same
         clusterWrapper.setSelfAddress(new Address("akka", "test"));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertTrue(actorContext.isPathLocal("akka://test1/user/$a"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertTrue(actorUtils.isPathLocal("akka://test1/user/$a"));
 
         //ip and port same
         clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertTrue(actorContext.isPathLocal("akka://system@127.0.0.1:2550/"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertTrue(actorUtils.isPathLocal("akka://system@127.0.0.1:2550/"));
 
         // forward-slash missing in address
         clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertFalse(actorContext.isPathLocal("akka://system@127.0.0.1:2550"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertFalse(actorUtils.isPathLocal("akka://system@127.0.0.1:2550"));
 
         //ips differ
         clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertFalse(actorContext.isPathLocal("akka://system@127.1.0.1:2550/"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertFalse(actorUtils.isPathLocal("akka://system@127.1.0.1:2550/"));
 
         //ports differ
         clusterWrapper.setSelfAddress(new Address("akka", "system", "127.0.0.1", 2550));
-        actorContext = new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
-        assertFalse(actorContext.isPathLocal("akka://system@127.0.0.1:2551/"));
+        actorUtils = new ActorUtils(getSystem(), null, clusterWrapper, mock(Configuration.class));
+        assertFalse(actorUtils.isPathLocal("akka://system@127.0.0.1:2551/"));
     }
 
     @Test
     public void testClientDispatcherIsGlobalDispatcher() {
-        ActorContext actorContext = new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
                 mock(Configuration.class), DatastoreContext.newBuilder().build(), new PrimaryShardInfoFutureCache());
 
-        assertEquals(getSystem().dispatchers().defaultGlobalDispatcher(), actorContext.getClientDispatcher());
+        assertEquals(getSystem().dispatchers().defaultGlobalDispatcher(), actorUtils.getClientDispatcher());
     }
 
     @Test
@@ -279,10 +279,10 @@ public class ActorContextTest extends AbstractActorTest {
         ActorSystem actorSystem = ActorSystem.create("with-custom-dispatchers",
                 ConfigFactory.load("application-with-custom-dispatchers.conf"));
 
-        ActorContext actorContext = new ActorContext(actorSystem, mock(ActorRef.class), mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(actorSystem, mock(ActorRef.class), mock(ClusterWrapper.class),
                 mock(Configuration.class), DatastoreContext.newBuilder().build(), new PrimaryShardInfoFutureCache());
 
-        assertNotEquals(actorSystem.dispatchers().defaultGlobalDispatcher(), actorContext.getClientDispatcher());
+        assertNotEquals(actorSystem.dispatchers().defaultGlobalDispatcher(), actorUtils.getClientDispatcher());
 
         actorSystem.terminate();
     }
@@ -290,14 +290,14 @@ public class ActorContextTest extends AbstractActorTest {
     @Test
     public void testSetDatastoreContext() {
         final TestKit testKit = new TestKit(getSystem());
-        ActorContext actorContext = new ActorContext(getSystem(), testKit.getRef(),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), testKit.getRef(),
             mock(ClusterWrapper.class), mock(Configuration.class), DatastoreContext.newBuilder()
             .operationTimeoutInSeconds(5).shardTransactionCommitTimeoutInSeconds(7).build(),
             new PrimaryShardInfoFutureCache());
 
-        assertEquals("getOperationDuration", 5, actorContext.getOperationDuration().toSeconds());
+        assertEquals("getOperationDuration", 5, actorUtils.getOperationDuration().toSeconds());
         assertEquals("getTransactionCommitOperationTimeout", 7,
-            actorContext.getTransactionCommitOperationTimeout().duration().toSeconds());
+            actorUtils.getTransactionCommitOperationTimeout().duration().toSeconds());
 
         DatastoreContext newContext = DatastoreContext.newBuilder().operationTimeoutInSeconds(6)
                 .shardTransactionCommitTimeoutInSeconds(8).build();
@@ -305,15 +305,15 @@ public class ActorContextTest extends AbstractActorTest {
         DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
         Mockito.doReturn(newContext).when(mockContextFactory).getBaseDatastoreContext();
 
-        actorContext.setDatastoreContext(mockContextFactory);
+        actorUtils.setDatastoreContext(mockContextFactory);
 
         testKit.expectMsgClass(Duration.ofSeconds(5), DatastoreContextFactory.class);
 
-        Assert.assertSame("getDatastoreContext", newContext, actorContext.getDatastoreContext());
+        Assert.assertSame("getDatastoreContext", newContext, actorUtils.getDatastoreContext());
 
-        assertEquals("getOperationDuration", 6, actorContext.getOperationDuration().toSeconds());
+        assertEquals("getOperationDuration", 6, actorUtils.getOperationDuration().toSeconds());
         assertEquals("getTransactionCommitOperationTimeout", 8,
-            actorContext.getTransactionCommitOperationTimeout().duration().toSeconds());
+            actorUtils.getTransactionCommitOperationTimeout().duration().toSeconds());
     }
 
     @Test
@@ -327,7 +327,7 @@ public class ActorContextTest extends AbstractActorTest {
 
         final String expPrimaryPath = "akka://test-system/find-primary-shard";
         final short expPrimaryVersion = DataStoreVersions.CURRENT_VERSION;
-        ActorContext actorContext = new ActorContext(getSystem(), shardManager, mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), shardManager, mock(ClusterWrapper.class),
                 mock(Configuration.class), dataStoreContext, new PrimaryShardInfoFutureCache()) {
             @Override
             protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
@@ -335,7 +335,7 @@ public class ActorContextTest extends AbstractActorTest {
             }
         };
 
-        Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
+        Future<PrimaryShardInfo> foobar = actorUtils.findPrimaryShardAsync("foobar");
         PrimaryShardInfo actual = Await.result(foobar, FiniteDuration.apply(5000, TimeUnit.MILLISECONDS));
 
         assertNotNull(actual);
@@ -344,15 +344,15 @@ public class ActorContextTest extends AbstractActorTest {
                 expPrimaryPath.endsWith(actual.getPrimaryShardActor().pathString()));
         assertEquals("getPrimaryShardVersion", expPrimaryVersion, actual.getPrimaryShardVersion());
 
-        Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
+        Future<PrimaryShardInfo> cached = actorUtils.getPrimaryShardInfoCache().getIfPresent("foobar");
 
         PrimaryShardInfo cachedInfo = Await.result(cached, FiniteDuration.apply(1, TimeUnit.MILLISECONDS));
 
         assertEquals(cachedInfo, actual);
 
-        actorContext.getPrimaryShardInfoCache().remove("foobar");
+        actorUtils.getPrimaryShardInfoCache().remove("foobar");
 
-        cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
+        cached = actorUtils.getPrimaryShardInfoCache().getIfPresent("foobar");
 
         assertNull(cached);
     }
@@ -368,7 +368,7 @@ public class ActorContextTest extends AbstractActorTest {
 
         final DataTree mockDataTree = Mockito.mock(DataTree.class);
         final String expPrimaryPath = "akka://test-system/find-primary-shard";
-        ActorContext actorContext = new ActorContext(getSystem(), shardManager, mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), shardManager, mock(ClusterWrapper.class),
                 mock(Configuration.class), dataStoreContext, new PrimaryShardInfoFutureCache()) {
             @Override
             protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
@@ -376,7 +376,7 @@ public class ActorContextTest extends AbstractActorTest {
             }
         };
 
-        Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
+        Future<PrimaryShardInfo> foobar = actorUtils.findPrimaryShardAsync("foobar");
         PrimaryShardInfo actual = Await.result(foobar, FiniteDuration.apply(5000, TimeUnit.MILLISECONDS));
 
         assertNotNull(actual);
@@ -386,15 +386,15 @@ public class ActorContextTest extends AbstractActorTest {
                 expPrimaryPath.endsWith(actual.getPrimaryShardActor().pathString()));
         assertEquals("getPrimaryShardVersion", DataStoreVersions.CURRENT_VERSION, actual.getPrimaryShardVersion());
 
-        Future<PrimaryShardInfo> cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
+        Future<PrimaryShardInfo> cached = actorUtils.getPrimaryShardInfoCache().getIfPresent("foobar");
 
         PrimaryShardInfo cachedInfo = Await.result(cached, FiniteDuration.apply(1, TimeUnit.MILLISECONDS));
 
         assertEquals(cachedInfo, actual);
 
-        actorContext.getPrimaryShardInfoCache().remove("foobar");
+        actorUtils.getPrimaryShardInfoCache().remove("foobar");
 
-        cached = actorContext.getPrimaryShardInfoCache().getIfPresent("foobar");
+        cached = actorUtils.getPrimaryShardInfoCache().getIfPresent("foobar");
 
         assertNull(cached);
     }
@@ -417,16 +417,15 @@ public class ActorContextTest extends AbstractActorTest {
                 .logicalStoreType(LogicalDatastoreType.CONFIGURATION)
                 .shardLeaderElectionTimeout(100, TimeUnit.MILLISECONDS).build();
 
-        ActorContext actorContext =
-            new ActorContext(getSystem(), shardManager, mock(ClusterWrapper.class),
+        ActorUtils actorUtils = new ActorUtils(getSystem(), shardManager, mock(ClusterWrapper.class),
                 mock(Configuration.class), dataStoreContext, new PrimaryShardInfoFutureCache()) {
-                @Override
-                protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
-                    return Futures.successful(expectedException);
-                }
-            };
+            @Override
+            protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
+                return Futures.successful(expectedException);
+            }
+        };
 
-        Future<PrimaryShardInfo> foobar = actorContext.findPrimaryShardAsync("foobar");
+        Future<PrimaryShardInfo> foobar = actorUtils.findPrimaryShardAsync("foobar");
 
         try {
             Await.result(foobar, FiniteDuration.apply(100, TimeUnit.MILLISECONDS));
@@ -437,7