Deprecate DOMDataTreeProducer-related classes
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java
index 63a3852f6d473a29a0371faa0a9806bbb341fe53..749d69987d70218dc57f6842c4266256fd9a88e5 100644
@@ -8,6 +8,7 @@
 
 package org.opendaylight.controller.cluster.sharding;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -20,14 +21,15 @@ import akka.actor.ActorSystem;
 import akka.actor.Address;
 import akka.actor.AddressFromURIString;
 import akka.cluster.Cluster;
-import akka.testkit.JavaTestKit;
+import akka.testkit.javadsl.TestKit;
 import com.google.common.collect.Lists;
 import com.typesafe.config.ConfigFactory;
 import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.ActorSystemProvider;
@@ -39,7 +41,6 @@ import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
@@ -54,7 +55,7 @@ import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLe
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Ignore("https://bugs.opendaylight.org/show_bug.cgi?id=8301")
+@Deprecated(forRemoval = true)
 public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
 
     private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);
@@ -65,21 +66,17 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
     private static final DOMDataTreeIdentifier TEST_ID =
             new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
 
-    private static final String MODULE_SHARDS_CONFIG = "module-shards-cars-member-1-and-2.conf";
+    private static final String MODULE_SHARDS_CONFIG = "module-shards-default.conf";
 
     private ActorSystem leaderSystem;
     private ActorSystem followerSystem;
 
 
     private final Builder leaderDatastoreContextBuilder =
-            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
-                    .logicalStoreType(
-                            org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION);
+            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
 
     private final DatastoreContext.Builder followerDatastoreContextBuilder =
-            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
-                    .logicalStoreType(
-                            org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION);
+            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
 
     private DistributedDataStore leaderConfigDatastore;
     private DistributedDataStore leaderOperDatastore;
@@ -131,21 +128,25 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
             followerOperDatastore.close();
         }
 
-        JavaTestKit.shutdownActorSystem(leaderSystem, null, Boolean.TRUE);
-        JavaTestKit.shutdownActorSystem(followerSystem, null, Boolean.TRUE);
+        TestKit.shutdownActorSystem(leaderSystem, true);
+        TestKit.shutdownActorSystem(followerSystem, true);
 
         InMemoryJournal.clear();
         InMemorySnapshotStore.clear();
     }
 
     private void initEmptyDatastores() throws Exception {
+        initEmptyDatastores(MODULE_SHARDS_CONFIG);
+    }
+
+    private void initEmptyDatastores(final String moduleShardsConfig) throws Exception {
         leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);
 
         leaderConfigDatastore = leaderTestKit.setupDistributedDataStore(
-                "config", MODULE_SHARDS_CONFIG, true,
+                "config", moduleShardsConfig, true,
                 SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
         leaderOperDatastore = leaderTestKit.setupDistributedDataStore(
-                "operational", MODULE_SHARDS_CONFIG, true,
+                "operational", moduleShardsConfig, true,
                 SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
 
         leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
@@ -155,48 +156,60 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
         followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder);
 
         followerConfigDatastore = followerTestKit.setupDistributedDataStore(
-                "config", MODULE_SHARDS_CONFIG, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
+                "config", moduleShardsConfig, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
         followerOperDatastore = followerTestKit.setupDistributedDataStore(
-                "operational", MODULE_SHARDS_CONFIG, true,
+                "operational", moduleShardsConfig, true,
                 SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
 
         followerShardFactory = new DistributedShardedDOMDataTree(followerSystemProvider,
                 followerOperDatastore,
                 followerConfigDatastore);
 
+        followerTestKit.waitForMembersUp("member-1");
+
+        LOG.info("Initializing leader DistributedShardedDOMDataTree");
         leaderShardFactory.init();
-        followerShardFactory.init();
 
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
-                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
+        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
+                ClusterUtils.getCleanShardName(YangInstanceIdentifier.empty()));
 
-        leaderTestKit.waitUntilLeader(leaderOperDatastore.getActorContext(),
-                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
+        leaderTestKit.waitUntilLeader(leaderOperDatastore.getActorUtils(),
+                ClusterUtils.getCleanShardName(YangInstanceIdentifier.empty()));
 
+        LOG.info("Initializing follower DistributedShardedDOMDataTree");
+        followerShardFactory.init();
     }
 
     @Test
     public void testProducerRegistrations() throws Exception {
+        LOG.info("testProducerRegistrations starting");
         initEmptyDatastores();
 
         leaderTestKit.waitForMembersUp("member-2");
 
+        // TODO: refactor shard creation and verification into its own method
         final DistributedShardRegistration shardRegistration =
                 waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                         TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                         DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
 
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
+        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
 
-        final ActorRef leaderShardManager = leaderConfigDatastore.getActorContext().getShardManager();
+        final ActorRef leaderShardManager = leaderConfigDatastore.getActorUtils().getShardManager();
 
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));
 
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));
 
+        // With members 1 and 2 hosting the shard, the leader should report exactly one peer
+        final Set<String> peers = new HashSet<>();
+        IntegrationTestKit.verifyShardState(leaderConfigDatastore,
+                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
+                        peers.addAll(onDemandShardState.getPeerAddresses().values()));
+        assertEquals(1, peers.size());
+
         final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
         try {
             followerShardFactory.createProducer(Collections.singleton(TEST_ID));
@@ -228,10 +241,13 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
         }
 
         shardRegistration.close().toCompletableFuture().get();
+
+        LOG.info("testProducerRegistrations ending");
     }
 
     @Test
     public void testWriteIntoMultipleShards() throws Exception {
+        LOG.info("testWriteIntoMultipleShards starting");
         initEmptyDatastores();
 
         leaderTestKit.waitForMembersUp("member-2");
@@ -243,11 +259,17 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
                         DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
 
 
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
+        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-        findLocalShard(followerConfigDatastore.getActorContext(),
+        findLocalShard(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
 
+        // The leader shard should report exactly one peer (the follower member)
+        final Set<String> peers = new HashSet<>();
+        IntegrationTestKit.verifyShardState(leaderConfigDatastore,
+                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
+                        peers.addAll(onDemandShardState.getPeerAddresses().values()));
+        assertEquals(1, peers.size());
+
         LOG.debug("Got after waiting for nonleader");
         final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
 
@@ -263,13 +285,16 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
         cursor.close();
         LOG.warn("Got to pre submit");
 
-        tx.submit().checkedGet();
+        tx.commit().get();
 
         shardRegistration.close().toCompletableFuture().get();
+
+        LOG.info("testWriteIntoMultipleShards ending");
     }
 
     @Test
     public void testMultipleShardRegistrations() throws Exception {
+        LOG.info("testMultipleShardRegistrations starting");
         initEmptyDatastores();
 
         final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
@@ -291,42 +316,41 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
                 Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                 DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
 
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
+        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
+        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
+        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
+        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
 
         // check leader has local shards
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
 
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
 
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
 
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));
 
         // check follower has local shards
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
 
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
 
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
 
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
+        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));
 
-
         LOG.debug("Closing registrations");
 
         reg1.close().toCompletableFuture().get();
@@ -334,62 +358,80 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
         reg3.close().toCompletableFuture().get();
         reg4.close().toCompletableFuture().get();
 
-        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
+        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
 
-        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
+        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
 
-        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
+        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
 
-        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
+        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
 
         LOG.debug("All leader shards gone");
 
-        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
+        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
 
-        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
+        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
 
-        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
+        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
 
-        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
+        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                 ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
 
         LOG.debug("All follower shards gone");
+        LOG.info("testMultipleShardRegistrations ending");
     }
 
     @Test
     public void testMultipleRegistrationsAtOnePrefix() throws Exception {
+        LOG.info("testMultipleRegistrationsAtOnePrefix starting");
         initEmptyDatastores();
 
-        for (int i = 0; i < 10; i++) {
-            LOG.debug("Round {}", i);
+        for (int i = 0; i < 5; i++) {
+            LOG.info("Round {}", i);
             final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
                     TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                     DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
 
-            leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
+            leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
                     ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
 
-            assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
+            assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
                     ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
 
-            assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
+            assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
                     ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
 
+
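+            // The leader should again report exactly one peer for the shard created in this round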
+            final Set<String> peers = new HashSet<>();
+            IntegrationTestKit.verifyShardState(leaderConfigDatastore,
+                    ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
+                            peers.addAll(onDemandShardState.getPeerAddresses().values()));
+            assertEquals(1, peers.size());
+
             waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
 
-            waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
+            waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
                     ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
 
-            waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
+            waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
                     ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
         }
 
+        LOG.info("testMultipleRegistrationsAtOnePrefix ending");
+    }
+
+    @Test
+    public void testInitialBootstrappingWithNoModuleShards() throws Exception {
+        LOG.info("testInitialBootstrappingWithNoModuleShards starting");
+        initEmptyDatastores("module-shards-default-member-1.conf");
+
+        // We just verify the DistributedShardedDOMDataTree initialized without error.
     }
 }