package org.opendaylight.controller.cluster.sharding;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import akka.actor.Address;
import akka.actor.AddressFromURIString;
import akka.cluster.Cluster;
-import akka.testkit.JavaTestKit;
+import akka.testkit.javadsl.TestKit;
import com.google.common.collect.Lists;
import com.typesafe.config.ConfigFactory;
import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mockito;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-@Ignore("https://bugs.opendaylight.org/show_bug.cgi?id=8301")
public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);
private static final DOMDataTreeIdentifier TEST_ID =
new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
- private static final String MODULE_SHARDS_CONFIG = "module-shards-cars-member-1-and-2.conf";
+ private static final String MODULE_SHARDS_CONFIG = "module-shards-default.conf";
private ActorSystem leaderSystem;
private ActorSystem followerSystem;
private final Builder leaderDatastoreContextBuilder =
- DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
- .logicalStoreType(
- org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION);
+ DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
private final DatastoreContext.Builder followerDatastoreContextBuilder =
- DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
- .logicalStoreType(
- org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION);
+ DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
private DistributedDataStore leaderConfigDatastore;
private DistributedDataStore leaderOperDatastore;
followerOperDatastore.close();
}
- JavaTestKit.shutdownActorSystem(leaderSystem, null, Boolean.TRUE);
- JavaTestKit.shutdownActorSystem(followerSystem, null, Boolean.TRUE);
+ TestKit.shutdownActorSystem(leaderSystem, true);
+ TestKit.shutdownActorSystem(followerSystem, true);
InMemoryJournal.clear();
InMemorySnapshotStore.clear();
}
private void initEmptyDatastores() throws Exception {
+ initEmptyDatastores(MODULE_SHARDS_CONFIG);
+ }
+
+ private void initEmptyDatastores(final String moduleShardsConfig) throws Exception {
leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);
leaderConfigDatastore = leaderTestKit.setupDistributedDataStore(
- "config", MODULE_SHARDS_CONFIG, true,
+ "config", moduleShardsConfig, true,
SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
leaderOperDatastore = leaderTestKit.setupDistributedDataStore(
- "operational", MODULE_SHARDS_CONFIG, true,
+ "operational", moduleShardsConfig, true,
SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder);
followerConfigDatastore = followerTestKit.setupDistributedDataStore(
- "config", MODULE_SHARDS_CONFIG, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
+ "config", moduleShardsConfig, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
followerOperDatastore = followerTestKit.setupDistributedDataStore(
- "operational", MODULE_SHARDS_CONFIG, true,
+ "operational", moduleShardsConfig, true,
SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
followerShardFactory = new DistributedShardedDOMDataTree(followerSystemProvider,
followerOperDatastore,
followerConfigDatastore);
+ followerTestKit.waitForMembersUp("member-1");
+
+ LOG.info("Initializing leader DistributedShardedDOMDataTree");
leaderShardFactory.init();
- followerShardFactory.init();
leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
leaderTestKit.waitUntilLeader(leaderOperDatastore.getActorContext(),
ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
+ LOG.info("Initializing follower DistributedShardedDOMDataTree");
+ followerShardFactory.init();
}
@Test
public void testProducerRegistrations() throws Exception {
+ LOG.info("testProducerRegistrations starting");
initEmptyDatastores();
leaderTestKit.waitForMembersUp("member-2");
+ // TODO refactor shard creation and verification to own method
final DistributedShardRegistration shardRegistration =
waitOnAsyncTask(leaderShardFactory.createDistributedShard(
TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));
+ final Set<String> peers = new HashSet<>();
+ IntegrationTestKit.verifyShardState(leaderConfigDatastore,
+ ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
+ peers.addAll(onDemandShardState.getPeerAddresses().values()));
+ assertEquals(1, peers.size());
+
final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
try {
followerShardFactory.createProducer(Collections.singleton(TEST_ID));
}
shardRegistration.close().toCompletableFuture().get();
+
+ LOG.info("testProducerRegistrations ending");
}
@Test
public void testWriteIntoMultipleShards() throws Exception {
+ LOG.info("testWriteIntoMultipleShards starting");
initEmptyDatastores();
leaderTestKit.waitForMembersUp("member-2");
findLocalShard(followerConfigDatastore.getActorContext(),
ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
+ final Set<String> peers = new HashSet<>();
+ IntegrationTestKit.verifyShardState(leaderConfigDatastore,
+ ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
+ peers.addAll(onDemandShardState.getPeerAddresses().values()));
+ assertEquals(1, peers.size());
+
LOG.debug("Got after waiting for nonleader");
final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
cursor.close();
LOG.warn("Got to pre submit");
- tx.submit().checkedGet();
+ tx.commit().get();
shardRegistration.close().toCompletableFuture().get();
+
+ LOG.info("testWriteIntoMultipleShards ending");
}
@Test
public void testMultipleShardRegistrations() throws Exception {
+ LOG.info("testMultipleShardRegistrations starting");
initEmptyDatastores();
final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));
-
LOG.debug("Closing registrations");
reg1.close().toCompletableFuture().get();
ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
LOG.debug("All follower shards gone");
+ LOG.info("testMultipleShardRegistrations ending");
}
@Test
public void testMultipleRegistrationsAtOnePrefix() throws Exception {
+ LOG.info("testMultipleRegistrationsAtOnePrefix starting");
initEmptyDatastores();
- for (int i = 0; i < 10; i++) {
- LOG.debug("Round {}", i);
+ for (int i = 0; i < 5; i++) {
+ LOG.info("Round {}", i);
final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
+
+ final Set<String> peers = new HashSet<>();
+ IntegrationTestKit.verifyShardState(leaderConfigDatastore,
+ ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
+ peers.addAll(onDemandShardState.getPeerAddresses().values()));
+ assertEquals(1, peers.size());
+
waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
}
+ LOG.info("testMultipleRegistrationsAtOnePrefix ending");
+ }
+
+ @Test
+ public void testInitialBootstrappingWithNoModuleShards() throws Exception {
+ LOG.info("testInitialBootstrappingWithNoModuleShards starting");
+ initEmptyDatastores("module-shards-default-member-1.conf");
+
+ // We just verify the DistributedShardedDOMDataTree initialized without error.
}
}