From: Robert Varga
Date: Mon, 12 Dec 2016 18:34:38 +0000 (+0100)
Subject: BUG-5280: switch tests to ClientBackedDataStore
X-Git-Tag: release/carbon~113
X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=commitdiff_plain;h=1a6462c3cdb2cd310ab9503beb18da70f6e3779d

BUG-5280: switch tests to ClientBackedDataStore

Enable the integration tests to run against the new frontend code using
parameterized JUnit. Tests that do not yet pass with the new code are
ignored; with the old code, all tests run and pass.

Change-Id: Ib5656ecd2333a56d5c466e633fbdd477accc4095
Signed-off-by: Robert Varga
Signed-off-by: Ivan Hrasko
---

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
index c9a30677a6..73a6c74f83 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
@@ -101,8 +101,8 @@ public class DataTreeCohortIntegrationTest {
         ArgumentCaptor candidateCapt = ArgumentCaptor.forClass(Collection.class);
         IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = kit.setupDistributedDataStore("testSuccessfulCanCommitWithNoopPostStep",
-                "test-1")) {
+        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
+                DistributedDataStore.class, "testSuccessfulCanCommitWithNoopPostStep", "test-1")) {
             final ObjectRegistration cohortReg = dataStore.registerCommitCohort(TEST_ID, cohort);
             assertNotNull(cohortReg);
@@ -144,7 +144,8 @@ public class DataTreeCohortIntegrationTest {
             any(Collection.class), any(SchemaContext.class));
         IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = kit.setupDistributedDataStore("testFailedCanCommit", "test-1")) {
+        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
+                DistributedDataStore.class, "testFailedCanCommit", "test-1")) {
             dataStore.registerCommitCohort(TEST_ID, failedCohort);

             IntegrationTestKit.verifyShardState(dataStore, "test-1",
@@ -170,8 +171,8 @@ public class DataTreeCohortIntegrationTest {
             any(Collection.class), any(SchemaContext.class));
         IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = kit.setupDistributedDataStore("testCanCommitWithMultipleListEntries",
-                "cars-1")) {
+        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
+                DistributedDataStore.class, "testCanCommitWithMultipleListEntries", "cars-1")) {
             final ObjectRegistration cohortReg = dataStore.registerCommitCohort(
                 new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH
                     .node(CarsModel.CAR_QNAME)), cohort);
@@ -287,8 +288,8 @@ public class DataTreeCohortIntegrationTest {
             any(Collection.class), any(SchemaContext.class));
         IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = kit.setupDistributedDataStore("testAbortAfterCanCommit",
-                "test-1", "cars-1")) {
+        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
+                DistributedDataStore.class, "testAbortAfterCanCommit", "test-1", "cars-1")) {
dataStore.registerCommitCohort(TEST_ID, cohortToAbort); IntegrationTestKit.verifyShardState(dataStore, "test-1", diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java index e5b14b021e..11379a0bf2 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java @@ -44,10 +44,16 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.After; +import org.junit.Assume; +import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; import org.mockito.Mockito; +import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore; import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker; import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException; import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException; @@ -58,7 +64,6 @@ import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardData import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion; import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState; import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener; -import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry; import org.opendaylight.controller.cluster.raft.persisted.Snapshot; import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal; import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore; @@ -93,22 +98,33 @@ import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes; import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder; import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory; +@RunWith(Parameterized.class) public class DistributedDataStoreIntegrationTest { + @Parameters(name = "{0}") + public static Collection data() { + return Arrays.asList(new Object[][] { + { DistributedDataStore.class }, { ClientBackedDataStore.class } + }); + } + + @Parameter + public Class testParameter; + private static ActorSystem system; private final DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder() .shardHeartbeatIntervalInMillis(100); - @BeforeClass - public static void setUpClass() throws IOException { + @Before + public void setUp() throws IOException { system = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1")); Address member1Address = AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558"); Cluster.get(system).join(member1Address); } - @AfterClass - public static void tearDownClass() throws IOException { + @After + public void tearDown() throws IOException { 
JavaTestKit.shutdownActorSystem(system); system = null; } @@ -121,8 +137,8 @@ public class DistributedDataStoreIntegrationTest { public void testWriteTransactionWithSingleShard() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore("transactionIntegrationTest", - "test-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "transactionIntegrationTest", "test-1")) { testWriteTransaction(dataStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME)); @@ -138,8 +154,8 @@ public class DistributedDataStoreIntegrationTest { public void testWriteTransactionWithMultipleShards() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testWriteTransactionWithMultipleShards", "cars-1", "people-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testWriteTransactionWithMultipleShards", "cars-1", "people-1")) { DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction(); assertNotNull("newWriteOnlyTransaction returned null", writeTx); @@ -158,19 +174,18 @@ public class DistributedDataStoreIntegrationTest { writeTx = dataStore.newWriteOnlyTransaction(); - MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); - YangInstanceIdentifier carPath = CarsModel.newCarPath("optima"); + final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima"); writeTx.write(carPath, car); - MapEntryNode person = PeopleModel.newPersonEntry("jack"); - YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack"); + final MapEntryNode person = PeopleModel.newPersonEntry("jack"); + final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack"); writeTx.write(personPath, person); doCommit(writeTx.ready()); // Verify the data in the store - - DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); Optional> optional = readTx.read(carPath).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); @@ -188,23 +203,20 @@ public class DistributedDataStoreIntegrationTest { public void testReadWriteTransactionWithSingleShard() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testReadWriteTransactionWithSingleShard", "test-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testReadWriteTransactionWithSingleShard", "test-1")) { // 1. Create a read-write Tx - - DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction(); assertNotNull("newReadWriteTransaction returned null", readWriteTx); // 2. Write some data - - YangInstanceIdentifier nodePath = TestModel.TEST_PATH; - NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME); + final YangInstanceIdentifier nodePath = TestModel.TEST_PATH; + final NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME); readWriteTx.write(nodePath, nodeToWrite); // 3. 
Read the data from Tx - - Boolean exists = readWriteTx.exists(nodePath).checkedGet(5, TimeUnit.SECONDS); + final Boolean exists = readWriteTx.exists(nodePath).checkedGet(5, TimeUnit.SECONDS); assertEquals("exists", true, exists); Optional> optional = readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS); @@ -212,16 +224,13 @@ public class DistributedDataStoreIntegrationTest { assertEquals("Data node", nodeToWrite, optional.get()); // 4. Ready the Tx for commit - - DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready(); + final DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready(); // 5. Commit the Tx - doCommit(cohort); // 6. Verify the data in the store - - DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); @@ -235,8 +244,8 @@ public class DistributedDataStoreIntegrationTest { public void testReadWriteTransactionWithMultipleShards() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testReadWriteTransactionWithMultipleShards", "cars-1", "people-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testReadWriteTransactionWithMultipleShards", "cars-1", "people-1")) { DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction(); assertNotNull("newReadWriteTransaction returned null", readWriteTx); @@ -255,15 +264,15 @@ public class DistributedDataStoreIntegrationTest { readWriteTx = dataStore.newReadWriteTransaction(); - MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); - YangInstanceIdentifier carPath = CarsModel.newCarPath("optima"); + final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima"); readWriteTx.write(carPath, car); - MapEntryNode person = PeopleModel.newPersonEntry("jack"); - YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack"); + final MapEntryNode person = PeopleModel.newPersonEntry("jack"); + final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack"); readWriteTx.write(personPath, person); - Boolean exists = readWriteTx.exists(carPath).checkedGet(5, TimeUnit.SECONDS); + final Boolean exists = readWriteTx.exists(carPath).checkedGet(5, TimeUnit.SECONDS); assertEquals("exists", true, exists); Optional> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS); @@ -273,7 +282,6 @@ public class DistributedDataStoreIntegrationTest { doCommit(readWriteTx.ready()); // Verify the data in the store - DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); optional = readTx.read(carPath).get(5, TimeUnit.SECONDS); @@ -293,10 +301,10 @@ public class DistributedDataStoreIntegrationTest { public void testSingleTransactionsWritesInQuickSuccession() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testSingleTransactionsWritesInQuickSuccession", "cars-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testSingleTransactionsWritesInQuickSuccession", "cars-1")) { - DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); + final DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); 
DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); @@ -313,7 +321,7 @@ public class DistributedDataStoreIntegrationTest { doCommit(writeTx.ready()); - Optional> optional = txChain.newReadOnlyTransaction() + final Optional> optional = txChain.newReadOnlyTransaction() .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); assertEquals("# cars", numCars, ((Collection) optional.get().getValue()).size()); @@ -327,26 +335,25 @@ public class DistributedDataStoreIntegrationTest { throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - String shardName = "test-1"; + final String shardName = "test-1"; // Setup the InMemoryJournal to block shard recovery to ensure // the shard isn't // initialized until we create and submit the write the Tx. - String persistentID = String.format("member-1-shard-%s-%s", shardName, testName); - CountDownLatch blockRecoveryLatch = new CountDownLatch(1); + final String persistentID = String.format("member-1-shard-%s-%s", shardName, testName); + final CountDownLatch blockRecoveryLatch = new CountDownLatch(1); InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch); - try (AbstractDataStore dataStore = setupDistributedDataStore(testName, false, shardName)) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, testName, false, shardName)) { // Create the write Tx - final DOMStoreWriteTransaction writeTx = writeOnly ? dataStore.newWriteOnlyTransaction() : dataStore.newReadWriteTransaction(); assertNotNull("newReadWriteTransaction returned null", writeTx); // Do some modification operations and ready the Tx on a // separate thread. - final YangInstanceIdentifier listEntryPath = YangInstanceIdentifier .builder(TestModel.OUTER_LIST_PATH) .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(); @@ -354,7 +361,7 @@ public class DistributedDataStoreIntegrationTest { final AtomicReference txCohort = new AtomicReference<>(); final AtomicReference caughtEx = new AtomicReference<>(); final CountDownLatch txReady = new CountDownLatch(1); - Thread txThread = new Thread() { + final Thread txThread = new Thread() { @Override public void run() { try { @@ -371,7 +378,6 @@ public class DistributedDataStoreIntegrationTest { txCohort.set(writeTx.ready()); } catch (Exception e) { caughtEx.set(e); - return; } finally { txReady.countDown(); } @@ -381,8 +387,7 @@ public class DistributedDataStoreIntegrationTest { txThread.start(); // Wait for the Tx operations to complete. - - boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS); + final boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS); if (caughtEx.get() != null) { throw caughtEx.get(); } @@ -392,16 +397,13 @@ public class DistributedDataStoreIntegrationTest { // At this point the Tx operations should be waiting for the // shard to initialize so // trigger the latch to let the shard recovery to continue. - blockRecoveryLatch.countDown(); // Wait for the Tx commit to complete. 
- doCommit(txCohort.get()); // Verify the data in the store - - DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); Optional> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); @@ -432,32 +434,31 @@ public class DistributedDataStoreIntegrationTest { public void testTransactionReadsWithShardNotInitiallyReady() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - String testName = "testTransactionReadsWithShardNotInitiallyReady"; - String shardName = "test-1"; + final String testName = "testTransactionReadsWithShardNotInitiallyReady"; + final String shardName = "test-1"; // Setup the InMemoryJournal to block shard recovery to ensure // the shard isn't // initialized until we create the Tx. - String persistentID = String.format("member-1-shard-%s-%s", shardName, testName); - CountDownLatch blockRecoveryLatch = new CountDownLatch(1); + final String persistentID = String.format("member-1-shard-%s-%s", shardName, testName); + final CountDownLatch blockRecoveryLatch = new CountDownLatch(1); InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch); - try (AbstractDataStore dataStore = setupDistributedDataStore(testName, false, shardName)) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, testName, false, shardName)) { // Create the read-write Tx - final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction(); assertNotNull("newReadWriteTransaction returned null", readWriteTx); // Do some reads on the Tx on a separate thread. - final AtomicReference> txExistsFuture = new AtomicReference<>(); final AtomicReference>, ReadFailedException>> txReadFuture = new AtomicReference<>(); final AtomicReference caughtEx = new AtomicReference<>(); final CountDownLatch txReadsDone = new CountDownLatch(1); - Thread txThread = new Thread() { + final Thread txThread = new Thread() { @Override public void run() { try { @@ -469,7 +470,6 @@ public class DistributedDataStoreIntegrationTest { txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH)); } catch (Exception e) { caughtEx.set(e); - return; } finally { txReadsDone.countDown(); } @@ -479,7 +479,6 @@ public class DistributedDataStoreIntegrationTest { txThread.start(); // Wait for the Tx operations to complete. - boolean done = Uninterruptibles.awaitUninterruptibly(txReadsDone, 5, TimeUnit.SECONDS); if (caughtEx.get() != null) { throw caughtEx.get(); @@ -490,11 +489,9 @@ public class DistributedDataStoreIntegrationTest { // At this point the Tx operations should be waiting for the // shard to initialize so // trigger the latch to let the shard recovery to continue. - blockRecoveryLatch.countDown(); // Wait for the reads to complete and verify. 
- assertEquals("exists", true, txExistsFuture.get().checkedGet(5, TimeUnit.SECONDS)); assertEquals("read", true, txReadFuture.get().checkedGet(5, TimeUnit.SECONDS).isPresent()); @@ -509,74 +506,64 @@ public class DistributedDataStoreIntegrationTest { public void testTransactionCommitFailureWithShardNotInitialized() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - String testName = "testTransactionCommitFailureWithShardNotInitialized"; - String shardName = "test-1"; + final String testName = "testTransactionCommitFailureWithShardNotInitialized"; + final String shardName = "test-1"; // Set the shard initialization timeout low for the test. - datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS); // Setup the InMemoryJournal to block shard recovery // indefinitely. - - String persistentID = String.format("member-1-shard-%s-%s", shardName, testName); - CountDownLatch blockRecoveryLatch = new CountDownLatch(1); + final String persistentID = String.format("member-1-shard-%s-%s", shardName, testName); + final CountDownLatch blockRecoveryLatch = new CountDownLatch(1); InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch); InMemoryJournal.addEntry(persistentID, 1, "Dummy data so akka will read from persistence"); - try (AbstractDataStore dataStore = setupDistributedDataStore(testName, false, shardName)) { - - // Create the write Tx - - final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction(); - assertNotNull("newReadWriteTransaction returned null", writeTx); - - // Do some modifications and ready the Tx on a separate - // thread. - - final AtomicReference txCohort = new AtomicReference<>(); - final AtomicReference caughtEx = new AtomicReference<>(); - final CountDownLatch txReady = new CountDownLatch(1); - Thread txThread = new Thread() { - @Override - public void run() { - try { - writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME)); - - txCohort.set(writeTx.ready()); - } catch (Exception e) { - caughtEx.set(e); - return; - } finally { - txReady.countDown(); - } - } - }; + final AbstractDataStore dataStore = + setupAbstractDataStore(testParameter, testName, false, shardName); - txThread.start(); + // Create the write Tx + final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction(); + assertNotNull("newReadWriteTransaction returned null", writeTx); - // Wait for the Tx operations to complete. + // Do some modifications and ready the Tx on a separate + // thread. + final AtomicReference txCohort = new AtomicReference<>(); + final AtomicReference caughtEx = new AtomicReference<>(); + final CountDownLatch txReady = new CountDownLatch(1); + final Thread txThread = new Thread(() -> { + try { + writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME)); - boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS); - if (caughtEx.get() != null) { - throw caughtEx.get(); + txCohort.set(writeTx.ready()); + } catch (Exception e) { + caughtEx.set(e); + } finally { + txReady.countDown(); } + }); - assertEquals("Tx ready", true, done); + txThread.start(); - // Wait for the commit to complete. Since the shard never - // initialized, the Tx should - // have timed out and throw an appropriate exception cause. + // Wait for the Tx operations to complete. 
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS); + if (caughtEx.get() != null) { + throw caughtEx.get(); + } - try { - txCohort.get().canCommit().get(5, TimeUnit.SECONDS); - } catch (ExecutionException e) { - Throwables.propagateIfInstanceOf(e.getCause(), Exception.class); - Throwables.propagate(e.getCause()); - } finally { - blockRecoveryLatch.countDown(); - } + assertEquals("Tx ready", true, done); + + // Wait for the commit to complete. Since the shard never + // initialized, the Tx should + // have timed out and throw an appropriate exception cause. + try { + txCohort.get().canCommit().get(5, TimeUnit.SECONDS); + fail("Expected NotInitializedException"); + } catch (final Exception e) { + Throwables.propagate(Throwables.getRootCause(e)); + } finally { + blockRecoveryLatch.countDown(); } } }; @@ -587,58 +574,50 @@ public class DistributedDataStoreIntegrationTest { public void testTransactionReadFailureWithShardNotInitialized() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - String testName = "testTransactionReadFailureWithShardNotInitialized"; - String shardName = "test-1"; + final String testName = "testTransactionReadFailureWithShardNotInitialized"; + final String shardName = "test-1"; // Set the shard initialization timeout low for the test. - datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS); // Setup the InMemoryJournal to block shard recovery // indefinitely. - - String persistentID = String.format("member-1-shard-%s-%s", shardName, testName); - CountDownLatch blockRecoveryLatch = new CountDownLatch(1); + final String persistentID = String.format("member-1-shard-%s-%s", shardName, testName); + final CountDownLatch blockRecoveryLatch = new CountDownLatch(1); InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch); InMemoryJournal.addEntry(persistentID, 1, "Dummy data so akka will read from persistence"); - try (AbstractDataStore dataStore = setupDistributedDataStore(testName, false, shardName)) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, testName, false, shardName)) { // Create the read-write Tx - final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction(); assertNotNull("newReadWriteTransaction returned null", readWriteTx); // Do a read on the Tx on a separate thread. - final AtomicReference>, ReadFailedException>> txReadFuture = new AtomicReference<>(); final AtomicReference caughtEx = new AtomicReference<>(); final CountDownLatch txReadDone = new CountDownLatch(1); - Thread txThread = new Thread() { - @Override - public void run() { - try { - readWriteTx.write(TestModel.TEST_PATH, - ImmutableNodes.containerNode(TestModel.TEST_QNAME)); + final Thread txThread = new Thread(() -> { + try { + readWriteTx.write(TestModel.TEST_PATH, + ImmutableNodes.containerNode(TestModel.TEST_QNAME)); - txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH)); + txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH)); - readWriteTx.close(); - } catch (Exception e) { - caughtEx.set(e); - return; - } finally { - txReadDone.countDown(); - } + readWriteTx.close(); + } catch (Exception e) { + caughtEx.set(e); + } finally { + txReadDone.countDown(); } - }; + }); txThread.start(); // Wait for the Tx operations to complete. 
- boolean done = Uninterruptibles.awaitUninterruptibly(txReadDone, 5, TimeUnit.SECONDS); if (caughtEx.get() != null) { throw caughtEx.get(); @@ -649,12 +628,11 @@ public class DistributedDataStoreIntegrationTest { // Wait for the read to complete. Since the shard never // initialized, the Tx should // have timed out and throw an appropriate exception cause. - try { txReadFuture.get().checkedGet(5, TimeUnit.SECONDS); - } catch (ReadFailedException e) { - Throwables.propagateIfInstanceOf(e.getCause(), Exception.class); - Throwables.propagate(e.getCause()); + fail("Expected NotInitializedException"); + } catch (final ReadFailedException e) { + Throwables.propagate(Throwables.getRootCause(e)); } finally { blockRecoveryLatch.countDown(); } @@ -668,7 +646,7 @@ public class DistributedDataStoreIntegrationTest { throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - String shardName = "default"; + final String shardName = "default"; // We don't want the shard to become the leader so prevent shard // elections. @@ -680,45 +658,39 @@ public class DistributedDataStoreIntegrationTest { datastoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(1) .shardInitializationTimeout(200, TimeUnit.MILLISECONDS); - try (AbstractDataStore dataStore = setupDistributedDataStore(testName, false, shardName)) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, testName, false, shardName)) { - Object result = dataStore.getActorContext().executeOperation( + final Object result = dataStore.getActorContext().executeOperation( dataStore.getActorContext().getShardManager(), new FindLocalShard(shardName, true)); assertTrue("Expected LocalShardFound. Actual: " + result, result instanceof LocalShardFound); // Create the write Tx. - - try (DOMStoreWriteTransaction writeTx = writeOnly ? dataStore.newWriteOnlyTransaction() + try (final DOMStoreWriteTransaction writeTx = writeOnly ? dataStore.newWriteOnlyTransaction() : dataStore.newReadWriteTransaction()) { assertNotNull("newReadWriteTransaction returned null", writeTx); // Do some modifications and ready the Tx on a separate // thread. - final AtomicReference txCohort = new AtomicReference<>(); final AtomicReference caughtEx = new AtomicReference<>(); final CountDownLatch txReady = new CountDownLatch(1); - Thread txThread = new Thread() { - @Override - public void run() { - try { - writeTx.write(TestModel.JUNK_PATH, - ImmutableNodes.containerNode(TestModel.JUNK_QNAME)); - - txCohort.set(writeTx.ready()); - } catch (Exception e) { - caughtEx.set(e); - return; - } finally { - txReady.countDown(); - } + final Thread txThread = new Thread(() -> { + try { + writeTx.write(TestModel.JUNK_PATH, + ImmutableNodes.containerNode(TestModel.JUNK_QNAME)); + + txCohort.set(writeTx.ready()); + } catch (Exception e) { + caughtEx.set(e); + } finally { + txReady.countDown(); } - }; + }); txThread.start(); // Wait for the Tx operations to complete. - boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS); if (caughtEx.get() != null) { throw caughtEx.get(); @@ -730,12 +702,11 @@ public class DistributedDataStoreIntegrationTest { // leader was elected in time, the Tx // should have timed out and throw an appropriate // exception cause. 
- try { txCohort.get().canCommit().get(5, TimeUnit.SECONDS); - } catch (ExecutionException e) { - Throwables.propagateIfInstanceOf(e.getCause(), Exception.class); - Throwables.propagate(e.getCause()); + fail("Expected NoShardLeaderException"); + } catch (final ExecutionException e) { + Throwables.propagate(Throwables.getRootCause(e)); } } } @@ -758,15 +729,15 @@ public class DistributedDataStoreIntegrationTest { public void testTransactionAbort() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore("transactionAbortIntegrationTest", - "test-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "transactionAbortIntegrationTest", "test-1")) { - DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction(); assertNotNull("newWriteOnlyTransaction returned null", writeTx); writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME)); - DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); + final DOMStoreThreePhaseCommitCohort cohort = writeTx.ready(); cohort.canCommit().get(5, TimeUnit.SECONDS); @@ -784,49 +755,41 @@ public class DistributedDataStoreIntegrationTest { public void testTransactionChainWithSingleShard() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore("testTransactionChainWithSingleShard", - "test-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testTransactionChainWithSingleShard", "test-1")) { // 1. Create a Tx chain and write-only Tx + final DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); - DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); - - DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); assertNotNull("newWriteOnlyTransaction returned null", writeTx); // 2. Write some data - - NormalizedNode testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME); + final NormalizedNode testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME); writeTx.write(TestModel.TEST_PATH, testNode); // 3. Ready the Tx for commit - final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready(); // 4. Commit the Tx on another thread that first waits for // the second read Tx. - final CountDownLatch continueCommit1 = new CountDownLatch(1); final CountDownLatch commit1Done = new CountDownLatch(1); final AtomicReference commit1Error = new AtomicReference<>(); - new Thread() { - @Override - public void run() { - try { - continueCommit1.await(); - doCommit(cohort1); - } catch (Exception e) { - commit1Error.set(e); - } finally { - commit1Done.countDown(); - } + new Thread(() -> { + try { + continueCommit1.await(); + doCommit(cohort1); + } catch (Exception e) { + commit1Error.set(e); + } finally { + commit1Done.countDown(); } - }.start(); + }).start(); // 5. Create a new read Tx from the chain to read and verify // the data from the first // Tx is visible after being readied. - DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction(); Optional> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); @@ -834,9 +797,8 @@ public class DistributedDataStoreIntegrationTest { // 6. 
Create a new RW Tx from the chain, write more data, // and ready it - - DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction(); - MapNode outerNode = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(); + final DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction(); + final MapNode outerNode = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(); rwTx.write(TestModel.OUTER_LIST_PATH, outerNode); final DOMStoreThreePhaseCommitCohort cohort2 = rwTx.ready(); @@ -844,7 +806,6 @@ public class DistributedDataStoreIntegrationTest { // 7. Create a new read Tx from the chain to read the data // from the last RW Tx to // verify it is visible. - readTx = txChain.newReadWriteTransaction(); optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); @@ -852,7 +813,6 @@ public class DistributedDataStoreIntegrationTest { // 8. Wait for the 2 commits to complete and close the // chain. - continueCommit1.countDown(); Uninterruptibles.awaitUninterruptibly(commit1Done, 5, TimeUnit.SECONDS); @@ -866,7 +826,6 @@ public class DistributedDataStoreIntegrationTest { // 9. Create a new read Tx from the data store and verify // committed data. - readTx = dataStore.newReadOnlyTransaction(); optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); @@ -880,10 +839,10 @@ public class DistributedDataStoreIntegrationTest { public void testTransactionChainWithMultipleShards() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testTransactionChainWithMultipleShards", "cars-1", "people-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testTransactionChainWithMultipleShards", "cars-1", "people-1")) { - DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); + final DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); assertNotNull("newWriteOnlyTransaction returned null", writeTx); @@ -896,14 +855,14 @@ public class DistributedDataStoreIntegrationTest { final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready(); - DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction(); - MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); - YangInstanceIdentifier carPath = CarsModel.newCarPath("optima"); + final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima"); readWriteTx.write(carPath, car); - MapEntryNode person = PeopleModel.newPersonEntry("jack"); - YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack"); + final MapEntryNode person = PeopleModel.newPersonEntry("jack"); + final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack"); readWriteTx.merge(personPath, person); Optional> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS); @@ -914,16 +873,16 @@ public class DistributedDataStoreIntegrationTest { assertEquals("isPresent", true, optional.isPresent()); assertEquals("Data node", person, optional.get()); - DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready(); + final DOMStoreThreePhaseCommitCohort cohort2 = 
readWriteTx.ready(); writeTx = txChain.newWriteOnlyTransaction(); writeTx.delete(carPath); - DOMStoreThreePhaseCommitCohort cohort3 = writeTx.ready(); + final DOMStoreThreePhaseCommitCohort cohort3 = writeTx.ready(); - ListenableFuture canCommit1 = cohort1.canCommit(); - ListenableFuture canCommit2 = cohort2.canCommit(); + final ListenableFuture canCommit1 = cohort1.canCommit(); + final ListenableFuture canCommit2 = cohort2.canCommit(); doCommit(canCommit1, cohort1); doCommit(canCommit2, cohort2); @@ -931,7 +890,7 @@ public class DistributedDataStoreIntegrationTest { txChain.close(); - DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); optional = readTx.read(carPath).get(5, TimeUnit.SECONDS); assertEquals("isPresent", false, optional.isPresent()); @@ -948,27 +907,27 @@ public class DistributedDataStoreIntegrationTest { public void testCreateChainedTransactionsInQuickSuccession() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testCreateChainedTransactionsInQuickSuccession", "cars-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testCreateChainedTransactionsInQuickSuccession", "cars-1")) { - ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( + final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( ImmutableMap.builder() .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(), MoreExecutors.directExecutor()); - TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); + final TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); DOMTransactionChain txChain = broker.createTransactionChain(listener); - List> futures = new ArrayList<>(); + final List> futures = new ArrayList<>(); - DOMDataWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); + final DOMDataWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); writeTx.put(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, CarsModel.emptyContainer()); writeTx.put(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()); futures.add(writeTx.submit()); int numCars = 100; for (int i = 0; i < numCars; i++) { - DOMDataReadWriteTransaction rwTx = txChain.newReadWriteTransaction(); + final DOMDataReadWriteTransaction rwTx = txChain.newReadWriteTransaction(); rwTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.newCarPath("car" + i), CarsModel.newCarEntry("car" + i, BigInteger.valueOf(20000))); @@ -976,11 +935,11 @@ public class DistributedDataStoreIntegrationTest { futures.add(rwTx.submit()); } - for (CheckedFuture f : futures) { + for (final CheckedFuture f : futures) { f.checkedGet(); } - Optional> optional = txChain.newReadOnlyTransaction() + final Optional> optional = txChain.newReadOnlyTransaction() .read(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); assertEquals("# cars", numCars, ((Collection) optional.get().getValue()).size()); @@ -997,18 +956,19 @@ public class DistributedDataStoreIntegrationTest { public void testCreateChainedTransactionAfterEmptyTxReadied() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testCreateChainedTransactionAfterEmptyTxReadied", "test-1")) { + try 
(final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testCreateChainedTransactionAfterEmptyTxReadied", "test-1")) { - DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); + final DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); - DOMStoreReadWriteTransaction rwTx1 = txChain.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction rwTx1 = txChain.newReadWriteTransaction(); rwTx1.ready(); - DOMStoreReadWriteTransaction rwTx2 = txChain.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction rwTx2 = txChain.newReadWriteTransaction(); - Optional> optional = rwTx2.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS); + final Optional> optional = rwTx2.read(TestModel.TEST_PATH).get( + 5, TimeUnit.SECONDS); assertEquals("isPresent", false, optional.isPresent()); txChain.close(); @@ -1021,12 +981,12 @@ public class DistributedDataStoreIntegrationTest { public void testCreateChainedTransactionWhenPreviousNotReady() throws Exception { new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testCreateChainedTransactionWhenPreviousNotReady", "test-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testCreateChainedTransactionWhenPreviousNotReady", "test-1")) { final DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); - DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); assertNotNull("newWriteOnlyTransaction returned null", writeTx); writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME)); @@ -1034,7 +994,6 @@ public class DistributedDataStoreIntegrationTest { // Try to create another Tx of each type - each should fail // b/c the previous Tx wasn't // readied. - assertExceptionOnTxChainCreates(txChain, IllegalStateException.class); } } @@ -1043,18 +1002,19 @@ public class DistributedDataStoreIntegrationTest { @Test public void testCreateChainedTransactionAfterClose() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); + new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testCreateChainedTransactionAfterClose", "test-1")) { - - DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); + try (AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testCreateChainedTransactionAfterClose", "test-1")) { + final DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); txChain.close(); // Try to create another Tx of each type - should fail b/c // the previous Tx was closed. 
- assertExceptionOnTxChainCreates(txChain, TransactionChainClosedException.class); } } @@ -1063,21 +1023,22 @@ public class DistributedDataStoreIntegrationTest { @Test public void testChainWithReadOnlyTxAfterPreviousReady() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); + new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testChainWithReadOnlyTxAfterPreviousReady", "test-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testChainWithReadOnlyTxAfterPreviousReady", "test-1")) { final DOMStoreTransactionChain txChain = dataStore.createTransactionChain(); // Create a write tx and submit. - - DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME)); final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready(); // Create read-only tx's and issue a read. - CheckedFuture>, ReadFailedException> readFuture1 = txChain .newReadOnlyTransaction().read(TestModel.TEST_PATH); @@ -1085,7 +1046,6 @@ public class DistributedDataStoreIntegrationTest { .newReadOnlyTransaction().read(TestModel.TEST_PATH); // Create another write tx and issue the write. - DOMStoreWriteTransaction writeTx2 = txChain.newWriteOnlyTransaction(); writeTx2.write(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build()); @@ -1096,7 +1056,6 @@ public class DistributedDataStoreIntegrationTest { assertEquals("isPresent", true, readFuture2.checkedGet(5, TimeUnit.SECONDS).isPresent()); // Ensure the writes succeed. 
- DOMStoreThreePhaseCommitCohort cohort2 = writeTx2.ready(); doCommit(cohort1); @@ -1111,35 +1070,41 @@ public class DistributedDataStoreIntegrationTest { @Test public void testChainedTransactionFailureWithSingleShard() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); + new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testChainedTransactionFailureWithSingleShard", "cars-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testChainedTransactionFailureWithSingleShard", "cars-1")) { - ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( + final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( ImmutableMap.builder() .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(), MoreExecutors.directExecutor()); - TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); - DOMTransactionChain txChain = broker.createTransactionChain(listener); + final TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); + final DOMTransactionChain txChain = broker.createTransactionChain(listener); - DOMDataReadWriteTransaction rwTx = txChain.newReadWriteTransaction(); + final DOMDataReadWriteTransaction writeTx = txChain.newReadWriteTransaction(); + + writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, + PeopleModel.emptyContainer()); - ContainerNode invalidData = ImmutableContainerNodeBuilder.create() + final ContainerNode invalidData = ImmutableContainerNodeBuilder.create() .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME)) .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build(); - rwTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData); + writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData); try { - rwTx.submit().checkedGet(5, TimeUnit.SECONDS); + writeTx.submit().checkedGet(5, TimeUnit.SECONDS); fail("Expected TransactionCommitFailedException"); - } catch (TransactionCommitFailedException e) { + } catch (final TransactionCommitFailedException e) { // Expected } - verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(rwTx), + verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class)); txChain.close(); @@ -1151,37 +1116,40 @@ public class DistributedDataStoreIntegrationTest { @Test public void testChainedTransactionFailureWithMultipleShards() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); + new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore( - "testChainedTransactionFailureWithMultipleShards", "cars-1", "people-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testChainedTransactionFailureWithMultipleShards", "cars-1", "people-1")) { - ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( + final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( ImmutableMap.builder() .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(), MoreExecutors.directExecutor()); - TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); - DOMTransactionChain txChain = 
broker.createTransactionChain(listener); + final TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); + final DOMTransactionChain txChain = broker.createTransactionChain(listener); - DOMDataWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); + final DOMDataReadWriteTransaction writeTx = txChain.newReadWriteTransaction(); writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer()); - ContainerNode invalidData = ImmutableContainerNodeBuilder.create() + final ContainerNode invalidData = ImmutableContainerNodeBuilder.create() .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME)) .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build(); + writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData); + // Note that merge will validate the data and fail but put // succeeds b/c deep validation is not // done for put for performance reasons. - writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData); - try { writeTx.submit().checkedGet(5, TimeUnit.SECONDS); fail("Expected TransactionCommitFailedException"); - } catch (TransactionCommitFailedException e) { + } catch (final TransactionCommitFailedException e) { // Expected } @@ -1197,29 +1165,29 @@ public class DistributedDataStoreIntegrationTest { @Test public void testChangeListenerRegistration() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); + new IntegrationTestKit(getSystem(), datastoreContextBuilder) { { - try (AbstractDataStore dataStore = setupDistributedDataStore("testChangeListenerRegistration", - "test-1")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, "testChangeListenerRegistration", "test-1")) { testWriteTransaction(dataStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME)); - MockDataChangeListener listener = new MockDataChangeListener(1); + final MockDataChangeListener listener = new MockDataChangeListener(1); - ListenerRegistration listenerReg = dataStore + final ListenerRegistration listenerReg = dataStore .registerChangeListener(TestModel.TEST_PATH, listener, DataChangeScope.SUBTREE); assertNotNull("registerChangeListener returned null", listenerReg); // Wait for the initial notification - listener.waitForChangeEvents(TestModel.TEST_PATH); - listener.reset(2); // Write 2 updates. - testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build()); @@ -1229,9 +1197,7 @@ public class DistributedDataStoreIntegrationTest { ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)); // Wait for the 2 updates. 
- listener.waitForChangeEvents(TestModel.OUTER_LIST_PATH, listPath); - listenerReg.close(); testWriteTransaction(dataStore, @@ -1251,7 +1217,7 @@ public class DistributedDataStoreIntegrationTest { { final String name = "transactionIntegrationTest"; - ContainerNode carsNode = CarsModel.newCarsNode( + final ContainerNode carsNode = CarsModel.newCarsNode( CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", BigInteger.valueOf(20000L)), CarsModel.newCarEntry("sportage", BigInteger.valueOf(30000L)))); @@ -1262,27 +1228,30 @@ public class DistributedDataStoreIntegrationTest { final Snapshot carsSnapshot = Snapshot.create( new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)), - Collections.emptyList(), 2, 1, 2, 1, 1, "member-1", null); + Collections.emptyList(), 2, 1, 2, 1, 1, "member-1", null); - NormalizedNode peopleNode = PeopleModel.create(); dataTree = InMemoryDataTreeFactory.getInstance().create(TreeType.OPERATIONAL); dataTree.setSchemaContext(SchemaContextHelper.full()); + + final NormalizedNode peopleNode = PeopleModel.create(); AbstractShardTest.writeToStore(dataTree, PeopleModel.BASE_PATH, peopleNode); + root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.EMPTY); - Snapshot peopleSnapshot = Snapshot.create( + final Snapshot peopleSnapshot = Snapshot.create( new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)), - Collections.emptyList(), 2, 1, 2, 1, 1, "member-1", null); + Collections.emptyList(), 2, 1, 2, 1, 1, "member-1", null); restoreFromSnapshot = new DatastoreSnapshot(name, null, Arrays.asList( new DatastoreSnapshot.ShardSnapshot("cars", carsSnapshot), new DatastoreSnapshot.ShardSnapshot("people", peopleSnapshot))); - try (AbstractDataStore dataStore = setupDistributedDataStore(name, "module-shards-member1.conf", - true, "cars", "people")) { + try (final AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, name, "module-shards-member1.conf", true, "cars", "people")) { - DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); + // two reads Optional> optional = readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); assertEquals("Data node", carsNode, optional.get()); @@ -1302,7 +1271,7 @@ public class DistributedDataStoreIntegrationTest { { final String name = "testRecoveryFromPreCarbonSnapshot"; - ContainerNode carsNode = CarsModel.newCarsNode( + final ContainerNode carsNode = CarsModel.newCarsNode( CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", BigInteger.valueOf(20000L)), CarsModel.newCarEntry("sportage", BigInteger.valueOf(30000L)))); @@ -1321,13 +1290,13 @@ public class DistributedDataStoreIntegrationTest { } final org.opendaylight.controller.cluster.raft.Snapshot snapshot = - org.opendaylight.controller.cluster.raft.Snapshot.create(bos.toByteArray(), - Collections.emptyList(), 2, 1, 2, 1, 1, "member-1", null); + org.opendaylight.controller.cluster.raft.Snapshot.create(bos.toByteArray(), + Collections.emptyList(), 2, 1, 2, 1, 1, "member-1", null); InMemorySnapshotStore.addSnapshot("member-1-shard-cars-" + name, snapshot); - try (AbstractDataStore dataStore = setupDistributedDataStore(name, "module-shards-member1.conf", - true, "cars")) { + try (AbstractDataStore dataStore = setupAbstractDataStore( + testParameter, name, "module-shards-member1.conf", true, "cars")) { DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction(); diff --git 
a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java index 3b14705613..da8c7cb725 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java @@ -36,16 +36,23 @@ import com.google.common.util.concurrent.Uninterruptibles; import com.typesafe.config.ConfigFactory; import java.math.BigInteger; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import org.junit.After; +import org.junit.Assume; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; import org.mockito.Mockito; import org.mockito.stubbing.Answer; import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier; +import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore; import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker; import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder; import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException; @@ -102,8 +109,19 @@ import scala.concurrent.duration.FiniteDuration; * * @author Thomas Pantelis */ +@RunWith(Parameterized.class) public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { + @Parameters(name = "{0}") + public static Collection data() { + return Arrays.asList(new Object[][] { + { DistributedDataStore.class }, { ClientBackedDataStore.class } + }); + } + + @Parameter + public Class testParameter; + private static final String[] CARS_AND_PEOPLE = {"cars", "people"}; private static final String[] CARS = {"cars"}; @@ -166,33 +184,36 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { InMemorySnapshotStore.clear(); } - private void initDatastoresWithCars(final String type) { + private void initDatastoresWithCars(final String type) throws Exception { initDatastores(type, MODULE_SHARDS_CARS_ONLY_1_2, CARS); } - private void initDatastoresWithCarsAndPeople(final String type) { + private void initDatastoresWithCarsAndPeople(final String type) throws Exception { initDatastores(type, MODULE_SHARDS_CARS_PEOPLE_1_2, CARS_AND_PEOPLE); } - private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards) { + private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards) + throws Exception { leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder); - leaderDistributedDataStore = leaderTestKit.setupDistributedDataStore(type, moduleShardsConfig, false, shards); + leaderDistributedDataStore = leaderTestKit.setupAbstractDataStore( + testParameter, type, moduleShardsConfig, false, shards); followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder); - 
followerDistributedDataStore = followerTestKit.setupDistributedDataStore(type, moduleShardsConfig, false, - shards); + followerDistributedDataStore = followerTestKit.setupAbstractDataStore( + testParameter, type, moduleShardsConfig, false, shards); leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorContext(), shards); } private static void verifyCars(final DOMStoreReadTransaction readTx, final MapEntryNode... entries) throws Exception { - Optional> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS); + final Optional> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); - CollectionNodeBuilder listBuilder = ImmutableNodes.mapNodeBuilder(CarsModel.CAR_QNAME); - for (NormalizedNode entry: entries) { + final CollectionNodeBuilder listBuilder = ImmutableNodes.mapNodeBuilder( + CarsModel.CAR_QNAME); + for (final NormalizedNode entry: entries) { listBuilder.withChild((MapEntryNode) entry); } @@ -201,23 +222,23 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { private static void verifyNode(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path, final NormalizedNode expNode) throws Exception { - Optional> optional = readTx.read(path).get(5, TimeUnit.SECONDS); + final Optional> optional = readTx.read(path).get(5, TimeUnit.SECONDS); assertEquals("isPresent", true, optional.isPresent()); assertEquals("Data node", expNode, optional.get()); } private static void verifyExists(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path) throws Exception { - Boolean exists = readTx.exists(path).get(5, TimeUnit.SECONDS); + final Boolean exists = readTx.exists(path).get(5, TimeUnit.SECONDS); assertEquals("exists", true, exists); } @Test public void testWriteTransactionWithSingleShard() throws Exception { - String testName = "testWriteTransactionWithSingleShard"; + final String testName = "testWriteTransactionWithSingleShard"; initDatastoresWithCars(testName); - String followerCarShardName = "member-2-shard-cars-" + testName; + final String followerCarShardName = "member-2-shard-cars-" + testName; InMemoryJournal.addWriteMessagesCompleteLatch(followerCarShardName, 2, ApplyJournalEntries.class); DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction(); @@ -226,12 +247,12 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()); - MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); - YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima"); + final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima"); writeTx.merge(car1Path, car1); - MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000)); - YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage"); + final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000)); + final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage"); writeTx.merge(car2Path, car2); followerTestKit.doCommit(writeTx.ready()); @@ -261,10 +282,11 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { JavaTestKit.shutdownActorSystem(leaderSystem, null, true); 
JavaTestKit.shutdownActorSystem(followerSystem, null, true); - ActorSystem newSystem = ActorSystem.create("reinstated-member2", ConfigFactory.load().getConfig("Member2")); + final ActorSystem newSystem = ActorSystem.create("reinstated-member2", ConfigFactory.load() + .getConfig("Member2")); - try (AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder) - .setupDistributedDataStore(testName, "module-shards-member2", true, CARS_AND_PEOPLE)) { + try (final AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder) + .setupAbstractDataStore(testParameter, testName, "module-shards-member2", true, CARS_AND_PEOPLE)) { verifyCars(member2Datastore.newReadOnlyTransaction(), car2); } @@ -275,19 +297,19 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { public void testReadWriteTransactionWithSingleShard() throws Exception { initDatastoresWithCars("testReadWriteTransactionWithSingleShard"); - DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); assertNotNull("newReadWriteTransaction returned null", rwTx); rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); rwTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()); - MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); rwTx.merge(CarsModel.newCarPath("optima"), car1); verifyCars(rwTx, car1); - MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000)); - YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage"); + final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000)); + final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage"); rwTx.merge(car2Path, car2); verifyExists(rwTx, car2Path); @@ -301,20 +323,20 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { public void testWriteTransactionWithMultipleShards() throws Exception { initDatastoresWithCarsAndPeople("testWriteTransactionWithMultipleShards"); - DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction(); assertNotNull("newWriteOnlyTransaction returned null", writeTx); - YangInstanceIdentifier carsPath = CarsModel.BASE_PATH; - NormalizedNode carsNode = CarsModel.emptyContainer(); + final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH; + final NormalizedNode carsNode = CarsModel.emptyContainer(); writeTx.write(carsPath, carsNode); - YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH; - NormalizedNode peopleNode = PeopleModel.emptyContainer(); + final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH; + final NormalizedNode peopleNode = PeopleModel.emptyContainer(); writeTx.write(peoplePath, peopleNode); followerTestKit.doCommit(writeTx.ready()); - DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); verifyNode(readTx, carsPath, carsNode); verifyNode(readTx, peoplePath, peopleNode); @@ -324,20 +346,20 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { public void 
testReadWriteTransactionWithMultipleShards() throws Exception { initDatastoresWithCarsAndPeople("testReadWriteTransactionWithMultipleShards"); - DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); assertNotNull("newReadWriteTransaction returned null", rwTx); - YangInstanceIdentifier carsPath = CarsModel.BASE_PATH; - NormalizedNode carsNode = CarsModel.emptyContainer(); + final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH; + final NormalizedNode carsNode = CarsModel.emptyContainer(); rwTx.write(carsPath, carsNode); - YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH; - NormalizedNode peopleNode = PeopleModel.emptyContainer(); + final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH; + final NormalizedNode peopleNode = PeopleModel.emptyContainer(); rwTx.write(peoplePath, peopleNode); followerTestKit.doCommit(rwTx.ready()); - DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); verifyNode(readTx, carsPath, carsNode); verifyNode(readTx, peoplePath, peopleNode); @@ -347,16 +369,16 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { public void testTransactionChainWithSingleShard() throws Exception { initDatastoresWithCars("testTransactionChainWithSingleShard"); - DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain(); + final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain(); // Add the top-level cars container with write-only. - DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); assertNotNull("newWriteOnlyTransaction returned null", writeTx); writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); - writeTx.ready(); + final DOMStoreThreePhaseCommitCohort writeTxReady = writeTx.ready(); // Verify the top-level cars container with read-only. @@ -364,25 +386,27 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { // Perform car operations with read-write. 
- DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction(); verifyNode(rwTx, CarsModel.BASE_PATH, CarsModel.emptyContainer()); rwTx.merge(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()); - MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); - YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima"); + final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima"); rwTx.write(car1Path, car1); verifyExists(rwTx, car1Path); verifyCars(rwTx, car1); - MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000)); + final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000)); rwTx.merge(CarsModel.newCarPath("sportage"), car2); rwTx.delete(car1Path); + followerTestKit.doCommit(writeTxReady); + followerTestKit.doCommit(rwTx.ready()); txChain.close(); @@ -394,7 +418,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { public void testTransactionChainWithMultipleShards() throws Exception { initDatastoresWithCarsAndPeople("testTransactionChainWithMultipleShards"); - DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain(); + final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain(); DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); assertNotNull("newWriteOnlyTransaction returned null", writeTx); @@ -407,14 +431,14 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { followerTestKit.doCommit(writeTx.ready()); - DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction(); - MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); - YangInstanceIdentifier carPath = CarsModel.newCarPath("optima"); + final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima"); readWriteTx.write(carPath, car); - MapEntryNode person = PeopleModel.newPersonEntry("jack"); - YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack"); + final MapEntryNode person = PeopleModel.newPersonEntry("jack"); + final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack"); readWriteTx.merge(personPath, person); Optional> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS); @@ -425,20 +449,20 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { assertEquals("isPresent", true, optional.isPresent()); assertEquals("Data node", person, optional.get()); - DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready(); + final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready(); writeTx = txChain.newWriteOnlyTransaction(); writeTx.delete(personPath); - DOMStoreThreePhaseCommitCohort cohort3 = writeTx.ready(); + final DOMStoreThreePhaseCommitCohort cohort3 = writeTx.ready(); followerTestKit.doCommit(cohort2); followerTestKit.doCommit(cohort3); txChain.close(); - DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); verifyCars(readTx, car); optional = readTx.read(personPath).get(5, TimeUnit.SECONDS); @@ -449,17 
+473,17 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { public void testChainedTransactionFailureWithSingleShard() throws Exception { initDatastoresWithCars("testChainedTransactionFailureWithSingleShard"); - ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( + final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( ImmutableMap.builder().put( LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(), MoreExecutors.directExecutor()); - TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); - DOMTransactionChain txChain = broker.createTransactionChain(listener); + final TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); + final DOMTransactionChain txChain = broker.createTransactionChain(listener); - DOMDataWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); + final DOMDataWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); - ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier( + final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier( new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME)) .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build(); @@ -468,7 +492,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { try { writeTx.submit().checkedGet(5, TimeUnit.SECONDS); fail("Expected TransactionCommitFailedException"); - } catch (TransactionCommitFailedException e) { + } catch (final TransactionCommitFailedException e) { // Expected } @@ -482,19 +506,19 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { public void testChainedTransactionFailureWithMultipleShards() throws Exception { initDatastoresWithCarsAndPeople("testChainedTransactionFailureWithMultipleShards"); - ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( + final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker( ImmutableMap.builder().put( LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(), MoreExecutors.directExecutor()); - TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); - DOMTransactionChain txChain = broker.createTransactionChain(listener); + final TransactionChainListener listener = Mockito.mock(TransactionChainListener.class); + final DOMTransactionChain txChain = broker.createTransactionChain(listener); - DOMDataWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); + final DOMDataWriteTransaction writeTx = txChain.newWriteOnlyTransaction(); writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer()); - ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier( + final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier( new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME)) .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build(); @@ -505,7 +529,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { try { writeTx.submit().checkedGet(5, TimeUnit.SECONDS); fail("Expected TransactionCommitFailedException"); - } catch (TransactionCommitFailedException e) { + } catch (final TransactionCommitFailedException e) { // Expected } @@ -517,10 +541,12 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { @Test public void 
testSingleShardTransactionsWithLeaderChanges() throws Exception { - String testName = "testSingleShardTransactionsWithLeaderChanges"; + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); + final String testName = "testSingleShardTransactionsWithLeaderChanges"; initDatastoresWithCars(testName); - String followerCarShardName = "member-2-shard-cars-" + testName; + final String followerCarShardName = "member-2-shard-cars-" + testName; InMemoryJournal.addWriteMessagesCompleteLatch(followerCarShardName, 1, ApplyJournalEntries.class); // Write top-level car container from the follower so it uses a remote Tx. @@ -547,12 +573,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1")); Cluster.get(leaderSystem).join(MEMBER_2_ADDRESS); - DatastoreContext.Builder newMember1Builder = DatastoreContext.newBuilder() + final DatastoreContext.Builder newMember1Builder = DatastoreContext.newBuilder() .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5); IntegrationTestKit newMember1TestKit = new IntegrationTestKit(leaderSystem, newMember1Builder); - try (AbstractDataStore ds = - newMember1TestKit.setupDistributedDataStore(testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) { + try (final AbstractDataStore ds = + newMember1TestKit.setupAbstractDataStore( + testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) { followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), CARS); @@ -576,10 +603,11 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader"); followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), "cars"); - Optional carsFollowerShard = followerDistributedDataStore.getActorContext().findLocalShard("cars"); + final Optional carsFollowerShard = followerDistributedDataStore.getActorContext() + .findLocalShard("cars"); assertEquals("Cars follower shard found", true, carsFollowerShard.isPresent()); - TipProducingDataTree dataTree = InMemoryDataTreeFactory.getInstance().create(TreeType.OPERATIONAL); + final TipProducingDataTree dataTree = InMemoryDataTreeFactory.getInstance().create(TreeType.OPERATIONAL); dataTree.setSchemaContext(SchemaContextHelper.full()); // Send a tx with immediate commit. 
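The testSingleShardTransactionsWithLeaderChanges hunk above introduces the pattern this patch applies across the integration tests: the class runs under the Parameterized JUnit runner, every test executes once per datastore frontend, and combinations the new frontend cannot pass yet are skipped with an Assume check instead of being deleted. A minimal, self-contained sketch of that pattern, assuming only the two store classes named in the patch (the class and method names here are illustrative, not part of the change):

    import java.util.Arrays;
    import java.util.Collection;
    import org.junit.Assume;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameter;
    import org.junit.runners.Parameterized.Parameters;
    import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
    import org.opendaylight.controller.cluster.datastore.DistributedDataStore;

    @RunWith(Parameterized.class)
    public class FrontendParameterizationSketchTest {
        @Parameters(name = "{0}")
        public static Collection<Object[]> data() {
            // Each @Test method below runs once for every row returned here.
            return Arrays.asList(new Object[][] {
                { DistributedDataStore.class }, { ClientBackedDataStore.class }
            });
        }

        @Parameter
        public Class<?> testParameter;

        @Test
        public void testSupportedOnOldFrontendOnly() {
            // Skips (rather than fails) the ClientBackedDataStore run until the new code passes.
            Assume.assumeTrue(testParameter.equals(DistributedDataStore.class));
            // ... exercise the datastore here ...
        }
    }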
@@ -588,7 +616,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification); new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification); - MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification); modification.ready(); @@ -621,10 +649,10 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { assertEquals("Response type", ReadyTransactionReply.class, resp.getClass()); - ActorSelection txActor = leaderDistributedDataStore.getActorContext().actorSelection( + final ActorSelection txActor = leaderDistributedDataStore.getActorContext().actorSelection( ((ReadyTransactionReply)resp).getCohortPath()); - Supplier versionSupplier = Mockito.mock(Supplier.class); + final Supplier versionSupplier = Mockito.mock(Supplier.class); Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get(); ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy( leaderDistributedDataStore.getActorContext(), Arrays.asList( @@ -642,11 +670,12 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader"); followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), "cars"); - Optional carsFollowerShard = followerDistributedDataStore.getActorContext().findLocalShard("cars"); + final Optional carsFollowerShard = followerDistributedDataStore.getActorContext() + .findLocalShard("cars"); assertEquals("Cars follower shard found", true, carsFollowerShard.isPresent()); carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef()); - DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class); + final DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class); // Send a tx with immediate commit. 
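Whether the cohort comes from writeTx.ready() or is assembled as a ThreePhaseCommitCohortProxy as in the hunk above, the tests complete it by walking the three commit phases explicitly and blocking on each returned future. A rough sketch of that sequence, assuming the standard DOMStoreThreePhaseCommitCohort contract and an arbitrary five-second timeout (an illustration only, not a verbatim copy of IntegrationTestKit.doCommit):

    import static org.junit.Assert.assertEquals;

    import java.util.concurrent.TimeUnit;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;

    final class CommitSequenceSketch {
        private CommitSequenceSketch() {
        }

        // Phase 1 asks the participating shards whether the transaction can commit;
        // phases 2 and 3 then prepare and apply it. Each call returns a future the test blocks on.
        static void commitSynchronously(final DOMStoreThreePhaseCommitCohort cohort) throws Exception {
            assertEquals("canCommit", Boolean.TRUE, cohort.canCommit().get(5, TimeUnit.SECONDS));
            cohort.preCommit().get(5, TimeUnit.SECONDS);
            cohort.commit().get(5, TimeUnit.SECONDS);
        }
    }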
@@ -654,7 +683,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification); new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification); - MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification); ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1, @@ -692,9 +721,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { ActorSelection txActor = leaderDistributedDataStore.getActorContext().actorSelection( ((ReadyTransactionReply)resp).getCohortPath()); - Supplier versionSupplier = Mockito.mock(Supplier.class); + final Supplier versionSupplier = Mockito.mock(Supplier.class); Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get(); - ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy( + final ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy( leaderDistributedDataStore.getActorContext(), Arrays.asList( new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2); cohort.canCommit().get(5, TimeUnit.SECONDS); @@ -706,13 +735,15 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { @Test public void testTransactionForwardedToLeaderAfterRetry() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); followerDatastoreContextBuilder.shardBatchedModificationCount(2); leaderDatastoreContextBuilder.shardBatchedModificationCount(2); initDatastoresWithCarsAndPeople("testTransactionForwardedToLeaderAfterRetry"); // Do an initial write to get the primary shard info cached. - DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction(); initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); initialWriteTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer()); followerTestKit.doCommit(initialWriteTx.ready()); @@ -728,18 +759,18 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { // Prepare, ready and canCommit a WO tx that writes to 2 shards. This will become the current tx in // the leader shard. - DOMStoreWriteTransaction writeTx1 = followerDistributedDataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx1 = followerDistributedDataStore.newWriteOnlyTransaction(); writeTx1.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()); writeTx1.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer()); - DOMStoreThreePhaseCommitCohort writeTx1Cohort = writeTx1.ready(); - ListenableFuture writeTx1CanCommit = writeTx1Cohort.canCommit(); + final DOMStoreThreePhaseCommitCohort writeTx1Cohort = writeTx1.ready(); + final ListenableFuture writeTx1CanCommit = writeTx1Cohort.canCommit(); writeTx1CanCommit.get(5, TimeUnit.SECONDS); // Prepare and ready another WO tx that writes to 2 shards but don't canCommit yet. This will be queued // in the leader shard. 
- DOMStoreWriteTransaction writeTx2 = followerDistributedDataStore.newWriteOnlyTransaction(); - LinkedList cars = new LinkedList<>(); + final DOMStoreWriteTransaction writeTx2 = followerDistributedDataStore.newWriteOnlyTransaction(); + final LinkedList cars = new LinkedList<>(); int carIndex = 1; cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex))); writeTx2.write(CarsModel.newCarPath("car" + carIndex), cars.getLast()); @@ -753,7 +784,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { // leader shard (with shardBatchedModificationCount set to 2). The 3rd BatchedModidifications will be // sent on ready. - DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction(); for (int i = 1; i <= 5; i++, carIndex++) { cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex))); writeTx3.write(CarsModel.newCarPath("car" + carIndex), cars.getLast()); @@ -762,7 +793,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { // Prepare another WO that writes to a single shard. This will send a single BatchedModidifications // message on ready. - DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction(); cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex))); writeTx4.write(CarsModel.newCarPath("car" + carIndex), cars.getLast()); carIndex++; @@ -770,7 +801,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaciton message to the // leader shard on ready. - DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction(); cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex))); readWriteTx.write(CarsModel.newCarPath("car" + carIndex), cars.getLast()); @@ -788,7 +819,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { // Submit all tx's - the messages should get queued for retry. 
- ListenableFuture writeTx2CanCommit = writeTx2Cohort.canCommit(); + final ListenableFuture writeTx2CanCommit = writeTx2Cohort.canCommit(); final DOMStoreThreePhaseCommitCohort writeTx3Cohort = writeTx3.ready(); final DOMStoreThreePhaseCommitCohort writeTx4Cohort = writeTx4.ready(); final DOMStoreThreePhaseCommitCohort rwTxCohort = readWriteTx.ready(); @@ -812,15 +843,17 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { @Test public void testLeadershipTransferOnShutdown() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); leaderDatastoreContextBuilder.shardBatchedModificationCount(1); followerDatastoreContextBuilder.shardElectionTimeoutFactor(10).customRaftPolicyImplementation(null); - String testName = "testLeadershipTransferOnShutdown"; + final String testName = "testLeadershipTransferOnShutdown"; initDatastores(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, CARS_AND_PEOPLE); - IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System, + final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System, DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(100)); - try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupDistributedDataStore(testName, - MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) { + try (final AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore( + testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) { // Create and submit a couple tx's so they're pending. @@ -834,7 +867,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize())); writeTx = followerDistributedDataStore.newWriteOnlyTransaction(); - MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); + final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)); writeTx.write(CarsModel.newCarPath("optima"), car); final DOMStoreThreePhaseCommitCohort cohort2 = writeTx.ready(); @@ -846,11 +879,11 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder .shardElectionTimeoutFactor(100)); - FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS); - Future future = leaderDistributedDataStore.getActorContext().findLocalShardAsync("cars"); - ActorRef leaderActor = Await.result(future, duration); + final FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS); + final Future future = leaderDistributedDataStore.getActorContext().findLocalShardAsync("cars"); + final ActorRef leaderActor = Await.result(future, duration); - Future stopFuture = Patterns.gracefulStop(leaderActor, duration, Shutdown.INSTANCE); + final Future stopFuture = Patterns.gracefulStop(leaderActor, duration, Shutdown.INSTANCE); // Commit the 2 transactions. They should finish and succeed. @@ -859,7 +892,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { // Wait for the leader actor stopped. - Boolean stopped = Await.result(stopFuture, duration); + final Boolean stopped = Await.result(stopFuture, duration); assertEquals("Stopped", Boolean.TRUE, stopped); // Verify leadership was transferred by reading the committed data from the other nodes. 
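The shutdown in testLeadershipTransferOnShutdown is driven through Akka's graceful-stop pattern: the cars shard actor is resolved asynchronously, asked to stop with a Shutdown message, and the test only proceeds once the actor has actually terminated. Condensed into a fragment from inside a test method (the lookup and timeout mirror the hunk above; the surrounding test plumbing is omitted):

    // Resolve the local shard actor, then request a graceful stop with the Shutdown message.
    final FiniteDuration timeout = FiniteDuration.create(5, TimeUnit.SECONDS);
    final ActorRef leaderActor = Await.result(
        leaderDistributedDataStore.getActorContext().findLocalShardAsync("cars"), timeout);
    final Future<Boolean> stopFuture = Patterns.gracefulStop(leaderActor, timeout, Shutdown.INSTANCE);

    // Pending transactions are committed here; the shard is expected to finish them before it stops.

    // gracefulStop's future completes with TRUE once the actor has terminated within the timeout.
    assertEquals("Stopped", Boolean.TRUE, Await.result(stopFuture, timeout));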
@@ -871,21 +904,23 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { @Test public void testTransactionWithIsolatedLeader() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); // Set the isolated leader check interval high so we can control the switch to IsolatedLeader. leaderDatastoreContextBuilder.shardIsolatedLeaderCheckIntervalInMillis(10000000); - String testName = "testTransactionWithIsolatedLeader"; + final String testName = "testTransactionWithIsolatedLeader"; initDatastoresWithCars(testName); // Tx that is submitted after the follower is stopped but before the leader transitions to IsolatedLeader. - DOMStoreWriteTransaction preIsolatedLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction preIsolatedLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction(); preIsolatedLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); // Tx that is submitted after the leader transitions to IsolatedLeader. - DOMStoreWriteTransaction noShardLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction noShardLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction(); noShardLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); // Tx that is submitted after the follower is reinstated. - DOMStoreWriteTransaction successWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction(); + final DOMStoreWriteTransaction successWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction(); successWriteTx.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer()); // Stop the follower @@ -906,17 +941,17 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { try { leaderTestKit.doCommit(noShardLeaderWriteTx.ready()); fail("Expected NoShardLeaderException"); - } catch (ExecutionException e) { - assertEquals("getCause", NoShardLeaderException.class, e.getCause().getClass()); + } catch (final ExecutionException e) { + assertEquals("getCause", NoShardLeaderException.class, Throwables.getRootCause(e).getClass()); } sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder .shardElectionTimeoutFactor(100)); - DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready(); + final DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready(); - followerDistributedDataStore = followerTestKit.setupDistributedDataStore(testName, - MODULE_SHARDS_CARS_ONLY_1_2, false, CARS); + followerDistributedDataStore = followerTestKit.setupAbstractDataStore( + testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS); leaderTestKit.doCommit(preIsolatedLeaderTxCohort); leaderTestKit.doCommit(successTxCohort); @@ -924,12 +959,14 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { @Test(expected = AskTimeoutException.class) public void testTransactionWithShardLeaderNotResponding() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); followerDatastoreContextBuilder.shardElectionTimeoutFactor(50); initDatastoresWithCars("testTransactionWithShardLeaderNotResponding"); // Do an initial read to get the primary shard info cached. 
- DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); readTx.read(CarsModel.BASE_PATH).checkedGet(5, TimeUnit.SECONDS); // Shutdown the leader and try to create a new tx. @@ -939,13 +976,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { followerDatastoreContextBuilder.operationTimeoutInMillis(50).shardElectionTimeoutFactor(1); sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder); - DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); try { followerTestKit.doCommit(rwTx.ready()); - } catch (ExecutionException e) { + } catch (final ExecutionException e) { assertTrue("Expected ShardLeaderNotRespondingException cause. Actual: " + e.getCause(), e.getCause() instanceof ShardLeaderNotRespondingException); assertNotNull("Expected a nested cause", e.getCause().getCause()); @@ -956,11 +993,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { @Test(expected = NoShardLeaderException.class) public void testTransactionWithCreateTxFailureDueToNoLeader() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); initDatastoresWithCars("testTransactionWithCreateTxFailureDueToNoLeader"); // Do an initial read to get the primary shard info cached. - DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); readTx.read(CarsModel.BASE_PATH).checkedGet(5, TimeUnit.SECONDS); // Shutdown the leader and try to create a new tx. 
@@ -974,13 +1013,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder .operationTimeoutInMillis(10).shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null)); - DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); try { followerTestKit.doCommit(rwTx.ready()); - } catch (ExecutionException e) { + } catch (final ExecutionException e) { Throwables.propagateIfInstanceOf(e.getCause(), Exception.class); Throwables.propagate(e.getCause()); } @@ -988,22 +1027,26 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { @Test public void testTransactionRetryWithInitialAskTimeoutExOnCreateTx() throws Exception { + //TODO remove when test passes also for ClientBackedDataStore + Assume.assumeTrue(testParameter.equals(DistributedDataStore.class)); String testName = "testTransactionRetryWithInitialAskTimeoutExOnCreateTx"; initDatastores(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, CARS); - DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder() + final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder() .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5); - IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System, follower2DatastoreContextBuilder); + final IntegrationTestKit follower2TestKit = new IntegrationTestKit( + follower2System, follower2DatastoreContextBuilder); - try (AbstractDataStore ds = - follower2TestKit.setupDistributedDataStore(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false, CARS)) { + try (final AbstractDataStore ds = + follower2TestKit.setupAbstractDataStore( + testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false, CARS)) { followerTestKit.waitForMembersUp("member-1", "member-3"); follower2TestKit.waitForMembersUp("member-1", "member-2"); // Do an initial read to get the primary shard info cached. - DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); + final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction(); readTx.read(CarsModel.BASE_PATH).checkedGet(5, TimeUnit.SECONDS); // Shutdown the leader and try to create a new tx. 
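Several of the negative tests above declare the expected failure on the @Test annotation, so the ExecutionException coming out of doCommit has to be unwrapped before it can satisfy that expectation. The patch settles on Guava's Throwables for this, both for rethrowing (propagateIfInstanceOf/propagate) and for asserting on the root cause (Throwables.getRootCause). A compressed sketch of the idiom, reusing the remoting test's followerTestKit and followerDistributedDataStore fields, with the method name and expected exception chosen purely for illustration:

    @Test(expected = NoShardLeaderException.class)
    public void exampleExpectedFailure() throws Exception {
        final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
        rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());

        try {
            followerTestKit.doCommit(rwTx.ready());
        } catch (final ExecutionException e) {
            // Rethrow the real cause so the expected-exception check on @Test can see it.
            Throwables.propagateIfInstanceOf(e.getCause(), Exception.class);
            Throwables.propagate(e.getCause());
        }
    }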
@@ -1015,7 +1058,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder .operationTimeoutInMillis(500).shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null)); - DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); + final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction(); rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer()); @@ -1035,12 +1078,12 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { TipProducingDataTree tree = InMemoryDataTreeFactory.getInstance().create(TreeType.CONFIGURATION); tree.setSchemaContext(SchemaContextHelper.full()); - ContainerNode carsNode = CarsModel.newCarsNode( + final ContainerNode carsNode = CarsModel.newCarsNode( CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", BigInteger.valueOf(20000)))); AbstractShardTest.writeToStore(tree, CarsModel.BASE_PATH, carsNode); - NormalizedNode snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.EMPTY); - Snapshot initialSnapshot = Snapshot.create( + final NormalizedNode snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.EMPTY); + final Snapshot initialSnapshot = Snapshot.create( new ShardSnapshotState(new MetadataShardDataTreeSnapshot(snapshotRoot)), Collections.emptyList(), 5, 1, 5, 1, 1, null, null); InMemorySnapshotStore.addSnapshot(leaderCarShardName, initialSnapshot); @@ -1050,7 +1093,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { initDatastoresWithCars(testName); - Optional> readOptional = leaderDistributedDataStore.newReadOnlyTransaction().read( + final Optional> readOptional = leaderDistributedDataStore.newReadOnlyTransaction().read( CarsModel.BASE_PATH).checkedGet(5, TimeUnit.SECONDS); assertEquals("isPresent", true, readOptional.isPresent()); assertEquals("Node", carsNode, readOptional.get()); @@ -1062,7 +1105,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { initialSnapshot, snapshotRoot); } - private static void verifySnapshot(Snapshot actual, Snapshot expected, NormalizedNode expRoot) { + private static void verifySnapshot(final Snapshot actual, final Snapshot expected, + final NormalizedNode expRoot) { assertEquals("Snapshot getLastAppliedTerm", expected.getLastAppliedTerm(), actual.getLastAppliedTerm()); assertEquals("Snapshot getLastAppliedIndex", expected.getLastAppliedIndex(), actual.getLastAppliedIndex()); assertEquals("Snapshot getLastTerm", expected.getLastTerm(), actual.getLastTerm()); @@ -1075,8 +1119,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest { private static void sendDatastoreContextUpdate(final AbstractDataStore dataStore, final Builder builder) { final Builder newBuilder = DatastoreContext.newBuilderFrom(builder.build()); - DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class); - Answer answer = invocation -> newBuilder.build(); + final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class); + final Answer answer = invocation -> newBuilder.build(); Mockito.doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext(); Mockito.doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString()); dataStore.onDatastoreContextUpdated(mockContextFactory); diff --git 
a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/IntegrationTestKit.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/IntegrationTestKit.java index 86c875993a..9570e0e4aa 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/IntegrationTestKit.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/IntegrationTestKit.java @@ -22,6 +22,7 @@ import com.google.common.base.Stopwatch; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.Uninterruptibles; +import java.lang.reflect.Constructor; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; @@ -67,43 +68,73 @@ public class IntegrationTestKit extends ShardTestKit { return datastoreContextBuilder; } - public AbstractDataStore setupDistributedDataStore(final String typeName, final String... shardNames) { - return setupDistributedDataStore(typeName, "module-shards.conf", true, SchemaContextHelper.full(), shardNames); + public DistributedDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig, + final boolean waitUntilLeader, + final SchemaContext schemaContext) throws Exception { + return setupDistributedDataStore(typeName, moduleShardsConfig, "modules.conf", waitUntilLeader, schemaContext); } - public AbstractDataStore setupDistributedDataStore(final String typeName, final boolean waitUntilLeader, - final String... shardNames) { - return setupDistributedDataStore(typeName, "module-shards.conf", waitUntilLeader, + public DistributedDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig, + final String modulesConfig, + final boolean waitUntilLeader, + final SchemaContext schemaContext, + final String... shardNames) throws Exception { + return (DistributedDataStore) setupAbstractDataStore(DistributedDataStore.class, typeName, moduleShardsConfig, + modulesConfig, waitUntilLeader, schemaContext, shardNames); + } + + public AbstractDataStore setupAbstractDataStore(final Class implementation, + final String typeName, final String... shardNames) + throws Exception { + return setupAbstractDataStore(implementation, typeName, "module-shards.conf", true, + SchemaContextHelper.full(), shardNames); + } + + public AbstractDataStore setupAbstractDataStore(final Class implementation, + final String typeName, final boolean waitUntilLeader, + final String... shardNames) throws Exception { + return setupAbstractDataStore(implementation, typeName, "module-shards.conf", waitUntilLeader, SchemaContextHelper.full(), shardNames); } - public AbstractDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig, - final boolean waitUntilLeader, final String... shardNames) { - return setupDistributedDataStore(typeName, moduleShardsConfig, waitUntilLeader, + public AbstractDataStore setupAbstractDataStore(final Class implementation, + final String typeName, final String moduleShardsConfig, + final boolean waitUntilLeader, final String... 
shardNames) + throws Exception { + return setupAbstractDataStore(implementation, typeName, moduleShardsConfig, waitUntilLeader, SchemaContextHelper.full(), shardNames); } - public AbstractDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig, - final boolean waitUntilLeader, final SchemaContext schemaContext, final String... shardNames) { - return setupDistributedDataStore(typeName, moduleShardsConfig, "modules.conf", waitUntilLeader, + public AbstractDataStore setupAbstractDataStore(final Class implementation, + final String typeName, final String moduleShardsConfig, + final boolean waitUntilLeader, + final SchemaContext schemaContext, + final String... shardNames) throws Exception { + return setupAbstractDataStore(implementation, typeName, moduleShardsConfig, "modules.conf", waitUntilLeader, schemaContext, shardNames); } - public AbstractDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig, - final String modulesConfig, final boolean waitUntilLeader, - final SchemaContext schemaContext, final String... shardNames) { + private AbstractDataStore setupAbstractDataStore(final Class implementation, + final String typeName, final String moduleShardsConfig, + final String modulesConfig, final boolean waitUntilLeader, + final SchemaContext schemaContext, final String... shardNames) + throws Exception { final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem()); final Configuration config = new ConfigurationImpl(moduleShardsConfig, modulesConfig); datastoreContextBuilder.dataStoreName(typeName); - DatastoreContext datastoreContext = datastoreContextBuilder.build(); - DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class); + final DatastoreContext datastoreContext = datastoreContextBuilder.build(); + final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class); Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext(); Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString()); - AbstractDataStore dataStore = new DistributedDataStore(getSystem(), cluster, config, mockContextFactory, - restoreFromSnapshot); + final Constructor constructor = implementation.getDeclaredConstructor( + ActorSystem.class, ClusterWrapper.class, Configuration.class, + DatastoreContextFactory.class, DatastoreSnapshot.class); + + final AbstractDataStore dataStore = (AbstractDataStore) constructor.newInstance( + getSystem(), cluster, config, mockContextFactory, restoreFromSnapshot); dataStore.onGlobalContextUpdated(schemaContext); @@ -210,7 +241,7 @@ public class IntegrationTestKit extends ShardTestKit { return shard; } - public static void waitUntilShardIsDown(ActorContext actorContext, String shardName) { + public static void waitUntilShardIsDown(final ActorContext actorContext, final String shardName) { for (int i = 0; i < 20 * 5 ; i++) { LOG.debug("Waiting for shard down {}", shardName); Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java index e6ea971247..fbfa91133f 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java +++ 
b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java @@ -277,7 +277,7 @@ public class MemberNode { return this; } - public MemberNode build() { + public MemberNode build() throws Exception { Preconditions.checkNotNull(moduleShardsConfig, "moduleShardsConfig must be specified"); Preconditions.checkNotNull(akkaConfig, "akkaConfig must be specified"); Preconditions.checkNotNull(testName, "testName must be specified"); @@ -306,13 +306,13 @@ public class MemberNode { String memberName = new ClusterWrapperImpl(system).getCurrentMemberName().getName(); node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-config-" + memberName); - node.configDataStore = node.kit.setupDistributedDataStore("config_" + testName, moduleShardsConfig, - true, schemaContext, waitForshardLeader); + node.configDataStore = node.kit.setupAbstractDataStore(DistributedDataStore.class, + "config_" + testName, moduleShardsConfig, true, schemaContext, waitForshardLeader); if (createOperDatastore) { node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-oper-" + memberName); - node.operDataStore = node.kit.setupDistributedDataStore("oper_" + testName, moduleShardsConfig, - true, schemaContext, waitForshardLeader); + node.operDataStore = node.kit.setupAbstractDataStore(DistributedDataStore.class, + "oper_" + testName, moduleShardsConfig, true, schemaContext, waitForshardLeader); } members.add(node); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java index d15ea39beb..7240b3eebb 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java @@ -57,7 +57,6 @@ import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolic import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal; import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore; import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper; -import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException; import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState; import org.opendaylight.mdsal.eos.dom.api.DOMEntity; import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration; @@ -452,7 +451,7 @@ public class DistributedEntityOwnershipIntegrationTest { * Reproduces bug 4554. 
*/ @Test - public void testCloseCandidateRegistrationInQuickSuccession() throws CandidateAlreadyRegisteredException { + public void testCloseCandidateRegistrationInQuickSuccession() throws Exception { String name = "testCloseCandidateRegistrationInQuickSuccession"; MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name) .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false) diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java index df870f584c..522a7d39f9 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java @@ -136,13 +136,13 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest { InMemorySnapshotStore.clear(); } - private void initEmptyDatastores() { + private void initEmptyDatastores() throws Exception { leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder); - leaderConfigDatastore = (DistributedDataStore) leaderTestKit.setupDistributedDataStore( + leaderConfigDatastore = leaderTestKit.setupDistributedDataStore( "config", MODULE_SHARDS_CONFIG, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext()); - leaderOperDatastore = (DistributedDataStore) leaderTestKit.setupDistributedDataStore( + leaderOperDatastore = leaderTestKit.setupDistributedDataStore( "operational", MODULE_SHARDS_CONFIG, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext()); @@ -152,9 +152,9 @@ public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest { followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder); - followerConfigDatastore = (DistributedDataStore) followerTestKit.setupDistributedDataStore( + followerConfigDatastore = followerTestKit.setupDistributedDataStore( "config", MODULE_SHARDS_CONFIG, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext()); - followerOperDatastore = (DistributedDataStore) followerTestKit.setupDistributedDataStore( + followerOperDatastore = followerTestKit.setupDistributedDataStore( "operational", MODULE_SHARDS_CONFIG, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext()); diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeTest.java index 33a8e59935..41329641d1 100644 --- a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeTest.java +++ b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeTest.java @@ -154,14 +154,14 @@ public class DistributedShardedDOMDataTreeTest extends AbstractTest { InMemorySnapshotStore.clear(); } - private void initEmptyDatastores() { + private void initEmptyDatastores() throws Exception 
{ leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder); - leaderDistributedDataStore = (DistributedDataStore) leaderTestKit.setupDistributedDataStore( + leaderDistributedDataStore = leaderTestKit.setupDistributedDataStore( "config", MODULE_SHARDS_CONFIG, "empty-modules.conf", true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext()); - operDistributedDatastore = (DistributedDataStore) leaderTestKit.setupDistributedDataStore( + operDistributedDatastore = leaderTestKit.setupDistributedDataStore( "operational", MODULE_SHARDS_CONFIG, "empty-modules.conf",true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
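Taken together, the changes hinge on one mechanism in IntegrationTestKit: setupAbstractDataStore no longer constructs DistributedDataStore directly but looks up the requested implementation's constructor reflectively, so a single kit serves both frontends and the remaining setupDistributedDataStore overloads become thin wrappers around it. A condensed sketch of that core step, assuming (as the patch does) that both store classes expose the same five-argument constructor; the helper name is illustrative:

    // Instantiate whichever AbstractDataStore implementation the parameterized test asked for.
    static AbstractDataStore newDataStore(final Class<? extends AbstractDataStore> implementation,
            final ActorSystem system, final ClusterWrapper cluster, final Configuration config,
            final DatastoreContextFactory contextFactory, final DatastoreSnapshot restoreFromSnapshot)
            throws Exception {
        final Constructor<? extends AbstractDataStore> constructor = implementation.getDeclaredConstructor(
                ActorSystem.class, ClusterWrapper.class, Configuration.class,
                DatastoreContextFactory.class, DatastoreSnapshot.class);
        return constructor.newInstance(system, cluster, config, contextFactory, restoreFromSnapshot);
    }

Reflection keeps the test kit free of compile-time knowledge of the frontend under test; the trade-off is that a missing or mismatched constructor only surfaces at runtime, which is why the setup methods (and the builders and init helpers that call them) now declare throws Exception.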