1 package org.opendaylight.controller.cluster.datastore;
3 import static org.junit.Assert.assertEquals;
4 import static org.junit.Assert.assertNotNull;
5 import akka.actor.ActorRef;
6 import akka.actor.ActorSystem;
7 import akka.actor.PoisonPill;
8 import com.google.common.base.Optional;
9 import com.google.common.util.concurrent.CheckedFuture;
10 import com.google.common.util.concurrent.Uninterruptibles;
11 import java.util.concurrent.CountDownLatch;
12 import java.util.concurrent.ExecutionException;
13 import java.util.concurrent.TimeUnit;
14 import java.util.concurrent.atomic.AtomicReference;
15 import org.junit.Test;
16 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
17 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
18 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
19 import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
20 import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
21 import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
22 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
23 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
24 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
25 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
26 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
27 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
28 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
29 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
30 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
31 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
32 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
33 import org.opendaylight.yangtools.concepts.ListenerRegistration;
34 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
35 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
36 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
37 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
38 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
40 public class DistributedDataStoreIntegrationTest extends AbstractActorTest {
// Shared context builder used by every test's data store. The 100 ms shard
// heartbeat keeps Raft leader election fast in the single-member test system;
// individual tests further tweak this builder (e.g. initialization timeouts).
42 private final DatastoreContext.Builder datastoreContextBuilder =
43 DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100);
// Verifies basic write-and-commit round trips against a store backed by a
// single shard ("test-1"): first a container node, then an empty outer list,
// each checked end-to-end by the testWriteTransaction() helper below.
// NOTE(review): this excerpt is truncated — the @Test annotation and the
// trailing cleanup/closing braces are not visible here.
46 public void testWriteTransactionWithSingleShard() throws Exception{
47 new IntegrationTestKit(getSystem()) {{
48 DistributedDataStore dataStore =
49 setupDistributedDataStore("transactionIntegrationTest", "test-1");
51 testWriteTransaction(dataStore, TestModel.TEST_PATH,
52 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
54 testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
55 ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
// Verifies that a single write-only transaction can span two shards
// ("cars-1" and "people-1"): one write lands in each shard, the tx is
// readied/committed, and a follow-up read-only tx sees both nodes.
62 public void testWriteTransactionWithMultipleShards() throws Exception{
63 new IntegrationTestKit(getSystem()) {{
64 DistributedDataStore dataStore =
65 setupDistributedDataStore("testWriteTransactionWithMultipleShards", "cars-1", "people-1");
67 DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
68 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
// Write one container into each shard so the commit exercises the
// multi-shard (3-phase, multi-cohort) commit path.
70 YangInstanceIdentifier nodePath1 = CarsModel.BASE_PATH;
71 NormalizedNode<?, ?> nodeToWrite1 = CarsModel.emptyContainer();
72 writeTx.write(nodePath1, nodeToWrite1);
74 YangInstanceIdentifier nodePath2 = PeopleModel.BASE_PATH;
75 NormalizedNode<?, ?> nodeToWrite2 = PeopleModel.emptyContainer();
76 writeTx.write(nodePath2, nodeToWrite2);
78 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
82 // Verify the data in the store
84 DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
86 Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath1).get();
87 assertEquals("isPresent", true, optional.isPresent());
88 assertEquals("Data node", nodeToWrite1, optional.get());
90 optional = readTx.read(nodePath2).get();
91 assertEquals("isPresent", true, optional.isPresent());
92 assertEquals("Data node", nodeToWrite2, optional.get());
// Exercises a read-write transaction: write, then read the uncommitted data
// back through the same tx (exists + read), commit, and finally verify the
// data through a fresh read-only tx.
// NOTE(review): "shard.persistent" is set but never restored, so it leaks
// into subsequent tests — presumably it enables shard persistence; confirm.
99 public void testReadWriteTransaction() throws Exception{
100 System.setProperty("shard.persistent", "true");
101 new IntegrationTestKit(getSystem()) {{
102 DistributedDataStore dataStore =
103 setupDistributedDataStore("testReadWriteTransaction", "test-1");
105 // 1. Create a read-write Tx
107 DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
108 assertNotNull("newReadWriteTransaction returned null", readWriteTx);
110 // 2. Write some data
112 YangInstanceIdentifier nodePath = TestModel.TEST_PATH;
113 NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
114 readWriteTx.write(nodePath, nodeToWrite );
116 // 3. Read the data from Tx
// Reads within the same tx must see the pending (uncommitted) write.
118 Boolean exists = readWriteTx.exists(nodePath).checkedGet(5, TimeUnit.SECONDS);
119 assertEquals("exists", true, exists);
121 Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS);
122 assertEquals("isPresent", true, optional.isPresent());
123 assertEquals("Data node", nodeToWrite, optional.get());
125 // 4. Ready the Tx for commit
127 DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready();
133 // 6. Verify the data in the store
135 DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
137 optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
138 assertEquals("isPresent", true, optional.isPresent());
139 assertEquals("Data node", nodeToWrite, optional.get());
// Verifies that write/merge/delete operations issued BEFORE the shard has
// finished recovery are queued rather than failed: the InMemoryJournal latch
// holds recovery open while a background thread runs the tx operations, then
// recovery is released and the commit must succeed with the expected data.
146 public void testTransactionWritesWithShardNotInitiallyReady() throws Exception{
147 new IntegrationTestKit(getSystem()) {{
148 String testName = "testTransactionWritesWithShardNotInitiallyReady";
149 String shardName = "test-1";
151 // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
152 // initialized until we create and submit the write the Tx.
153 String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
154 CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
155 InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
157 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
159 // Create the write Tx
161 final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
// NOTE(review): message says "newReadWriteTransaction" but this is a
// write-only tx — looks like a copy/paste slip in the assertion message.
162 assertNotNull("newReadWriteTransaction returned null", writeTx);
164 // Do some modification operations and ready the Tx on a separate thread.
166 final YangInstanceIdentifier listEntryPath = YangInstanceIdentifier.builder(
167 TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME,
168 TestModel.ID_QNAME, 1).build();
170 final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
171 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
172 final CountDownLatch txReady = new CountDownLatch(1);
173 Thread txThread = new Thread() {
177 writeTx.write(TestModel.TEST_PATH,
178 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
180 writeTx.merge(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(
181 TestModel.OUTER_LIST_QNAME).build());
183 writeTx.write(listEntryPath, ImmutableNodes.mapEntry(
184 TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
// Delete the entry that was just written — after commit it must be absent.
186 writeTx.delete(listEntryPath);
188 txCohort.set(writeTx.ready());
189 } catch(Exception e) {
200 // Wait for the Tx operations to complete.
202 boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
203 if(caughtEx.get() != null) {
204 throw caughtEx.get();
207 assertEquals("Tx ready", true, done);
209 // At this point the Tx operations should be waiting for the shard to initialize so
210 // trigger the latch to let the shard recovery to continue.
212 blockRecoveryLatch.countDown();
214 // Wait for the Tx commit to complete.
216 doCommit(txCohort.get());
218 // Verify the data in the store
220 DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
222 Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).
223 get(5, TimeUnit.SECONDS);
224 assertEquals("isPresent", true, optional.isPresent());
226 optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
227 assertEquals("isPresent", true, optional.isPresent());
// The list entry was written then deleted in the same tx, so it must be gone.
229 optional = readTx.read(listEntryPath).get(5, TimeUnit.SECONDS);
230 assertEquals("isPresent", false, optional.isPresent());
// Companion to the previous test for the READ path: reads issued while shard
// recovery is blocked must be queued, then complete successfully once the
// recovery latch is released.
237 public void testTransactionReadsWithShardNotInitiallyReady() throws Exception{
238 new IntegrationTestKit(getSystem()) {{
239 String testName = "testTransactionReadsWithShardNotInitiallyReady";
240 String shardName = "test-1";
242 // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
243 // initialized until we create the Tx.
244 String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
245 CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
246 InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
248 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
250 // Create the read-write Tx
252 final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
253 assertNotNull("newReadWriteTransaction returned null", readWriteTx);
255 // Do some reads on the Tx on a separate thread.
257 final AtomicReference<CheckedFuture<Boolean, ReadFailedException>> txExistsFuture =
258 new AtomicReference<>();
259 final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
260 txReadFuture = new AtomicReference<>();
261 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
262 final CountDownLatch txReadsDone = new CountDownLatch(1);
263 Thread txThread = new Thread() {
// Write first so the subsequent reads have pending data to return.
267 readWriteTx.write(TestModel.TEST_PATH,
268 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
270 txExistsFuture.set(readWriteTx.exists(TestModel.TEST_PATH));
272 txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
273 } catch(Exception e) {
277 txReadsDone.countDown();
284 // Wait for the Tx operations to complete.
286 boolean done = Uninterruptibles.awaitUninterruptibly(txReadsDone, 5, TimeUnit.SECONDS);
287 if(caughtEx.get() != null) {
288 throw caughtEx.get();
291 assertEquals("Tx reads done", true, done);
293 // At this point the Tx operations should be waiting for the shard to initialize so
294 // trigger the latch to let the shard recovery to continue.
296 blockRecoveryLatch.countDown();
298 // Wait for the reads to complete and verify.
300 assertEquals("exists", true, txExistsFuture.get().checkedGet(5, TimeUnit.SECONDS));
301 assertEquals("read", true, txReadFuture.get().checkedGet(5, TimeUnit.SECONDS).isPresent());
// Negative test: when the shard NEVER finishes recovery (latch held for the
// whole test) and the initialization timeout is short (300 ms), canCommit()
// must fail with NotInitializedException as the cause — the test method is
// declared with @Test(expected=NotInitializedException.class), so the catch
// block (body not visible in this excerpt) presumably rethrows e's cause.
309 @Test(expected=NotInitializedException.class)
310 public void testTransactionCommitFailureWithShardNotInitialized() throws Throwable{
311 new IntegrationTestKit(getSystem()) {{
312 String testName = "testTransactionCommitFailureWithShardNotInitialized";
313 String shardName = "test-1";
315 // Set the shard initialization timeout low for the test.
317 datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
319 // Setup the InMemoryJournal to block shard recovery indefinitely.
321 String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
322 CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
323 InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
325 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
327 // Create the write Tx
329 final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
// NOTE(review): assertion message says "newReadWriteTransaction" for a
// write-only tx — copy/paste slip.
330 assertNotNull("newReadWriteTransaction returned null", writeTx);
332 // Do some modifications and ready the Tx on a separate thread.
334 final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
335 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
336 final CountDownLatch txReady = new CountDownLatch(1);
337 Thread txThread = new Thread() {
341 writeTx.write(TestModel.TEST_PATH,
342 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
344 txCohort.set(writeTx.ready());
345 } catch(Exception e) {
356 // Wait for the Tx operations to complete.
358 boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
359 if(caughtEx.get() != null) {
360 throw caughtEx.get();
363 assertEquals("Tx ready", true, done);
365 // Wait for the commit to complete. Since the shard never initialized, the Tx should
366 // have timed out and throw an appropriate exception cause.
369 txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
370 } catch(ExecutionException e) {
// Release the recovery latch so the blocked shard actor can shut down.
373 blockRecoveryLatch.countDown();
// Negative READ-path counterpart of the previous test: with shard recovery
// blocked and a 300 ms initialization timeout, the queued read must fail with
// NotInitializedException as the cause (per @Test(expected=...)); the catch
// block body (not visible here) presumably rethrows the cause.
379 @Test(expected=NotInitializedException.class)
380 public void testTransactionReadFailureWithShardNotInitialized() throws Throwable{
381 new IntegrationTestKit(getSystem()) {{
382 String testName = "testTransactionReadFailureWithShardNotInitialized";
383 String shardName = "test-1";
385 // Set the shard initialization timeout low for the test.
387 datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
389 // Setup the InMemoryJournal to block shard recovery indefinitely.
391 String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
392 CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
393 InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
395 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
397 // Create the read-write Tx
399 final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
400 assertNotNull("newReadWriteTransaction returned null", readWriteTx);
402 // Do a read on the Tx on a separate thread.
404 final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
405 txReadFuture = new AtomicReference<>();
406 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
407 final CountDownLatch txReadDone = new CountDownLatch(1);
408 Thread txThread = new Thread() {
412 readWriteTx.write(TestModel.TEST_PATH,
413 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
415 txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
418 } catch(Exception e) {
422 txReadDone.countDown();
429 // Wait for the Tx operations to complete.
431 boolean done = Uninterruptibles.awaitUninterruptibly(txReadDone, 5, TimeUnit.SECONDS);
432 if(caughtEx.get() != null) {
433 throw caughtEx.get();
436 assertEquals("Tx read done", true, done);
438 // Wait for the read to complete. Since the shard never initialized, the Tx should
439 // have timed out and throw an appropriate exception cause.
442 txReadFuture.get().checkedGet(5, TimeUnit.SECONDS);
443 } catch(ReadFailedException e) {
// Release the recovery latch so the blocked shard actor can shut down.
446 blockRecoveryLatch.countDown();
// Negative test: the shard initializes but never elects a leader (heartbeat
// interval raised to 30 s so election can't complete within the 1 ms leader
// election timeout), so canCommit() must fail with NoShardLeaderException as
// the cause (per @Test(expected=...); catch body not visible here).
452 @Test(expected=NoShardLeaderException.class)
453 public void testTransactionCommitFailureWithNoShardLeader() throws Throwable{
454 new IntegrationTestKit(getSystem()) {{
455 String testName = "testTransactionCommitFailureWithNoShardLeader";
456 String shardName = "test-1";
458 // We don't want the shard to become the leader so prevent shard election from completing
459 // by setting the election timeout, which is based on the heartbeat interval, really high.
461 datastoreContextBuilder.shardHeartbeatIntervalInMillis(30000);
463 // Set the leader election timeout low for the test.
465 datastoreContextBuilder.shardLeaderElectionTimeout(1, TimeUnit.MILLISECONDS);
// waitUntilLeader=false: there will never be a leader to wait for.
467 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
469 // Create the write Tx.
471 final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
// NOTE(review): assertion message says "newReadWriteTransaction" for a
// write-only tx — copy/paste slip.
472 assertNotNull("newReadWriteTransaction returned null", writeTx);
474 // Do some modifications and ready the Tx on a separate thread.
476 final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
477 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
478 final CountDownLatch txReady = new CountDownLatch(1);
479 Thread txThread = new Thread() {
483 writeTx.write(TestModel.TEST_PATH,
484 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
486 txCohort.set(writeTx.ready());
487 } catch(Exception e) {
498 // Wait for the Tx operations to complete.
500 boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
501 if(caughtEx.get() != null) {
502 throw caughtEx.get();
505 assertEquals("Tx ready", true, done);
507 // Wait for the commit to complete. Since no shard leader was elected in time, the Tx
508 // should have timed out and throw an appropriate exception cause.
511 txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
512 } catch(ExecutionException e) {
// Verifies transaction abort: a readied tx that has passed canCommit() is
// aborted, after which the store must still accept and commit a fresh write
// to the same path (checked via testWriteTransaction()).
// NOTE(review): "shard.persistent" is set and never restored — leaks into
// later tests.
521 public void testTransactionAbort() throws Exception{
522 System.setProperty("shard.persistent", "true");
523 new IntegrationTestKit(getSystem()) {{
524 DistributedDataStore dataStore =
525 setupDistributedDataStore("transactionAbortIntegrationTest", "test-1");
527 DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
528 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
530 writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
532 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
// Drive phase 1 of the 3-phase commit, then abort instead of committing.
534 cohort.canCommit().get(5, TimeUnit.SECONDS);
536 cohort.abort().get(5, TimeUnit.SECONDS);
// The aborted tx must not poison the shard — a new write must succeed.
538 testWriteTransaction(dataStore, TestModel.TEST_PATH,
539 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
// Verifies transaction-chain ordering semantics: a later tx in the chain must
// see data from an earlier tx that has merely been READIED (not yet
// committed). The first commit is deliberately parked on a latch on another
// thread while subsequent chained reads/writes proceed.
546 public void testTransactionChain() throws Exception{
547 new IntegrationTestKit(getSystem()) {{
548 DistributedDataStore dataStore = setupDistributedDataStore("testTransactionChain", "test-1");
550 // 1. Create a Tx chain and write-only Tx
552 DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
554 DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
555 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
557 // 2. Write some data
559 NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
560 writeTx.write(TestModel.TEST_PATH, testNode);
562 // 3. Ready the Tx for commit
564 final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
566 // 4. Commit the Tx on another thread that first waits for the second read Tx.
568 final CountDownLatch continueCommit1 = new CountDownLatch(1);
569 final CountDownLatch commit1Done = new CountDownLatch(1);
570 final AtomicReference<Exception> commit1Error = new AtomicReference<>();
// (Thread body partially elided in this excerpt) — the commit thread
// blocks here until step 8 releases it, then performs the commit.
575 continueCommit1.await();
577 } catch (Exception e) {
580 commit1Done.countDown();
585 // 5. Create a new read Tx from the chain to read and verify the data from the first
586 // Tx is visible after being readied.
588 DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
589 Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
590 assertEquals("isPresent", true, optional.isPresent());
591 assertEquals("Data node", testNode, optional.get());
593 // 6. Create a new RW Tx from the chain, write more data, and ready it
595 DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
596 MapNode outerNode = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build();
597 rwTx.write(TestModel.OUTER_LIST_PATH, outerNode);
599 DOMStoreThreePhaseCommitCohort cohort2 = rwTx.ready();
601 // 7. Create a new read Tx from the chain to read the data from the last RW Tx to
602 // verify it is visible.
604 readTx = txChain.newReadOnlyTransaction();
605 optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
606 assertEquals("isPresent", true, optional.isPresent());
607 assertEquals("Data node", outerNode, optional.get());
609 // 8. Wait for the 2 commits to complete and close the chain.
611 continueCommit1.countDown();
612 Uninterruptibles.awaitUninterruptibly(commit1Done, 5, TimeUnit.SECONDS);
614 if(commit1Error.get() != null) {
615 throw commit1Error.get();
622 // 9. Create a new read Tx from the data store and verify committed data.
// Read through the STORE (not the chain) to confirm the data was actually
// committed, not just visible via chain ordering.
624 readTx = dataStore.newReadOnlyTransaction();
625 optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
626 assertEquals("isPresent", true, optional.isPresent());
627 assertEquals("Data node", outerNode, optional.get());
// Verifies data-change listener registration lifecycle: the initial
// notification on registration, notifications for two subsequent writes,
// and (per the final assertion) no further events after the registration
// is closed — the close call itself falls in a gap of this excerpt.
634 public void testChangeListenerRegistration() throws Exception{
635 new IntegrationTestKit(getSystem()) {{
636 DistributedDataStore dataStore =
637 setupDistributedDataStore("testChangeListenerRegistration", "test-1");
// Seed data so registration produces an initial change event.
639 testWriteTransaction(dataStore, TestModel.TEST_PATH,
640 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
642 MockDataChangeListener listener = new MockDataChangeListener(1);
644 ListenerRegistration<MockDataChangeListener>
645 listenerReg = dataStore.registerChangeListener(TestModel.TEST_PATH, listener,
646 DataChangeScope.SUBTREE);
648 assertNotNull("registerChangeListener returned null", listenerReg);
650 // Wait for the initial notification
652 listener.waitForChangeEvents(TestModel.TEST_PATH);
658 testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
659 ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
661 YangInstanceIdentifier listPath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
662 nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build();
663 testWriteTransaction(dataStore, listPath,
664 ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
666 // Wait for the 2 updates.
668 listener.waitForChangeEvents(TestModel.OUTER_LIST_PATH, listPath);
// A write after the registration is closed (close happens in an elided
// line) must NOT reach the listener.
672 testWriteTransaction(dataStore, YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
673 nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build(),
674 ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2));
676 listener.expectNoMoreChanges("Received unexpected change after close");
// Per-test harness (instantiated via anonymous subclass + instance-initializer
// blocks above) that builds a DistributedDataStore wired to mock cluster
// infrastructure and provides commit/verify helpers.
682 class IntegrationTestKit extends ShardTestKit {
// Constructor body elided in this excerpt — presumably delegates the actor
// system to ShardTestKit.
684 IntegrationTestKit(ActorSystem actorSystem) {
// Convenience overload: create the store and wait for each named shard to
// elect a leader before returning (waitUntilLeader defaults to true).
688 DistributedDataStore setupDistributedDataStore(String typeName, String... shardNames) {
689 return setupDistributedDataStore(typeName, true, shardNames);
// Creates a DistributedDataStore named typeName backed by MockClusterWrapper
// and the module-shards/modules test configuration, pushes the full test
// schema context, and — when waitUntilLeader is true — polls up to ~5 s
// (100 x 50 ms) for each shard actor to appear, then waits for it to become
// leader. Fails the test if a shard never materializes.
692 DistributedDataStore setupDistributedDataStore(String typeName, boolean waitUntilLeader,
693 String... shardNames) {
694 MockClusterWrapper cluster = new MockClusterWrapper();
695 Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
// NOTE(review): mutates global ShardStrategyFactory state — tests relying
// on this cannot run concurrently.
696 ShardStrategyFactory.setConfiguration(config);
698 DatastoreContext datastoreContext = datastoreContextBuilder.build();
699 DistributedDataStore dataStore = new DistributedDataStore(getSystem(), typeName, cluster,
700 config, datastoreContext);
702 SchemaContext schemaContext = SchemaContextHelper.full();
703 dataStore.onGlobalContextUpdated(schemaContext);
705 if(waitUntilLeader) {
706 for(String shardName: shardNames) {
707 ActorRef shard = null;
// Poll for the local shard actor: 20*5 = 100 attempts, 50 ms apart.
708 for(int i = 0; i < 20 * 5 && shard == null; i++) {
709 Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
710 Optional<ActorRef> shardReply = dataStore.getActorContext().findLocalShard(shardName);
711 if(shardReply.isPresent()) {
712 shard = shardReply.get();
716 assertNotNull("Shard was not created", shard);
718 waitUntilLeader(shard);
// End-to-end helper: writes nodeToWrite at nodePath in a new write-only tx,
// readies and commits it (the doCommit call falls in an elided line of this
// excerpt), then re-reads the path via a fresh read-only tx and asserts the
// stored node equals what was written.
725 void testWriteTransaction(DistributedDataStore dataStore, YangInstanceIdentifier nodePath,
726 NormalizedNode<?, ?> nodeToWrite) throws Exception {
728 // 1. Create a write-only Tx
730 DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
731 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
733 // 2. Write some data
735 writeTx.write(nodePath, nodeToWrite);
737 // 3. Ready the Tx for commit
739 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
745 // 5. Verify the data in the store
747 DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
749 Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
750 assertEquals("isPresent", true, optional.isPresent());
751 assertEquals("Data node", nodeToWrite, optional.get());
// Drives the full three-phase commit on a readied cohort, asserting
// canCommit succeeds and bounding each phase at 5 s to avoid hung tests.
754 void doCommit(final DOMStoreThreePhaseCommitCohort cohort) throws Exception {
755 Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
756 assertEquals("canCommit", true, canCommit);
757 cohort.preCommit().get(5, TimeUnit.SECONDS);
758 cohort.commit().get(5, TimeUnit.SECONDS);
// Tears down the store's actor hierarchy by poisoning the shard manager
// (fire-and-forget; null sender), letting the test actor system reclaim it.
761 void cleanup(DistributedDataStore dataStore) {
762 dataStore.getActorContext().getShardManager().tell(PoisonPill.getInstance(), null);