package org.opendaylight.controller.cluster.datastore;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
39 public class DistributedDataStoreIntegrationTest extends AbstractActorTest {
41 private final DatastoreContext.Builder datastoreContextBuilder =
42 DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100);
45 public void testWriteTransactionWithSingleShard() throws Exception{
46 System.setProperty("shard.persistent", "true");
47 new IntegrationTestKit(getSystem()) {{
48 DistributedDataStore dataStore =
49 setupDistributedDataStore("transactionIntegrationTest", "test-1");
51 testWriteTransaction(dataStore, TestModel.TEST_PATH,
52 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
54 testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
55 ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
62 public void testWriteTransactionWithMultipleShards() throws Exception{
63 System.setProperty("shard.persistent", "true");
64 new IntegrationTestKit(getSystem()) {{
65 DistributedDataStore dataStore =
66 setupDistributedDataStore("testWriteTransactionWithMultipleShards", "cars-1", "people-1");
68 DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
69 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
71 YangInstanceIdentifier nodePath1 = CarsModel.BASE_PATH;
72 NormalizedNode<?, ?> nodeToWrite1 = CarsModel.emptyContainer();
73 writeTx.write(nodePath1, nodeToWrite1);
75 YangInstanceIdentifier nodePath2 = PeopleModel.BASE_PATH;
76 NormalizedNode<?, ?> nodeToWrite2 = PeopleModel.emptyContainer();
77 writeTx.write(nodePath2, nodeToWrite2);
79 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
81 Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
82 assertEquals("canCommit", true, canCommit);
83 cohort.preCommit().get(5, TimeUnit.SECONDS);
84 cohort.commit().get(5, TimeUnit.SECONDS);
86 // Verify the data in the store
88 DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
90 Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath1).get();
91 assertEquals("isPresent", true, optional.isPresent());
92 assertEquals("Data node", nodeToWrite1, optional.get());
94 optional = readTx.read(nodePath2).get();
95 assertEquals("isPresent", true, optional.isPresent());
96 assertEquals("Data node", nodeToWrite2, optional.get());
103 public void testReadWriteTransaction() throws Exception{
104 System.setProperty("shard.persistent", "true");
105 new IntegrationTestKit(getSystem()) {{
106 DistributedDataStore dataStore =
107 setupDistributedDataStore("testReadWriteTransaction", "test-1");
109 // 1. Create a read-write Tx
111 DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
112 assertNotNull("newReadWriteTransaction returned null", readWriteTx);
114 // 2. Write some data
116 YangInstanceIdentifier nodePath = TestModel.TEST_PATH;
117 NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
118 readWriteTx.write(nodePath, nodeToWrite );
120 // 3. Read the data from Tx
122 Boolean exists = readWriteTx.exists(nodePath).checkedGet(5, TimeUnit.SECONDS);
123 assertEquals("exists", true, exists);
125 Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS);
126 assertEquals("isPresent", true, optional.isPresent());
127 assertEquals("Data node", nodeToWrite, optional.get());
129 // 4. Ready the Tx for commit
131 DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready();
135 Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
136 assertEquals("canCommit", true, canCommit);
137 cohort.preCommit().get(5, TimeUnit.SECONDS);
138 cohort.commit().get(5, TimeUnit.SECONDS);
140 // 6. Verify the data in the store
142 DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
144 optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
145 assertEquals("isPresent", true, optional.isPresent());
146 assertEquals("Data node", nodeToWrite, optional.get());
153 public void testTransactionWritesWithShardNotInitiallyReady() throws Exception{
154 new IntegrationTestKit(getSystem()) {{
155 String testName = "testTransactionWritesWithShardNotInitiallyReady";
156 String shardName = "test-1";
158 // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
159 // initialized until we create and submit the write the Tx.
160 String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
161 CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
162 InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
164 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
166 // Create the write Tx
168 final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
169 assertNotNull("newReadWriteTransaction returned null", writeTx);
171 // Do some modification operations and ready the Tx on a separate thread.
173 final YangInstanceIdentifier listEntryPath = YangInstanceIdentifier.builder(
174 TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME,
175 TestModel.ID_QNAME, 1).build();
177 final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
178 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
179 final CountDownLatch txReady = new CountDownLatch(1);
180 Thread txThread = new Thread() {
184 writeTx.write(TestModel.TEST_PATH,
185 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
187 writeTx.merge(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(
188 TestModel.OUTER_LIST_QNAME).build());
190 writeTx.write(listEntryPath, ImmutableNodes.mapEntry(
191 TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
193 writeTx.delete(listEntryPath);
195 txCohort.set(writeTx.ready());
196 } catch(Exception e) {
207 // Wait for the Tx operations to complete.
209 boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
210 if(caughtEx.get() != null) {
211 throw caughtEx.get();
214 assertEquals("Tx ready", true, done);
216 // At this point the Tx operations should be waiting for the shard to initialize so
217 // trigger the latch to let the shard recovery to continue.
219 blockRecoveryLatch.countDown();
221 // Wait for the Tx commit to complete.
223 assertEquals("canCommit", true, txCohort.get().canCommit().get(5, TimeUnit.SECONDS));
224 txCohort.get().preCommit().get(5, TimeUnit.SECONDS);
225 txCohort.get().commit().get(5, TimeUnit.SECONDS);
227 // Verify the data in the store
229 DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
231 Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).
232 get(5, TimeUnit.SECONDS);
233 assertEquals("isPresent", true, optional.isPresent());
235 optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
236 assertEquals("isPresent", true, optional.isPresent());
238 optional = readTx.read(listEntryPath).get(5, TimeUnit.SECONDS);
239 assertEquals("isPresent", false, optional.isPresent());
246 public void testTransactionReadsWithShardNotInitiallyReady() throws Exception{
247 new IntegrationTestKit(getSystem()) {{
248 String testName = "testTransactionReadsWithShardNotInitiallyReady";
249 String shardName = "test-1";
251 // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
252 // initialized until we create the Tx.
253 String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
254 CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
255 InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
257 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
259 // Create the read-write Tx
261 final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
262 assertNotNull("newReadWriteTransaction returned null", readWriteTx);
264 // Do some reads on the Tx on a separate thread.
266 final AtomicReference<CheckedFuture<Boolean, ReadFailedException>> txExistsFuture =
267 new AtomicReference<>();
268 final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
269 txReadFuture = new AtomicReference<>();
270 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
271 final CountDownLatch txReadsDone = new CountDownLatch(1);
272 Thread txThread = new Thread() {
276 readWriteTx.write(TestModel.TEST_PATH,
277 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
279 txExistsFuture.set(readWriteTx.exists(TestModel.TEST_PATH));
281 txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
282 } catch(Exception e) {
286 txReadsDone.countDown();
293 // Wait for the Tx operations to complete.
295 boolean done = Uninterruptibles.awaitUninterruptibly(txReadsDone, 5, TimeUnit.SECONDS);
296 if(caughtEx.get() != null) {
297 throw caughtEx.get();
300 assertEquals("Tx reads done", true, done);
302 // At this point the Tx operations should be waiting for the shard to initialize so
303 // trigger the latch to let the shard recovery to continue.
305 blockRecoveryLatch.countDown();
307 // Wait for the reads to complete and verify.
309 assertEquals("exists", true, txExistsFuture.get().checkedGet(5, TimeUnit.SECONDS));
310 assertEquals("read", true, txReadFuture.get().checkedGet(5, TimeUnit.SECONDS).isPresent());
318 @Test(expected=NotInitializedException.class)
319 public void testTransactionCommitFailureWithShardNotInitialized() throws Throwable{
320 new IntegrationTestKit(getSystem()) {{
321 String testName = "testTransactionCommitFailureWithShardNotInitialized";
322 String shardName = "test-1";
324 // Set the shard initialization timeout low for the test.
326 datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
328 // Setup the InMemoryJournal to block shard recovery indefinitely.
330 String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
331 CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
332 InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
334 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
336 // Create the write Tx
338 final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
339 assertNotNull("newReadWriteTransaction returned null", writeTx);
341 // Do some modifications and ready the Tx on a separate thread.
343 final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
344 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
345 final CountDownLatch txReady = new CountDownLatch(1);
346 Thread txThread = new Thread() {
350 writeTx.write(TestModel.TEST_PATH,
351 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
353 txCohort.set(writeTx.ready());
354 } catch(Exception e) {
365 // Wait for the Tx operations to complete.
367 boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
368 if(caughtEx.get() != null) {
369 throw caughtEx.get();
372 assertEquals("Tx ready", true, done);
374 // Wait for the commit to complete. Since the shard never initialized, the Tx should
375 // have timed out and throw an appropriate exception cause.
378 txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
379 } catch(ExecutionException e) {
382 blockRecoveryLatch.countDown();
388 @Test(expected=NotInitializedException.class)
389 public void testTransactionReadFailureWithShardNotInitialized() throws Throwable{
390 new IntegrationTestKit(getSystem()) {{
391 String testName = "testTransactionReadFailureWithShardNotInitialized";
392 String shardName = "test-1";
394 // Set the shard initialization timeout low for the test.
396 datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
398 // Setup the InMemoryJournal to block shard recovery indefinitely.
400 String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
401 CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
402 InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
404 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
406 // Create the read-write Tx
408 final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
409 assertNotNull("newReadWriteTransaction returned null", readWriteTx);
411 // Do a read on the Tx on a separate thread.
413 final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
414 txReadFuture = new AtomicReference<>();
415 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
416 final CountDownLatch txReadDone = new CountDownLatch(1);
417 Thread txThread = new Thread() {
421 readWriteTx.write(TestModel.TEST_PATH,
422 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
424 txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
427 } catch(Exception e) {
431 txReadDone.countDown();
438 // Wait for the Tx operations to complete.
440 boolean done = Uninterruptibles.awaitUninterruptibly(txReadDone, 5, TimeUnit.SECONDS);
441 if(caughtEx.get() != null) {
442 throw caughtEx.get();
445 assertEquals("Tx read done", true, done);
447 // Wait for the read to complete. Since the shard never initialized, the Tx should
448 // have timed out and throw an appropriate exception cause.
451 txReadFuture.get().checkedGet(5, TimeUnit.SECONDS);
452 } catch(ReadFailedException e) {
455 blockRecoveryLatch.countDown();
461 @Test(expected=NoShardLeaderException.class)
462 public void testTransactionCommitFailureWithNoShardLeader() throws Throwable{
463 new IntegrationTestKit(getSystem()) {{
464 String testName = "testTransactionCommitFailureWithNoShardLeader";
465 String shardName = "test-1";
467 // We don't want the shard to become the leader so prevent shard election from completing
468 // by setting the election timeout, which is based on the heartbeat interval, really high.
470 datastoreContextBuilder.shardHeartbeatIntervalInMillis(30000);
472 // Set the leader election timeout low for the test.
474 datastoreContextBuilder.shardLeaderElectionTimeout(1, TimeUnit.MILLISECONDS);
476 DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
478 // Create the write Tx.
480 final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
481 assertNotNull("newReadWriteTransaction returned null", writeTx);
483 // Do some modifications and ready the Tx on a separate thread.
485 final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
486 final AtomicReference<Exception> caughtEx = new AtomicReference<>();
487 final CountDownLatch txReady = new CountDownLatch(1);
488 Thread txThread = new Thread() {
492 writeTx.write(TestModel.TEST_PATH,
493 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
495 txCohort.set(writeTx.ready());
496 } catch(Exception e) {
507 // Wait for the Tx operations to complete.
509 boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
510 if(caughtEx.get() != null) {
511 throw caughtEx.get();
514 assertEquals("Tx ready", true, done);
516 // Wait for the commit to complete. Since no shard leader was elected in time, the Tx
517 // should have timed out and throw an appropriate exception cause.
520 txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
521 } catch(ExecutionException e) {
530 public void testTransactionAbort() throws Exception{
531 System.setProperty("shard.persistent", "true");
532 new IntegrationTestKit(getSystem()) {{
533 DistributedDataStore dataStore =
534 setupDistributedDataStore("transactionAbortIntegrationTest", "test-1");
536 DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
537 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
539 writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
541 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
543 cohort.canCommit().get(5, TimeUnit.SECONDS);
545 cohort.abort().get(5, TimeUnit.SECONDS);
547 testWriteTransaction(dataStore, TestModel.TEST_PATH,
548 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
555 public void testTransactionChain() throws Exception{
556 System.setProperty("shard.persistent", "true");
557 new IntegrationTestKit(getSystem()) {{
558 DistributedDataStore dataStore =
559 setupDistributedDataStore("transactionChainIntegrationTest", "test-1");
561 // 1. Create a Tx chain and write-only Tx
563 DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
565 DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
566 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
568 // 2. Write some data
570 NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
571 writeTx.write(TestModel.TEST_PATH, containerNode);
573 // 3. Ready the Tx for commit
575 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
579 Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
580 assertEquals("canCommit", true, canCommit);
581 cohort.preCommit().get(5, TimeUnit.SECONDS);
582 cohort.commit().get(5, TimeUnit.SECONDS);
584 // 5. Verify the data in the store
586 DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
588 Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
589 assertEquals("isPresent", true, optional.isPresent());
590 assertEquals("Data node", containerNode, optional.get());
599 public void testChangeListenerRegistration() throws Exception{
600 new IntegrationTestKit(getSystem()) {{
601 DistributedDataStore dataStore =
602 setupDistributedDataStore("testChangeListenerRegistration", "test-1");
604 MockDataChangeListener listener = new MockDataChangeListener(3);
606 ListenerRegistration<MockDataChangeListener>
607 listenerReg = dataStore.registerChangeListener(TestModel.TEST_PATH, listener,
608 DataChangeScope.SUBTREE);
610 assertNotNull("registerChangeListener returned null", listenerReg);
612 testWriteTransaction(dataStore, TestModel.TEST_PATH,
613 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
615 testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
616 ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
618 YangInstanceIdentifier listPath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
619 nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build();
620 testWriteTransaction(dataStore, listPath,
621 ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
623 listener.waitForChangeEvents(TestModel.TEST_PATH, TestModel.OUTER_LIST_PATH, listPath );
627 testWriteTransaction(dataStore, YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
628 nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build(),
629 ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2));
631 listener.expectNoMoreChanges("Received unexpected change after close");
637 class IntegrationTestKit extends ShardTestKit {
639 IntegrationTestKit(ActorSystem actorSystem) {
643 DistributedDataStore setupDistributedDataStore(String typeName, String... shardNames) {
644 return setupDistributedDataStore(typeName, true, shardNames);
647 DistributedDataStore setupDistributedDataStore(String typeName, boolean waitUntilLeader,
648 String... shardNames) {
649 MockClusterWrapper cluster = new MockClusterWrapper();
650 Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
651 ShardStrategyFactory.setConfiguration(config);
653 DatastoreContext datastoreContext = datastoreContextBuilder.build();
654 DistributedDataStore dataStore = new DistributedDataStore(getSystem(), typeName, cluster,
655 config, datastoreContext);
657 SchemaContext schemaContext = SchemaContextHelper.full();
658 dataStore.onGlobalContextUpdated(schemaContext);
660 if(waitUntilLeader) {
661 for(String shardName: shardNames) {
662 ActorRef shard = null;
663 for(int i = 0; i < 20 * 5 && shard == null; i++) {
664 Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
665 Optional<ActorRef> shardReply = dataStore.getActorContext().findLocalShard(shardName);
666 if(shardReply.isPresent()) {
667 shard = shardReply.get();
671 assertNotNull("Shard was not created", shard);
673 waitUntilLeader(shard);
680 void testWriteTransaction(DistributedDataStore dataStore, YangInstanceIdentifier nodePath,
681 NormalizedNode<?, ?> nodeToWrite) throws Exception {
683 // 1. Create a write-only Tx
685 DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
686 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
688 // 2. Write some data
690 writeTx.write(nodePath, nodeToWrite);
692 // 3. Ready the Tx for commit
694 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
698 Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
699 assertEquals("canCommit", true, canCommit);
700 cohort.preCommit().get(5, TimeUnit.SECONDS);
701 cohort.commit().get(5, TimeUnit.SECONDS);
703 // 5. Verify the data in the store
705 DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
707 Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
708 assertEquals("isPresent", true, optional.isPresent());
709 assertEquals("Data node", nodeToWrite, optional.get());
712 void cleanup(DistributedDataStore dataStore) {
713 dataStore.getActorContext().getShardManager().tell(PoisonPill.getInstance(), null);