// Merge "Fix warnings in config-util"
// [controller.git] / opendaylight / md-sal / sal-distributed-datastore / src / test / java / org / opendaylight / controller / cluster / datastore / DistributedDataStoreIntegrationTest.java
1 package org.opendaylight.controller.cluster.datastore;
2
3 import static org.junit.Assert.assertEquals;
4 import static org.junit.Assert.assertNotNull;
5 import akka.actor.ActorRef;
6 import akka.actor.ActorSystem;
7 import akka.actor.PoisonPill;
8 import com.google.common.base.Optional;
9 import com.google.common.util.concurrent.CheckedFuture;
10 import com.google.common.util.concurrent.Uninterruptibles;
11 import java.util.concurrent.CountDownLatch;
12 import java.util.concurrent.ExecutionException;
13 import java.util.concurrent.TimeUnit;
14 import java.util.concurrent.atomic.AtomicReference;
15 import org.junit.Test;
16 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
17 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
18 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
19 import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
20 import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
21 import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
22 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
23 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
24 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
25 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
26 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
27 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
28 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
29 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
30 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
31 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
32 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
33 import org.opendaylight.yangtools.concepts.ListenerRegistration;
34 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
35 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
36 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
37 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
38 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
39
40 public class DistributedDataStoreIntegrationTest extends AbstractActorTest {
41
    // Shared DatastoreContext builder used by setupDistributedDataStore(); individual
    // tests tweak it (e.g. initialization/election timeouts) before the store is built.
    // The 100 ms heartbeat interval also drives the Raft election timeout — see
    // testTransactionCommitFailureWithNoShardLeader, which raises it to block election.
42     private final DatastoreContext.Builder datastoreContextBuilder =
43             DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100);
44
45     @Test
46     public void testWriteTransactionWithSingleShard() throws Exception{
47         new IntegrationTestKit(getSystem()) {{
48             DistributedDataStore dataStore =
49                     setupDistributedDataStore("transactionIntegrationTest", "test-1");
50
51             testWriteTransaction(dataStore, TestModel.TEST_PATH,
52                     ImmutableNodes.containerNode(TestModel.TEST_QNAME));
53
54             testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
55                     ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
56
57             cleanup(dataStore);
58         }};
59     }
60
61     @Test
62     public void testWriteTransactionWithMultipleShards() throws Exception{
63         new IntegrationTestKit(getSystem()) {{
64             DistributedDataStore dataStore =
65                     setupDistributedDataStore("testWriteTransactionWithMultipleShards", "cars-1", "people-1");
66
67             DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
68             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
69
70             YangInstanceIdentifier nodePath1 = CarsModel.BASE_PATH;
71             NormalizedNode<?, ?> nodeToWrite1 = CarsModel.emptyContainer();
72             writeTx.write(nodePath1, nodeToWrite1);
73
74             YangInstanceIdentifier nodePath2 = PeopleModel.BASE_PATH;
75             NormalizedNode<?, ?> nodeToWrite2 = PeopleModel.emptyContainer();
76             writeTx.write(nodePath2, nodeToWrite2);
77
78             DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
79
80             doCommit(cohort);
81
82             // Verify the data in the store
83
84             DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
85
86             Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath1).get();
87             assertEquals("isPresent", true, optional.isPresent());
88             assertEquals("Data node", nodeToWrite1, optional.get());
89
90             optional = readTx.read(nodePath2).get();
91             assertEquals("isPresent", true, optional.isPresent());
92             assertEquals("Data node", nodeToWrite2, optional.get());
93
94             cleanup(dataStore);
95         }};
96     }
97
98     @Test
99     public void testReadWriteTransaction() throws Exception{
100         System.setProperty("shard.persistent", "true");
101         new IntegrationTestKit(getSystem()) {{
102             DistributedDataStore dataStore =
103                     setupDistributedDataStore("testReadWriteTransaction", "test-1");
104
105             // 1. Create a read-write Tx
106
107             DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
108             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
109
110             // 2. Write some data
111
112             YangInstanceIdentifier nodePath = TestModel.TEST_PATH;
113             NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
114             readWriteTx.write(nodePath, nodeToWrite );
115
116             // 3. Read the data from Tx
117
118             Boolean exists = readWriteTx.exists(nodePath).checkedGet(5, TimeUnit.SECONDS);
119             assertEquals("exists", true, exists);
120
121             Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS);
122             assertEquals("isPresent", true, optional.isPresent());
123             assertEquals("Data node", nodeToWrite, optional.get());
124
125             // 4. Ready the Tx for commit
126
127             DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready();
128
129             // 5. Commit the Tx
130
131             doCommit(cohort);
132
133             // 6. Verify the data in the store
134
135             DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
136
137             optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
138             assertEquals("isPresent", true, optional.isPresent());
139             assertEquals("Data node", nodeToWrite, optional.get());
140
141             cleanup(dataStore);
142         }};
143     }
144
145     @Test
146     public void testTransactionWritesWithShardNotInitiallyReady() throws Exception{
147         new IntegrationTestKit(getSystem()) {{
148             String testName = "testTransactionWritesWithShardNotInitiallyReady";
149             String shardName = "test-1";
150
151             // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
152             // initialized until we create and submit the write the Tx.
153             String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
154             CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
155             InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
156
157             DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
158
159             // Create the write Tx
160
161             final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
162             assertNotNull("newReadWriteTransaction returned null", writeTx);
163
164             // Do some modification operations and ready the Tx on a separate thread.
165
166             final YangInstanceIdentifier listEntryPath = YangInstanceIdentifier.builder(
167                     TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME,
168                             TestModel.ID_QNAME, 1).build();
169
170             final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
171             final AtomicReference<Exception> caughtEx = new AtomicReference<>();
172             final CountDownLatch txReady = new CountDownLatch(1);
173             Thread txThread = new Thread() {
174                 @Override
175                 public void run() {
176                     try {
177                         writeTx.write(TestModel.TEST_PATH,
178                                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
179
180                         writeTx.merge(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(
181                                 TestModel.OUTER_LIST_QNAME).build());
182
183                         writeTx.write(listEntryPath, ImmutableNodes.mapEntry(
184                                 TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
185
186                         writeTx.delete(listEntryPath);
187
188                         txCohort.set(writeTx.ready());
189                     } catch(Exception e) {
190                         caughtEx.set(e);
191                         return;
192                     } finally {
193                         txReady.countDown();
194                     }
195                 }
196             };
197
198             txThread.start();
199
200             // Wait for the Tx operations to complete.
201
202             boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
203             if(caughtEx.get() != null) {
204                 throw caughtEx.get();
205             }
206
207             assertEquals("Tx ready", true, done);
208
209             // At this point the Tx operations should be waiting for the shard to initialize so
210             // trigger the latch to let the shard recovery to continue.
211
212             blockRecoveryLatch.countDown();
213
214             // Wait for the Tx commit to complete.
215
216             doCommit(txCohort.get());
217
218             // Verify the data in the store
219
220             DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
221
222             Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).
223                     get(5, TimeUnit.SECONDS);
224             assertEquals("isPresent", true, optional.isPresent());
225
226             optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
227             assertEquals("isPresent", true, optional.isPresent());
228
229             optional = readTx.read(listEntryPath).get(5, TimeUnit.SECONDS);
230             assertEquals("isPresent", false, optional.isPresent());
231
232             cleanup(dataStore);
233         }};
234     }
235
236     @Test
237     public void testTransactionReadsWithShardNotInitiallyReady() throws Exception{
238         new IntegrationTestKit(getSystem()) {{
239             String testName = "testTransactionReadsWithShardNotInitiallyReady";
240             String shardName = "test-1";
241
242             // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
243             // initialized until we create the Tx.
244             String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
245             CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
246             InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
247
248             DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
249
250             // Create the read-write Tx
251
252             final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
253             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
254
255             // Do some reads on the Tx on a separate thread.
256
257             final AtomicReference<CheckedFuture<Boolean, ReadFailedException>> txExistsFuture =
258                     new AtomicReference<>();
259             final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
260                     txReadFuture = new AtomicReference<>();
261             final AtomicReference<Exception> caughtEx = new AtomicReference<>();
262             final CountDownLatch txReadsDone = new CountDownLatch(1);
263             Thread txThread = new Thread() {
264                 @Override
265                 public void run() {
266                     try {
267                         readWriteTx.write(TestModel.TEST_PATH,
268                                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
269
270                         txExistsFuture.set(readWriteTx.exists(TestModel.TEST_PATH));
271
272                         txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
273                     } catch(Exception e) {
274                         caughtEx.set(e);
275                         return;
276                     } finally {
277                         txReadsDone.countDown();
278                     }
279                 }
280             };
281
282             txThread.start();
283
284             // Wait for the Tx operations to complete.
285
286             boolean done = Uninterruptibles.awaitUninterruptibly(txReadsDone, 5, TimeUnit.SECONDS);
287             if(caughtEx.get() != null) {
288                 throw caughtEx.get();
289             }
290
291             assertEquals("Tx reads done", true, done);
292
293             // At this point the Tx operations should be waiting for the shard to initialize so
294             // trigger the latch to let the shard recovery to continue.
295
296             blockRecoveryLatch.countDown();
297
298             // Wait for the reads to complete and verify.
299
300             assertEquals("exists", true, txExistsFuture.get().checkedGet(5, TimeUnit.SECONDS));
301             assertEquals("read", true, txReadFuture.get().checkedGet(5, TimeUnit.SECONDS).isPresent());
302
303             readWriteTx.close();
304
305             cleanup(dataStore);
306         }};
307     }
308
309     @Test(expected=NotInitializedException.class)
310     public void testTransactionCommitFailureWithShardNotInitialized() throws Throwable{
311         new IntegrationTestKit(getSystem()) {{
312             String testName = "testTransactionCommitFailureWithShardNotInitialized";
313             String shardName = "test-1";
314
315             // Set the shard initialization timeout low for the test.
316
317             datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
318
319             // Setup the InMemoryJournal to block shard recovery indefinitely.
320
321             String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
322             CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
323             InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
324
325             DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
326
327             // Create the write Tx
328
329             final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
330             assertNotNull("newReadWriteTransaction returned null", writeTx);
331
332             // Do some modifications and ready the Tx on a separate thread.
333
334             final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
335             final AtomicReference<Exception> caughtEx = new AtomicReference<>();
336             final CountDownLatch txReady = new CountDownLatch(1);
337             Thread txThread = new Thread() {
338                 @Override
339                 public void run() {
340                     try {
341                         writeTx.write(TestModel.TEST_PATH,
342                                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
343
344                         txCohort.set(writeTx.ready());
345                     } catch(Exception e) {
346                         caughtEx.set(e);
347                         return;
348                     } finally {
349                         txReady.countDown();
350                     }
351                 }
352             };
353
354             txThread.start();
355
356             // Wait for the Tx operations to complete.
357
358             boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
359             if(caughtEx.get() != null) {
360                 throw caughtEx.get();
361             }
362
363             assertEquals("Tx ready", true, done);
364
365             // Wait for the commit to complete. Since the shard never initialized, the Tx should
366             // have timed out and throw an appropriate exception cause.
367
368             try {
369                 txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
370             } catch(ExecutionException e) {
371                 throw e.getCause();
372             } finally {
373                 blockRecoveryLatch.countDown();
374                 cleanup(dataStore);
375             }
376         }};
377     }
378
379     @Test(expected=NotInitializedException.class)
380     public void testTransactionReadFailureWithShardNotInitialized() throws Throwable{
381         new IntegrationTestKit(getSystem()) {{
382             String testName = "testTransactionReadFailureWithShardNotInitialized";
383             String shardName = "test-1";
384
385             // Set the shard initialization timeout low for the test.
386
387             datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
388
389             // Setup the InMemoryJournal to block shard recovery indefinitely.
390
391             String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
392             CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
393             InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
394
395             DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
396
397             // Create the read-write Tx
398
399             final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
400             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
401
402             // Do a read on the Tx on a separate thread.
403
404             final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
405                     txReadFuture = new AtomicReference<>();
406             final AtomicReference<Exception> caughtEx = new AtomicReference<>();
407             final CountDownLatch txReadDone = new CountDownLatch(1);
408             Thread txThread = new Thread() {
409                 @Override
410                 public void run() {
411                     try {
412                         readWriteTx.write(TestModel.TEST_PATH,
413                                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
414
415                         txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
416
417                         readWriteTx.close();
418                     } catch(Exception e) {
419                         caughtEx.set(e);
420                         return;
421                     } finally {
422                         txReadDone.countDown();
423                     }
424                 }
425             };
426
427             txThread.start();
428
429             // Wait for the Tx operations to complete.
430
431             boolean done = Uninterruptibles.awaitUninterruptibly(txReadDone, 5, TimeUnit.SECONDS);
432             if(caughtEx.get() != null) {
433                 throw caughtEx.get();
434             }
435
436             assertEquals("Tx read done", true, done);
437
438             // Wait for the read to complete. Since the shard never initialized, the Tx should
439             // have timed out and throw an appropriate exception cause.
440
441             try {
442                 txReadFuture.get().checkedGet(5, TimeUnit.SECONDS);
443             } catch(ReadFailedException e) {
444                 throw e.getCause();
445             } finally {
446                 blockRecoveryLatch.countDown();
447                 cleanup(dataStore);
448             }
449         }};
450     }
451
452     @Test(expected=NoShardLeaderException.class)
453     public void testTransactionCommitFailureWithNoShardLeader() throws Throwable{
454         new IntegrationTestKit(getSystem()) {{
455             String testName = "testTransactionCommitFailureWithNoShardLeader";
456             String shardName = "test-1";
457
458             // We don't want the shard to become the leader so prevent shard election from completing
459             // by setting the election timeout, which is based on the heartbeat interval, really high.
460
461             datastoreContextBuilder.shardHeartbeatIntervalInMillis(30000);
462
463             // Set the leader election timeout low for the test.
464
465             datastoreContextBuilder.shardLeaderElectionTimeout(1, TimeUnit.MILLISECONDS);
466
467             DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
468
469             // Create the write Tx.
470
471             final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
472             assertNotNull("newReadWriteTransaction returned null", writeTx);
473
474             // Do some modifications and ready the Tx on a separate thread.
475
476             final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
477             final AtomicReference<Exception> caughtEx = new AtomicReference<>();
478             final CountDownLatch txReady = new CountDownLatch(1);
479             Thread txThread = new Thread() {
480                 @Override
481                 public void run() {
482                     try {
483                         writeTx.write(TestModel.TEST_PATH,
484                                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
485
486                         txCohort.set(writeTx.ready());
487                     } catch(Exception e) {
488                         caughtEx.set(e);
489                         return;
490                     } finally {
491                         txReady.countDown();
492                     }
493                 }
494             };
495
496             txThread.start();
497
498             // Wait for the Tx operations to complete.
499
500             boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
501             if(caughtEx.get() != null) {
502                 throw caughtEx.get();
503             }
504
505             assertEquals("Tx ready", true, done);
506
507             // Wait for the commit to complete. Since no shard leader was elected in time, the Tx
508             // should have timed out and throw an appropriate exception cause.
509
510             try {
511                 txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
512             } catch(ExecutionException e) {
513                 throw e.getCause();
514             } finally {
515                 cleanup(dataStore);
516             }
517         }};
518     }
519
520     @Test
521     public void testTransactionAbort() throws Exception{
522         System.setProperty("shard.persistent", "true");
523         new IntegrationTestKit(getSystem()) {{
524             DistributedDataStore dataStore =
525                     setupDistributedDataStore("transactionAbortIntegrationTest", "test-1");
526
527             DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
528             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
529
530             writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
531
532             DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
533
534             cohort.canCommit().get(5, TimeUnit.SECONDS);
535
536             cohort.abort().get(5, TimeUnit.SECONDS);
537
538             testWriteTransaction(dataStore, TestModel.TEST_PATH,
539                     ImmutableNodes.containerNode(TestModel.TEST_QNAME));
540
541             cleanup(dataStore);
542         }};
543     }
544
545     @Test
546     public void testTransactionChain() throws Exception{
547         new IntegrationTestKit(getSystem()) {{
548             DistributedDataStore dataStore = setupDistributedDataStore("testTransactionChain", "test-1");
549
550             // 1. Create a Tx chain and write-only Tx
551
552             DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
553
554             DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
555             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
556
557             // 2. Write some data
558
559             NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
560             writeTx.write(TestModel.TEST_PATH, testNode);
561
562             // 3. Ready the Tx for commit
563
564             final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
565
566             // 4. Commit the Tx on another thread that first waits for the second read Tx.
567
568             final CountDownLatch continueCommit1 = new CountDownLatch(1);
569             final CountDownLatch commit1Done = new CountDownLatch(1);
570             final AtomicReference<Exception> commit1Error = new AtomicReference<>();
571             new Thread() {
572                 @Override
573                 public void run() {
574                     try {
575                         continueCommit1.await();
576                         doCommit(cohort1);
577                     } catch (Exception e) {
578                         commit1Error.set(e);
579                     } finally {
580                         commit1Done.countDown();
581                     }
582                 }
583             }.start();
584
585             // 5. Create a new read Tx from the chain to read and verify the data from the first
586             // Tx is visible after being readied.
587
588             DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
589             Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
590             assertEquals("isPresent", true, optional.isPresent());
591             assertEquals("Data node", testNode, optional.get());
592
593             // 6. Create a new RW Tx from the chain, write more data, and ready it
594
595             DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
596             MapNode outerNode = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build();
597             rwTx.write(TestModel.OUTER_LIST_PATH, outerNode);
598
599             DOMStoreThreePhaseCommitCohort cohort2 = rwTx.ready();
600
601             // 7. Create a new read Tx from the chain to read the data from the last RW Tx to
602             // verify it is visible.
603
604             readTx = txChain.newReadOnlyTransaction();
605             optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
606             assertEquals("isPresent", true, optional.isPresent());
607             assertEquals("Data node", outerNode, optional.get());
608
609             // 8. Wait for the 2 commits to complete and close the chain.
610
611             continueCommit1.countDown();
612             Uninterruptibles.awaitUninterruptibly(commit1Done, 5, TimeUnit.SECONDS);
613
614             if(commit1Error.get() != null) {
615                 throw commit1Error.get();
616             }
617
618             doCommit(cohort2);
619
620             txChain.close();
621
622             // 9. Create a new read Tx from the data store and verify committed data.
623
624             readTx = dataStore.newReadOnlyTransaction();
625             optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
626             assertEquals("isPresent", true, optional.isPresent());
627             assertEquals("Data node", outerNode, optional.get());
628
629             cleanup(dataStore);
630         }};
631     }
632
633     @Test
634     public void testChangeListenerRegistration() throws Exception{
635         new IntegrationTestKit(getSystem()) {{
636             DistributedDataStore dataStore =
637                     setupDistributedDataStore("testChangeListenerRegistration", "test-1");
638
639             testWriteTransaction(dataStore, TestModel.TEST_PATH,
640                     ImmutableNodes.containerNode(TestModel.TEST_QNAME));
641
642             MockDataChangeListener listener = new MockDataChangeListener(1);
643
644             ListenerRegistration<MockDataChangeListener>
645                     listenerReg = dataStore.registerChangeListener(TestModel.TEST_PATH, listener,
646                             DataChangeScope.SUBTREE);
647
648             assertNotNull("registerChangeListener returned null", listenerReg);
649
650             // Wait for the initial notification
651
652             listener.waitForChangeEvents(TestModel.TEST_PATH);
653
654             listener.reset(2);
655
656             // Write 2 updates.
657
658             testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
659                     ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
660
661             YangInstanceIdentifier listPath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
662                     nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build();
663             testWriteTransaction(dataStore, listPath,
664                     ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
665
666             // Wait for the 2 updates.
667
668             listener.waitForChangeEvents(TestModel.OUTER_LIST_PATH, listPath);
669
670             listenerReg.close();
671
672             testWriteTransaction(dataStore, YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
673                     nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build(),
674                     ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2));
675
676             listener.expectNoMoreChanges("Received unexpected change after close");
677
678             cleanup(dataStore);
679         }};
680     }
681
682     class IntegrationTestKit extends ShardTestKit {
683
684         IntegrationTestKit(ActorSystem actorSystem) {
685             super(actorSystem);
686         }
687
688         DistributedDataStore setupDistributedDataStore(String typeName, String... shardNames) {
689             return setupDistributedDataStore(typeName, true, shardNames);
690         }
691
692         DistributedDataStore setupDistributedDataStore(String typeName, boolean waitUntilLeader,
693                 String... shardNames) {
694             MockClusterWrapper cluster = new MockClusterWrapper();
695             Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
696             ShardStrategyFactory.setConfiguration(config);
697
698             DatastoreContext datastoreContext = datastoreContextBuilder.build();
699             DistributedDataStore dataStore = new DistributedDataStore(getSystem(), typeName, cluster,
700                     config, datastoreContext);
701
702             SchemaContext schemaContext = SchemaContextHelper.full();
703             dataStore.onGlobalContextUpdated(schemaContext);
704
705             if(waitUntilLeader) {
706                 for(String shardName: shardNames) {
707                     ActorRef shard = null;
708                     for(int i = 0; i < 20 * 5 && shard == null; i++) {
709                         Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
710                         Optional<ActorRef> shardReply = dataStore.getActorContext().findLocalShard(shardName);
711                         if(shardReply.isPresent()) {
712                             shard = shardReply.get();
713                         }
714                     }
715
716                     assertNotNull("Shard was not created", shard);
717
718                     waitUntilLeader(shard);
719                 }
720             }
721
722             return dataStore;
723         }
724
725         void testWriteTransaction(DistributedDataStore dataStore, YangInstanceIdentifier nodePath,
726                 NormalizedNode<?, ?> nodeToWrite) throws Exception {
727
728             // 1. Create a write-only Tx
729
730             DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
731             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
732
733             // 2. Write some data
734
735             writeTx.write(nodePath, nodeToWrite);
736
737             // 3. Ready the Tx for commit
738
739             DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
740
741             // 4. Commit the Tx
742
743             doCommit(cohort);
744
745             // 5. Verify the data in the store
746
747             DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
748
749             Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
750             assertEquals("isPresent", true, optional.isPresent());
751             assertEquals("Data node", nodeToWrite, optional.get());
752         }
753
754         void doCommit(final DOMStoreThreePhaseCommitCohort cohort) throws Exception {
755             Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
756             assertEquals("canCommit", true, canCommit);
757             cohort.preCommit().get(5, TimeUnit.SECONDS);
758             cohort.commit().get(5, TimeUnit.SECONDS);
759         }
760
761         void cleanup(DistributedDataStore dataStore) {
762             dataStore.getActorContext().getShardManager().tell(PoisonPill.getInstance(), null);
763         }
764     }
765
766 }