Adjust to mdsal DOM read/exists FluentFuture change
[controller.git] opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java
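For context: the mdsal change this patch adjusts to (per the title above) has DOMStoreReadTransaction.read() and exists() return FluentFuture. Below is a minimal sketch of the call pattern the test relies on, assuming those post-change signatures; the FluentFutureReadSketch class and its helper methods are illustrative only and are not part of the file that follows. Because FluentFuture still implements ListenableFuture, the test can keep blocking with get(timeout, unit).

// Minimal sketch, assuming the post-change mdsal read/exists signatures described above.
import com.google.common.util.concurrent.FluentFuture;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

final class FluentFutureReadSketch {
    private FluentFutureReadSketch() {
    }

    // read() returns FluentFuture<Optional<NormalizedNode<?, ?>>>; block with a timeout, as the test does.
    static Optional<NormalizedNode<?, ?>> readBlocking(final DOMStoreReadTransaction tx,
            final YangInstanceIdentifier path) throws Exception {
        final FluentFuture<Optional<NormalizedNode<?, ?>>> future = tx.read(path);
        return future.get(5, TimeUnit.SECONDS);
    }

    // exists() returns FluentFuture<Boolean>.
    static boolean existsBlocking(final DOMStoreReadTransaction tx, final YangInstanceIdentifier path)
            throws Exception {
        final FluentFuture<Boolean> future = tx.exists(path);
        return future.get(5, TimeUnit.SECONDS);
    }
}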
1 /*
2  * Copyright (c) 2015, 2017 Brocade Communications Systems, Inc. and others.  All rights reserved.
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6  * and is available at http://www.eclipse.org/legal/epl-v10.html
7  */
8 package org.opendaylight.controller.cluster.datastore;
9
10 import static org.junit.Assert.assertEquals;
11 import static org.junit.Assert.assertNotNull;
12 import static org.junit.Assert.assertTrue;
13 import static org.junit.Assert.fail;
14 import static org.mockito.Matchers.any;
15 import static org.mockito.Matchers.eq;
16 import static org.mockito.Mockito.timeout;
17 import static org.mockito.Mockito.verify;
18
19 import akka.actor.ActorRef;
20 import akka.actor.ActorSelection;
21 import akka.actor.ActorSystem;
22 import akka.actor.Address;
23 import akka.actor.AddressFromURIString;
24 import akka.cluster.Cluster;
25 import akka.dispatch.Futures;
26 import akka.pattern.Patterns;
27 import akka.testkit.javadsl.TestKit;
28 import com.google.common.base.Stopwatch;
29 import com.google.common.base.Supplier;
30 import com.google.common.base.Throwables;
31 import com.google.common.collect.ImmutableMap;
32 import com.google.common.util.concurrent.ListenableFuture;
33 import com.google.common.util.concurrent.MoreExecutors;
34 import com.google.common.util.concurrent.Uninterruptibles;
35 import com.typesafe.config.ConfigFactory;
36 import java.math.BigInteger;
37 import java.util.Arrays;
38 import java.util.Collection;
39 import java.util.Collections;
40 import java.util.LinkedList;
41 import java.util.List;
42 import java.util.Optional;
43 import java.util.concurrent.ExecutionException;
44 import java.util.concurrent.TimeUnit;
45 import java.util.concurrent.atomic.AtomicLong;
46 import org.junit.After;
47 import org.junit.Assume;
48 import org.junit.Before;
49 import org.junit.Test;
50 import org.junit.runner.RunWith;
51 import org.junit.runners.Parameterized;
52 import org.junit.runners.Parameterized.Parameter;
53 import org.junit.runners.Parameterized.Parameters;
54 import org.mockito.Mockito;
55 import org.mockito.stubbing.Answer;
56 import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
57 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
58 import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
59 import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker;
60 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
61 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
62 import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
63 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
64 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
65 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
66 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
67 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
68 import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
69 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
70 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
71 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
72 import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
73 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
74 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
75 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
76 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
77 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
78 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
79 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
80 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
81 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
82 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
83 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
84 import org.opendaylight.mdsal.common.api.TransactionChainListener;
85 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
86 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
87 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
88 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
89 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
90 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
91 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
92 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
93 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
94 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
95 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
96 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
97 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
98 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
99 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
100 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
101 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
102 import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
103 import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
104 import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
105 import scala.concurrent.Await;
106 import scala.concurrent.Future;
107 import scala.concurrent.duration.FiniteDuration;
108
109 /**
110  * End-to-end distributed data store tests that exercise remote shards and transactions.
111  *
112  * @author Thomas Pantelis
113  */
114 @RunWith(Parameterized.class)
115 public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
116
117     @Parameters(name = "{0}")
118     public static Collection<Object[]> data() {
119         return Arrays.asList(new Object[][] {
120                 { DistributedDataStore.class, 7}, { ClientBackedDataStore.class, 12 }
121         });
122     }
123
124     @Parameter(0)
125     public Class<? extends AbstractDataStore> testParameter;
126     @Parameter(1)
127     public int commitTimeout;
128
129     private static final String[] CARS_AND_PEOPLE = {"cars", "people"};
130     private static final String[] CARS = {"cars"};
131
132     private static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse(
133             "akka://cluster-test@127.0.0.1:2558");
134     private static final Address MEMBER_2_ADDRESS = AddressFromURIString.parse(
135             "akka://cluster-test@127.0.0.1:2559");
136
137     private static final String MODULE_SHARDS_CARS_ONLY_1_2 = "module-shards-cars-member-1-and-2.conf";
138     private static final String MODULE_SHARDS_CARS_PEOPLE_1_2 = "module-shards-member1-and-2.conf";
139     private static final String MODULE_SHARDS_CARS_PEOPLE_1_2_3 = "module-shards-member1-and-2-and-3.conf";
140     private static final String MODULE_SHARDS_CARS_1_2_3 = "module-shards-cars-member-1-and-2-and-3.conf";
141
142     private ActorSystem leaderSystem;
143     private ActorSystem followerSystem;
144     private ActorSystem follower2System;
145
146     private final DatastoreContext.Builder leaderDatastoreContextBuilder =
147             DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);
148
149     private final DatastoreContext.Builder followerDatastoreContextBuilder =
150             DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
151                 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
152     private final TransactionIdentifier tx1 = nextTransactionId();
153     private final TransactionIdentifier tx2 = nextTransactionId();
154
155     private AbstractDataStore followerDistributedDataStore;
156     private AbstractDataStore leaderDistributedDataStore;
157     private IntegrationTestKit followerTestKit;
158     private IntegrationTestKit leaderTestKit;
159
160     @Before
161     public void setUp() {
162         InMemoryJournal.clear();
163         InMemorySnapshotStore.clear();
164
165         leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
166         Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);
167
168         followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
169         Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);
170
171         follower2System = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member3"));
172         Cluster.get(follower2System).join(MEMBER_1_ADDRESS);
173     }
174
175     @After
176     public void tearDown() {
177         if (followerDistributedDataStore != null) {
178             followerDistributedDataStore.close();
179         }
180         if (leaderDistributedDataStore != null) {
181             leaderDistributedDataStore.close();
182         }
183
184         TestKit.shutdownActorSystem(leaderSystem);
185         TestKit.shutdownActorSystem(followerSystem);
186         TestKit.shutdownActorSystem(follower2System);
187
188         InMemoryJournal.clear();
189         InMemorySnapshotStore.clear();
190     }
191
192     private void initDatastoresWithCars(final String type) throws Exception {
193         initDatastores(type, MODULE_SHARDS_CARS_ONLY_1_2, CARS);
194     }
195
196     private void initDatastoresWithCarsAndPeople(final String type) throws Exception {
197         initDatastores(type, MODULE_SHARDS_CARS_PEOPLE_1_2, CARS_AND_PEOPLE);
198     }
199
200     private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards)
201             throws Exception {
202         leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder, commitTimeout);
203
204         leaderDistributedDataStore = leaderTestKit.setupAbstractDataStore(
205                 testParameter, type, moduleShardsConfig, false, shards);
206
207         followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder, commitTimeout);
208         followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
209                 testParameter, type, moduleShardsConfig, false, shards);
210
211         leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorContext(), shards);
212
213         leaderTestKit.waitForMembersUp("member-2");
214         followerTestKit.waitForMembersUp("member-1");
215     }
216
217     private static void verifyCars(final DOMStoreReadTransaction readTx, final MapEntryNode... entries)
218             throws Exception {
219         final Optional<NormalizedNode<?, ?>> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
220         assertEquals("isPresent", true, optional.isPresent());
221
222         final CollectionNodeBuilder<MapEntryNode, MapNode> listBuilder = ImmutableNodes.mapNodeBuilder(
223                 CarsModel.CAR_QNAME);
224         for (final NormalizedNode<?, ?> entry: entries) {
225             listBuilder.withChild((MapEntryNode) entry);
226         }
227
228         assertEquals("Car list node", listBuilder.build(), optional.get());
229     }
230
231     private static void verifyNode(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path,
232             final NormalizedNode<?, ?> expNode) throws Exception {
233         final Optional<NormalizedNode<?, ?>> optional = readTx.read(path).get(5, TimeUnit.SECONDS);
234         assertEquals("isPresent", true, optional.isPresent());
235         assertEquals("Data node", expNode, optional.get());
236     }
237
238     private static void verifyExists(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path)
239             throws Exception {
240         final Boolean exists = readTx.exists(path).get(5, TimeUnit.SECONDS);
241         assertEquals("exists", true, exists);
242     }
243
244     @Test
245     public void testWriteTransactionWithSingleShard() throws Exception {
246         final String testName = "testWriteTransactionWithSingleShard";
247         initDatastoresWithCars(testName);
248
249         final String followerCarShardName = "member-2-shard-cars-" + testName;
250
251         DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
252         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
253
254         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
255         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
256
257         final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
258         final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
259         writeTx.merge(car1Path, car1);
260
261         final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000));
262         final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
263         writeTx.merge(car2Path, car2);
264
265         followerTestKit.doCommit(writeTx.ready());
266
267         verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1, car2);
268
269         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
270
271         // Test delete
272
273         writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
274
275         writeTx.delete(car1Path);
276
277         followerTestKit.doCommit(writeTx.ready());
278
279         verifyExists(followerDistributedDataStore.newReadOnlyTransaction(), car2Path);
280
281         verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car2);
282
283         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car2);
284
285         // Re-instate the follower member 2 as a single-node to verify replication and recovery.
286
287         // The following is a bit tricky. Before we reinstate the follower we need to ensure it has persisted and
288         // applied all the log entries from the leader. Since we've verified the car data above we know that
289         // all the transactions have been applied on the leader so we first read and capture its lastAppliedIndex.
290         final AtomicLong leaderLastAppliedIndex = new AtomicLong();
291         IntegrationTestKit.verifyShardState(leaderDistributedDataStore, CARS[0],
292             state -> leaderLastAppliedIndex.set(state.getLastApplied()));
293
294         // Now we need to make sure the follower has persisted the leader's lastAppliedIndex via ApplyJournalEntries.
295         // However we don't know exactly how many ApplyJournalEntries messages there will be as it can differ between
296         // the tell-based and ask-based front-ends. For ask-based there will be exactly 2 ApplyJournalEntries but
297         // tell-based persists additional payloads which could be replicated and applied in a batch resulting in
298         // either 2 or 3 ApplyJournalEntries. To handle this we read the follower's persisted ApplyJournalEntries
299         // until we find the one that encompasses the leader's lastAppliedIndex.
300         Stopwatch sw = Stopwatch.createStarted();
301         boolean done = false;
302         while (!done) {
303             final List<ApplyJournalEntries> entries = InMemoryJournal.get(followerCarShardName,
304                     ApplyJournalEntries.class);
305             for (ApplyJournalEntries aje: entries) {
306                 if (aje.getToIndex() >= leaderLastAppliedIndex.get()) {
307                     done = true;
308                     break;
309                 }
310             }
311
312             assertTrue("Follower did not persist ApplyJournalEntries containing leader's lastAppliedIndex "
313                     + leaderLastAppliedIndex + ". Entries persisted: " + entries, sw.elapsed(TimeUnit.SECONDS) <= 5);
314
315             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
316         }
317
318         TestKit.shutdownActorSystem(leaderSystem, Boolean.TRUE);
319         TestKit.shutdownActorSystem(followerSystem, Boolean.TRUE);
320
321         final ActorSystem newSystem = newActorSystem("reinstated-member2", "Member2");
322
323         try (AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder,
324                 commitTimeout)
325                 .setupAbstractDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
326             verifyCars(member2Datastore.newReadOnlyTransaction(), car2);
327         }
328     }
329
330     @Test
331     public void testReadWriteTransactionWithSingleShard() throws Exception {
332         initDatastoresWithCars("testReadWriteTransactionWithSingleShard");
333
334         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
335         assertNotNull("newReadWriteTransaction returned null", rwTx);
336
337         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
338         rwTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
339
340         final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
341         rwTx.merge(CarsModel.newCarPath("optima"), car1);
342
343         verifyCars(rwTx, car1);
344
345         final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000));
346         final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
347         rwTx.merge(car2Path, car2);
348
349         verifyExists(rwTx, car2Path);
350
351         followerTestKit.doCommit(rwTx.ready());
352
353         verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1, car2);
354     }
355
356     @Test
357     public void testWriteTransactionWithMultipleShards() throws Exception {
358         initDatastoresWithCarsAndPeople("testWriteTransactionWithMultipleShards");
359
360         final DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
361         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
362
363         final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
364         final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
365         writeTx.write(carsPath, carsNode);
366
367         final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
368         final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
369         writeTx.write(peoplePath, peopleNode);
370
371         followerTestKit.doCommit(writeTx.ready());
372
373         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
374
375         verifyNode(readTx, carsPath, carsNode);
376         verifyNode(readTx, peoplePath, peopleNode);
377     }
378
379     @Test
380     public void testReadWriteTransactionWithMultipleShards() throws Exception {
381         initDatastoresWithCarsAndPeople("testReadWriteTransactionWithMultipleShards");
382
383         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
384         assertNotNull("newReadWriteTransaction returned null", rwTx);
385
386         final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
387         final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
388         rwTx.write(carsPath, carsNode);
389
390         final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
391         final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
392         rwTx.write(peoplePath, peopleNode);
393
394         followerTestKit.doCommit(rwTx.ready());
395
396         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
397
398         verifyNode(readTx, carsPath, carsNode);
399         verifyNode(readTx, peoplePath, peopleNode);
400     }
401
402     @Test
403     public void testTransactionChainWithSingleShard() throws Exception {
404         initDatastoresWithCars("testTransactionChainWithSingleShard");
405
406         final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
407
408         // Add the top-level cars container with write-only.
409
410         final DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
411         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
412
413         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
414
415         final DOMStoreThreePhaseCommitCohort writeTxReady = writeTx.ready();
416
417         // Verify the top-level cars container with read-only.
418
419         verifyNode(txChain.newReadOnlyTransaction(), CarsModel.BASE_PATH, CarsModel.emptyContainer());
420
421         // Perform car operations with read-write.
422
423         final DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
424
425         verifyNode(rwTx, CarsModel.BASE_PATH, CarsModel.emptyContainer());
426
427         rwTx.merge(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
428
429         final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
430         final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
431         rwTx.write(car1Path, car1);
432
433         verifyExists(rwTx, car1Path);
434
435         verifyCars(rwTx, car1);
436
437         final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000));
438         rwTx.merge(CarsModel.newCarPath("sportage"), car2);
439
440         rwTx.delete(car1Path);
441
442         followerTestKit.doCommit(writeTxReady);
443
444         followerTestKit.doCommit(rwTx.ready());
445
446         txChain.close();
447
448         verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car2);
449     }
450
451     @Test
452     public void testTransactionChainWithMultipleShards() throws Exception {
453         initDatastoresWithCarsAndPeople("testTransactionChainWithMultipleShards");
454
455         final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
456
457         DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
458         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
459
460         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
461         writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
462
463         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
464         writeTx.write(PeopleModel.PERSON_LIST_PATH, PeopleModel.newPersonMapNode());
465
466         followerTestKit.doCommit(writeTx.ready());
467
468         final DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction();
469
470         final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
471         final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
472         readWriteTx.write(carPath, car);
473
474         final MapEntryNode person = PeopleModel.newPersonEntry("jack");
475         final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
476         readWriteTx.merge(personPath, person);
477
478         Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
479         assertEquals("isPresent", true, optional.isPresent());
480         assertEquals("Data node", car, optional.get());
481
482         optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
483         assertEquals("isPresent", true, optional.isPresent());
484         assertEquals("Data node", person, optional.get());
485
486         final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();
487
488         writeTx = txChain.newWriteOnlyTransaction();
489
490         writeTx.delete(personPath);
491
492         final DOMStoreThreePhaseCommitCohort cohort3 = writeTx.ready();
493
494         followerTestKit.doCommit(cohort2);
495         followerTestKit.doCommit(cohort3);
496
497         txChain.close();
498
499         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
500         verifyCars(readTx, car);
501
502         optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
503         assertEquals("isPresent", false, optional.isPresent());
504     }
505
506     @Test
507     public void testChainedTransactionFailureWithSingleShard() throws Exception {
508         initDatastoresWithCars("testChainedTransactionFailureWithSingleShard");
509
510         final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
511                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
512                         LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
513                         MoreExecutors.directExecutor());
514
515         final TransactionChainListener listener = Mockito.mock(TransactionChainListener.class);
516         final DOMTransactionChain txChain = broker.createTransactionChain(listener);
517
518         final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
519
520         final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
521                 new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
522                     .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
523
524         writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
525
526         try {
527             writeTx.commit().get(5, TimeUnit.SECONDS);
528             fail("Expected TransactionCommitFailedException");
529         } catch (final ExecutionException e) {
530             // Expected
531         }
532
533         verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
534
535         txChain.close();
536         broker.close();
537     }
538
539     @Test
540     public void testChainedTransactionFailureWithMultipleShards() throws Exception {
541         initDatastoresWithCarsAndPeople("testChainedTransactionFailureWithMultipleShards");
542
543         final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
544                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
545                         LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
546                         MoreExecutors.directExecutor());
547
548         final TransactionChainListener listener = Mockito.mock(TransactionChainListener.class);
549         final DOMTransactionChain txChain = broker.createTransactionChain(listener);
550
551         final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
552
553         writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
554
555         final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
556                 new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
557                     .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
558
559         // Note that merge will validate the data and fail but put succeeds b/c deep validation is not
560         // done for put for performance reasons.
561         writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
562
563         try {
564             writeTx.commit().get(5, TimeUnit.SECONDS);
565             fail("Expected TransactionCommitFailedException");
566         } catch (final ExecutionException e) {
567             // Expected
568         }
569
570         verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
571
572         txChain.close();
573         broker.close();
574     }
575
576     @Test
577     public void testSingleShardTransactionsWithLeaderChanges() throws Exception {
578         followerDatastoreContextBuilder.backendAlivenessTimerIntervalInSeconds(2);
579         final String testName = "testSingleShardTransactionsWithLeaderChanges";
580         initDatastoresWithCars(testName);
581
582         final String followerCarShardName = "member-2-shard-cars-" + testName;
583         InMemoryJournal.addWriteMessagesCompleteLatch(followerCarShardName, 1, ApplyJournalEntries.class);
584
585         // Write top-level car container from the follower so it uses a remote Tx.
586
587         DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
588
589         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
590         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
591
592         followerTestKit.doCommit(writeTx.ready());
593
594         InMemoryJournal.waitForWriteMessagesComplete(followerCarShardName);
595
596         // Switch the leader to the follower
597
598         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
599                 .shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null));
600
601         TestKit.shutdownActorSystem(leaderSystem, true);
602         Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
603
604         followerTestKit.waitUntilNoLeader(followerDistributedDataStore.getActorContext(), CARS);
605
606         leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
607         Cluster.get(leaderSystem).join(MEMBER_2_ADDRESS);
608
609         final DatastoreContext.Builder newMember1Builder = DatastoreContext.newBuilder()
610                 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
611         IntegrationTestKit newMember1TestKit = new IntegrationTestKit(leaderSystem, newMember1Builder, commitTimeout);
612
613         try (AbstractDataStore ds =
614                 newMember1TestKit.setupAbstractDataStore(
615                         testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) {
616
617             followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), CARS);
618
619             // Write a car entry to the new leader - should switch to local Tx
620
621             writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
622
623             MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
624             YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
625             writeTx.merge(car1Path, car1);
626
627             followerTestKit.doCommit(writeTx.ready());
628
629             verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1);
630         }
631     }
632
633     @SuppressWarnings("unchecked")
634     @Test
635     public void testReadyLocalTransactionForwardedToLeader() throws Exception {
636         initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader");
637         followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), "cars");
638
639         final com.google.common.base.Optional<ActorRef> carsFollowerShard =
640                 followerDistributedDataStore.getActorContext().findLocalShard("cars");
641         assertEquals("Cars follower shard found", true, carsFollowerShard.isPresent());
642
643         final DataTree dataTree = new InMemoryDataTreeFactory().create(
644             DataTreeConfiguration.DEFAULT_OPERATIONAL, SchemaContextHelper.full());
645
646         // Send a tx with immediate commit.
647
648         DataTreeModification modification = dataTree.takeSnapshot().newModification();
649         new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
650         new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);
651
652         final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
653         new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
654         modification.ready();
655
656         ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(tx1, modification, true,
657                 java.util.Optional.empty());
658
659         carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
660         Object resp = followerTestKit.expectMsgClass(Object.class);
661         if (resp instanceof akka.actor.Status.Failure) {
662             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
663         }
664
665         assertEquals("Response type", CommitTransactionReply.class, resp.getClass());
666
667         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1);
668
669         // Send another tx without immediate commit.
670
671         modification = dataTree.takeSnapshot().newModification();
672         MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(30000));
673         new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
674         modification.ready();
675
676         readyLocal = new ReadyLocalTransaction(tx2, modification, false, java.util.Optional.empty());
677
678         carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
679         resp = followerTestKit.expectMsgClass(Object.class);
680         if (resp instanceof akka.actor.Status.Failure) {
681             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
682         }
683
684         assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());
685
686         final ActorSelection txActor = leaderDistributedDataStore.getActorContext().actorSelection(
687                 ((ReadyTransactionReply)resp).getCohortPath());
688
689         final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
690         Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
691         ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
692                 leaderDistributedDataStore.getActorContext(), Arrays.asList(
693                         new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
694         cohort.canCommit().get(5, TimeUnit.SECONDS);
695         cohort.preCommit().get(5, TimeUnit.SECONDS);
696         cohort.commit().get(5, TimeUnit.SECONDS);
697
698         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
699     }
700
701     @SuppressWarnings("unchecked")
702     @Test
703     public void testForwardedReadyTransactionForwardedToLeader() throws Exception {
704         initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader");
705         followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorContext(), "cars");
706
707         final com.google.common.base.Optional<ActorRef> carsFollowerShard =
708                 followerDistributedDataStore.getActorContext().findLocalShard("cars");
709         assertEquals("Cars follower shard found", true, carsFollowerShard.isPresent());
710
711         carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
712         final DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class);
713
714         // Send a tx with immediate commit.
715
716         DataTreeModification modification = dataTree.takeSnapshot().newModification();
717         new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
718         new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);
719
720         final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
721         new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
722
723         ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1,
724                 DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
725                         Mockito.mock(ShardDataTreeTransactionParent.class), tx1, modification), true,
726                 java.util.Optional.empty());
727
728         carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
729         Object resp = followerTestKit.expectMsgClass(Object.class);
730         if (resp instanceof akka.actor.Status.Failure) {
731             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
732         }
733
734         assertEquals("Response type", CommitTransactionReply.class, resp.getClass());
735
736         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1);
737
738         // Send another tx without immediate commit.
739
740         modification = dataTree.takeSnapshot().newModification();
741         MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(30000));
742         new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
743
744         forwardedReady = new ForwardedReadyTransaction(tx2,
745                 DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
746                         Mockito.mock(ShardDataTreeTransactionParent.class), tx2, modification), false,
747                 java.util.Optional.empty());
748
749         carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
750         resp = followerTestKit.expectMsgClass(Object.class);
751         if (resp instanceof akka.actor.Status.Failure) {
752             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
753         }
754
755         assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());
756
757         ActorSelection txActor = leaderDistributedDataStore.getActorContext().actorSelection(
758                 ((ReadyTransactionReply)resp).getCohortPath());
759
760         final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
761         Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
762         final ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
763                 leaderDistributedDataStore.getActorContext(), Arrays.asList(
764                         new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
765         cohort.canCommit().get(5, TimeUnit.SECONDS);
766         cohort.preCommit().get(5, TimeUnit.SECONDS);
767         cohort.commit().get(5, TimeUnit.SECONDS);
768
769         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
770     }
771
772     @Test
773     public void testTransactionForwardedToLeaderAfterRetry() throws Exception {
774         //TODO remove when test passes also for ClientBackedDataStore
775         Assume.assumeTrue(testParameter.equals(DistributedDataStore.class));
776         followerDatastoreContextBuilder.shardBatchedModificationCount(2);
777         leaderDatastoreContextBuilder.shardBatchedModificationCount(2);
778         initDatastoresWithCarsAndPeople("testTransactionForwardedToLeaderAfterRetry");
779
780         // Do an initial write to get the primary shard info cached.
781
782         final DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction();
783         initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
784         initialWriteTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
785         followerTestKit.doCommit(initialWriteTx.ready());
786
787         // Wait for the commit to be replicated to the follower.
788
789         MemberNode.verifyRaftState(followerDistributedDataStore, "cars",
790             raftState -> assertEquals("getLastApplied", 1, raftState.getLastApplied()));
791
792         MemberNode.verifyRaftState(followerDistributedDataStore, "people",
793             raftState -> assertEquals("getLastApplied", 1, raftState.getLastApplied()));
794
795         // Prepare, ready and canCommit a WO tx that writes to 2 shards. This will become the current tx in
796         // the leader shard.
797
798         final DOMStoreWriteTransaction writeTx1 = followerDistributedDataStore.newWriteOnlyTransaction();
799         writeTx1.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
800         writeTx1.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
801         final DOMStoreThreePhaseCommitCohort writeTx1Cohort = writeTx1.ready();
802         final ListenableFuture<Boolean> writeTx1CanCommit = writeTx1Cohort.canCommit();
803         writeTx1CanCommit.get(5, TimeUnit.SECONDS);
804
805         // Prepare and ready another WO tx that writes to 2 shards but don't canCommit yet. This will be queued
806         // in the leader shard.
807
808         final DOMStoreWriteTransaction writeTx2 = followerDistributedDataStore.newWriteOnlyTransaction();
809         final LinkedList<MapEntryNode> cars = new LinkedList<>();
810         int carIndex = 1;
811         cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex)));
812         writeTx2.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
813         carIndex++;
814         NormalizedNode<?, ?> people = PeopleModel.newPersonMapNode();
815         writeTx2.write(PeopleModel.PERSON_LIST_PATH, people);
816         final DOMStoreThreePhaseCommitCohort writeTx2Cohort = writeTx2.ready();
817
818         // Prepare another WO that writes to a single shard and thus will be directly committed on ready. This
819         // tx writes 5 cars so 2 BatchedModifications messages will be sent initially and cached in the
820         // leader shard (with shardBatchedModificationCount set to 2). The 3rd BatchedModifications will be
821         // sent on ready.
822
823         final DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction();
824         for (int i = 1; i <= 5; i++, carIndex++) {
825             cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex)));
826             writeTx3.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
827         }
828
829         // Prepare another WO that writes to a single shard. This will send a single BatchedModifications
830         // message on ready.
831
832         final DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction();
833         cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex)));
834         writeTx4.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
835         carIndex++;
836
837         // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaction message to the
838         // leader shard on ready.
839
840         final DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction();
841         cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex)));
842         readWriteTx.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
843
844         IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
845             stats -> assertEquals("getReadWriteTransactionCount", 1, stats.getReadWriteTransactionCount()));
846
847         // Disable elections on the leader so it switches to follower.
848
849         sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
850                 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
851                 .shardElectionTimeoutFactor(10));
852
853         leaderTestKit.waitUntilNoLeader(leaderDistributedDataStore.getActorContext(), "cars");
854
855         // Submit all tx's - the messages should get queued for retry.
856
857         final ListenableFuture<Boolean> writeTx2CanCommit = writeTx2Cohort.canCommit();
858         final DOMStoreThreePhaseCommitCohort writeTx3Cohort = writeTx3.ready();
859         final DOMStoreThreePhaseCommitCohort writeTx4Cohort = writeTx4.ready();
860         final DOMStoreThreePhaseCommitCohort rwTxCohort = readWriteTx.ready();
861
862         // Enable elections on the other follower so it becomes the leader, at which point the
863         // tx's should get forwarded from the previous leader to the new leader to complete the commits.
864
865         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
866                 .customRaftPolicyImplementation(null).shardElectionTimeoutFactor(1));
867         IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorContext(), "cars")
868                 .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
869         IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorContext(), "people")
870                 .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
871
872         followerTestKit.doCommit(writeTx1CanCommit, writeTx1Cohort);
873         followerTestKit.doCommit(writeTx2CanCommit, writeTx2Cohort);
874         followerTestKit.doCommit(writeTx3Cohort);
875         followerTestKit.doCommit(writeTx4Cohort);
876         followerTestKit.doCommit(rwTxCohort);
877
878         DOMStoreReadTransaction readTx = leaderDistributedDataStore.newReadOnlyTransaction();
879         verifyCars(readTx, cars.toArray(new MapEntryNode[cars.size()]));
880         verifyNode(readTx, PeopleModel.PERSON_LIST_PATH, people);
881     }
882
883     @Test
884     public void testLeadershipTransferOnShutdown() throws Exception {
885         //TODO remove when test passes also for ClientBackedDataStore
886         Assume.assumeTrue(testParameter.equals(DistributedDataStore.class));
887         leaderDatastoreContextBuilder.shardBatchedModificationCount(1);
888         followerDatastoreContextBuilder.shardElectionTimeoutFactor(10).customRaftPolicyImplementation(null);
889         final String testName = "testLeadershipTransferOnShutdown";
890         initDatastores(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, CARS_AND_PEOPLE);
891
892         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
893                 DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(100),
894                 commitTimeout);
895         try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
896                 testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
897
898             followerTestKit.waitForMembersUp("member-3");
899             follower2TestKit.waitForMembersUp("member-1", "member-2");
900
901             // Create and submit a couple tx's so they're pending.
902
903             DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
904             writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
905             writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
906             writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
907             final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
908
909             IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
910                 stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
911
912             writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
913             final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
914             writeTx.write(CarsModel.newCarPath("optima"), car);
915             final DOMStoreThreePhaseCommitCohort cohort2 = writeTx.ready();
916
917             IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
918                 stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
919
920             // Gracefully stop the leader via a Shutdown message.
921
922             sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
923                 .shardElectionTimeoutFactor(100));
924
925             final FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS);
926             final Future<ActorRef> future = leaderDistributedDataStore.getActorContext().findLocalShardAsync("cars");
927             final ActorRef leaderActor = Await.result(future, duration);
928
929             final Future<Boolean> stopFuture = Patterns.gracefulStop(leaderActor, duration, Shutdown.INSTANCE);
930
931             // Commit the 2 transactions. They should finish and succeed.
932
933             followerTestKit.doCommit(cohort1);
934             followerTestKit.doCommit(cohort2);
935
936             // Wait for the leader actor to stop.
937
938             final Boolean stopped = Await.result(stopFuture, duration);
939             assertEquals("Stopped", Boolean.TRUE, stopped);
940
941             // Verify leadership was transferred by reading the committed data from the other nodes.
942
943             verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car);
944             verifyCars(follower2DistributedDataStore.newReadOnlyTransaction(), car);
945         }
946     }
947
948     @Test
949     public void testTransactionWithIsolatedLeader() throws Exception {
950         //TODO remove when test passes also for ClientBackedDataStore
951         Assume.assumeTrue(testParameter.equals(DistributedDataStore.class));
952         // Set the isolated leader check interval high so we can control the switch to IsolatedLeader.
953         leaderDatastoreContextBuilder.shardIsolatedLeaderCheckIntervalInMillis(10000000);
954         final String testName = "testTransactionWithIsolatedLeader";
955         initDatastoresWithCars(testName);
956
957         // Tx that is submitted after the follower is stopped but before the leader transitions to IsolatedLeader.
958         final DOMStoreWriteTransaction preIsolatedLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
959         preIsolatedLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
960
961         // Tx that is submitted after the leader transitions to IsolatedLeader.
962         final DOMStoreWriteTransaction noShardLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
963         noShardLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
964
965         // Tx that is submitted after the follower is reinstated.
966         final DOMStoreWriteTransaction successWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
967         successWriteTx.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer());
968
969         // Stop the follower
970         followerTestKit.watch(followerDistributedDataStore.getActorContext().getShardManager());
971         followerDistributedDataStore.close();
972         followerTestKit.expectTerminated(followerDistributedDataStore.getActorContext().getShardManager());
973
974         // Submit the preIsolatedLeaderWriteTx so it's pending
975         final DOMStoreThreePhaseCommitCohort preIsolatedLeaderTxCohort = preIsolatedLeaderWriteTx.ready();
976
977         // Change the isolated leader check interval low so it changes to IsolatedLeader.
978         sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
979                 .shardIsolatedLeaderCheckIntervalInMillis(200));
980
981         MemberNode.verifyRaftState(leaderDistributedDataStore, "cars",
982             raftState -> assertEquals("getRaftState", "IsolatedLeader", raftState.getRaftState()));
983
984         try {
985             leaderTestKit.doCommit(noShardLeaderWriteTx.ready());
986             fail("Expected NoShardLeaderException");
987         } catch (final ExecutionException e) {
988             assertEquals("getCause", NoShardLeaderException.class, Throwables.getRootCause(e).getClass());
989         }
990
991         sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
992                 .shardElectionTimeoutFactor(100));
993
994         final DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready();
995
996         followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
997                 testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
998
999         leaderTestKit.doCommit(preIsolatedLeaderTxCohort);
1000         leaderTestKit.doCommit(successTxCohort);
1001     }
1002
1003     @Test
1004     public void testTransactionWithShardLeaderNotResponding() throws Exception {
1005         followerDatastoreContextBuilder.frontendRequestTimeoutInSeconds(2);
1006         followerDatastoreContextBuilder.shardElectionTimeoutFactor(50);
1007         initDatastoresWithCars("testTransactionWithShardLeaderNotResponding");
1008
1009         // Do an initial read to get the primary shard info cached.
1010
1011         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1012         readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1013
1014         // Shutdown the leader and try to create a new tx.
1015
1016         TestKit.shutdownActorSystem(leaderSystem, true);
1017
1018         followerDatastoreContextBuilder.operationTimeoutInMillis(50).shardElectionTimeoutFactor(1);
1019         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder);
1020
1021         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1022
1023         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1024
1025         try {
1026             followerTestKit.doCommit(rwTx.ready());
1027             fail("Exception expected");
1028         } catch (final ExecutionException e) {
1029             final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
1030             if (DistributedDataStore.class.equals(testParameter)) {
1031                 assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException
1032                         || e.getCause() instanceof ShardLeaderNotRespondingException);
1033             } else {
1034                 assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
1035             }
1036         }
1037     }
1038
    @Test
    public void testTransactionWithCreateTxFailureDueToNoLeader() throws Exception {
        followerDatastoreContextBuilder.frontendRequestTimeoutInSeconds(2);
        initDatastoresWithCars("testTransactionWithCreateTxFailureDueToNoLeader");

        // Do an initial read to get the primary shard info cached.

        final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
        readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);

        // Shut down the leader and try to create a new tx.

        TestKit.shutdownActorSystem(leaderSystem, true);

        Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);

        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);

        sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
                .operationTimeoutInMillis(10).shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null));

        final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();

        rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());

        try {
            followerTestKit.doCommit(rwTx.ready());
            fail("Exception expected");
        } catch (final ExecutionException e) {
            final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
            if (DistributedDataStore.class.equals(testParameter)) {
                assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException);
            } else {
                assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
            }
        }
    }

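    // Verifies that a transaction created while the original leader is shutting down still succeeds: the
    // front end is expected to retry past the initial ask timeout and commit once a new leader is elected
    // among the remaining members.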
    @Test
    public void testTransactionRetryWithInitialAskTimeoutExOnCreateTx() throws Exception {
        followerDatastoreContextBuilder.backendAlivenessTimerIntervalInSeconds(2);
        final String testName = "testTransactionRetryWithInitialAskTimeoutExOnCreateTx";
        initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);

        final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder()
                .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10);
        final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
                follower2System, follower2DatastoreContextBuilder, commitTimeout);

        try (AbstractDataStore ds =
                follower2TestKit.setupAbstractDataStore(
                        testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {

            followerTestKit.waitForMembersUp("member-1", "member-3");
            follower2TestKit.waitForMembersUp("member-1", "member-2");

            // Do an initial read to get the primary shard info cached.

            final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
            readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);

            // Shut down the leader and try to create a new tx.

            TestKit.shutdownActorSystem(leaderSystem, true);

            Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);

            sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
                .operationTimeoutInMillis(500).shardElectionTimeoutFactor(5).customRaftPolicyImplementation(null));

            final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();

            rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());

            followerTestKit.doCommit(rwTx.ready());
        }
    }

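    // Verifies leader-to-follower snapshot installation: the leader shard recovers from a pre-seeded
    // persisted snapshot, the follower starts with no data, and both shards are expected to end up saving
    // a snapshot whose root node matches the seeded cars data.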
    @Test
    public void testInstallSnapshot() throws Exception {
        final String testName = "testInstallSnapshot";
        final String leaderCarShardName = "member-1-shard-cars-" + testName;
        final String followerCarShardName = "member-2-shard-cars-" + testName;

        // Set up a saved snapshot on the leader. The follower will start up with no data and the leader
        // should install a snapshot to sync the follower.

        final DataTree tree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_CONFIGURATION,
            SchemaContextHelper.full());

        final ContainerNode carsNode = CarsModel.newCarsNode(
                CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", BigInteger.valueOf(20000))));
        AbstractShardTest.writeToStore(tree, CarsModel.BASE_PATH, carsNode);

        final NormalizedNode<?, ?> snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.EMPTY);
        final Snapshot initialSnapshot = Snapshot.create(
                new ShardSnapshotState(new MetadataShardDataTreeSnapshot(snapshotRoot)),
                Collections.emptyList(), 5, 1, 5, 1, 1, null, null);
        InMemorySnapshotStore.addSnapshot(leaderCarShardName, initialSnapshot);

        InMemorySnapshotStore.addSnapshotSavedLatch(leaderCarShardName);
        InMemorySnapshotStore.addSnapshotSavedLatch(followerCarShardName);

        initDatastoresWithCars(testName);

        final Optional<NormalizedNode<?, ?>> readOptional = leaderDistributedDataStore.newReadOnlyTransaction().read(
                CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
        assertEquals("isPresent", true, readOptional.isPresent());
        assertEquals("Node", carsNode, readOptional.get());

        verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(leaderCarShardName, Snapshot.class),
                initialSnapshot, snapshotRoot);

        verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(followerCarShardName, Snapshot.class),
                initialSnapshot, snapshotRoot);
    }

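    // Exercises large-message slicing on the tell-based protocol: with maximumMessageSliceSize lowered to 100,
    // writing the cars container and reading it back through the same read-write transaction should force the
    // messages to be sliced and reassembled transparently.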
    @Test
    public void testReadWriteMessageSlicing() throws Exception {
        // Message slicing is only implemented for the tell-based protocol.
        Assume.assumeTrue(testParameter.equals(ClientBackedDataStore.class));

        leaderDatastoreContextBuilder.maximumMessageSliceSize(100);
        followerDatastoreContextBuilder.maximumMessageSliceSize(100);
        initDatastoresWithCars("testReadWriteMessageSlicing");

        final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();

        final NormalizedNode<?, ?> carsNode = CarsModel.create();
        rwTx.write(CarsModel.BASE_PATH, carsNode);

        verifyNode(rwTx, CarsModel.BASE_PATH, carsNode);
    }

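    // Asserts that a persisted Snapshot matches the expected Raft metadata (last applied term/index and
    // last term/index) and that its state is a MetadataShardDataTreeSnapshot whose root node equals expRoot.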
    private static void verifySnapshot(final Snapshot actual, final Snapshot expected,
                                       final NormalizedNode<?, ?> expRoot) {
        assertEquals("Snapshot getLastAppliedTerm", expected.getLastAppliedTerm(), actual.getLastAppliedTerm());
        assertEquals("Snapshot getLastAppliedIndex", expected.getLastAppliedIndex(), actual.getLastAppliedIndex());
        assertEquals("Snapshot getLastTerm", expected.getLastTerm(), actual.getLastTerm());
        assertEquals("Snapshot getLastIndex", expected.getLastIndex(), actual.getLastIndex());
        assertEquals("Snapshot state type", ShardSnapshotState.class, actual.getState().getClass());
        final MetadataShardDataTreeSnapshot shardSnapshot =
                (MetadataShardDataTreeSnapshot) ((ShardSnapshotState) actual.getState()).getSnapshot();
        assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().get());
    }

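    // Pushes an updated DatastoreContext into the given data store by mocking a DatastoreContextFactory that
    // returns the supplied builder's settings for both the base and the per-shard contexts.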
    private static void sendDatastoreContextUpdate(final AbstractDataStore dataStore, final Builder builder) {
        final Builder newBuilder = DatastoreContext.newBuilderFrom(builder.build());
        final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
        final Answer<DatastoreContext> answer = invocation -> newBuilder.build();
        Mockito.doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext();
        Mockito.doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
        dataStore.onDatastoreContextUpdated(mockContextFactory);
    }
}