9c78db579e8fa9cc53bc4024bcf8f54ec22bc657
[controller.git] / opendaylight / md-sal / sal-distributed-datastore / src / test / java / org / opendaylight / controller / cluster / datastore / DistributedDataStoreRemotingIntegrationTest.java
1 /*
2  * Copyright (c) 2015, 2017 Brocade Communications Systems, Inc. and others.  All rights reserved.
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6  * and is available at http://www.eclipse.org/legal/epl-v10.html
7  */
8 package org.opendaylight.controller.cluster.datastore;
9
10 import static org.awaitility.Awaitility.await;
11 import static org.hamcrest.Matchers.equalTo;
12 import static org.junit.Assert.assertEquals;
13 import static org.junit.Assert.assertFalse;
14 import static org.junit.Assert.assertNotNull;
15 import static org.junit.Assert.assertTrue;
16 import static org.junit.Assert.fail;
17 import static org.mockito.ArgumentMatchers.any;
18 import static org.mockito.ArgumentMatchers.eq;
19 import static org.mockito.Mockito.timeout;
20 import static org.mockito.Mockito.verify;
21
22 import akka.actor.ActorRef;
23 import akka.actor.ActorSelection;
24 import akka.actor.ActorSystem;
25 import akka.actor.Address;
26 import akka.actor.AddressFromURIString;
27 import akka.cluster.Cluster;
28 import akka.cluster.Member;
29 import akka.dispatch.Futures;
30 import akka.pattern.Patterns;
31 import akka.testkit.javadsl.TestKit;
32 import com.google.common.base.Stopwatch;
33 import com.google.common.base.Throwables;
34 import com.google.common.collect.ImmutableMap;
35 import com.google.common.primitives.UnsignedLong;
36 import com.google.common.util.concurrent.ListenableFuture;
37 import com.google.common.util.concurrent.MoreExecutors;
38 import com.google.common.util.concurrent.Uninterruptibles;
39 import com.typesafe.config.ConfigFactory;
40 import java.util.Arrays;
41 import java.util.Collection;
42 import java.util.Collections;
43 import java.util.Iterator;
44 import java.util.LinkedList;
45 import java.util.List;
46 import java.util.Optional;
47 import java.util.concurrent.ExecutionException;
48 import java.util.concurrent.ExecutorService;
49 import java.util.concurrent.Executors;
50 import java.util.concurrent.TimeUnit;
51 import java.util.concurrent.atomic.AtomicBoolean;
52 import java.util.concurrent.atomic.AtomicLong;
53 import java.util.function.Supplier;
54 import org.junit.After;
55 import org.junit.Assume;
56 import org.junit.Before;
57 import org.junit.Ignore;
58 import org.junit.Test;
59 import org.junit.runner.RunWith;
60 import org.junit.runners.Parameterized;
61 import org.junit.runners.Parameterized.Parameter;
62 import org.junit.runners.Parameterized.Parameters;
63 import org.mockito.Mockito;
64 import org.mockito.stubbing.Answer;
65 import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
66 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
67 import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
68 import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker;
69 import org.opendaylight.controller.cluster.databroker.TestClientBackedDataStore;
70 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
71 import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
72 import org.opendaylight.controller.cluster.datastore.TestShard.StartDropMessages;
73 import org.opendaylight.controller.cluster.datastore.TestShard.StopDropMessages;
74 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
75 import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
76 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
77 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
78 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
79 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
80 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
81 import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
82 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
83 import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
84 import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
85 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
86 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
87 import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
88 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
89 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
90 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
91 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
92 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
93 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
94 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
95 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
96 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
97 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
98 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
99 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
100 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
101 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
102 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
103 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
104 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
105 import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
106 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
107 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
108 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
109 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
110 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
111 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
112 import org.opendaylight.yangtools.yang.common.Uint64;
113 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
114 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
115 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
116 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
117 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
118 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
119 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
120 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
121 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
122 import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
123 import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
124 import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
125 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
126 import scala.collection.Set;
127 import scala.concurrent.Await;
128 import scala.concurrent.Future;
129 import scala.concurrent.duration.FiniteDuration;
130
131 /**
132  * End-to-end distributed data store tests that exercise remote shards and transactions.
133  *
134  * @author Thomas Pantelis
135  */
136 @RunWith(Parameterized.class)
137 public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
138
139     @Parameters(name = "{0}")
140     public static Collection<Object[]> data() {
141         return Arrays.asList(new Object[][] {
142                 { TestDistributedDataStore.class, 7}, { TestClientBackedDataStore.class, 12 }
143         });
144     }
145
146     @Parameter(0)
147     public Class<? extends AbstractDataStore> testParameter;
148     @Parameter(1)
149     public int commitTimeout;
150
151     private static final String[] CARS_AND_PEOPLE = {"cars", "people"};
152     private static final String[] CARS = {"cars"};
153
154     private static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse(
155             "akka://cluster-test@127.0.0.1:2558");
156     private static final Address MEMBER_2_ADDRESS = AddressFromURIString.parse(
157             "akka://cluster-test@127.0.0.1:2559");
158
159     private static final String MODULE_SHARDS_CARS_ONLY_1_2 = "module-shards-cars-member-1-and-2.conf";
160     private static final String MODULE_SHARDS_CARS_PEOPLE_1_2 = "module-shards-member1-and-2.conf";
161     private static final String MODULE_SHARDS_CARS_PEOPLE_1_2_3 = "module-shards-member1-and-2-and-3.conf";
162     private static final String MODULE_SHARDS_CARS_1_2_3 = "module-shards-cars-member-1-and-2-and-3.conf";
163
164     private ActorSystem leaderSystem;
165     private ActorSystem followerSystem;
166     private ActorSystem follower2System;
167
168     private final DatastoreContext.Builder leaderDatastoreContextBuilder =
169             DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);
170
171     private final DatastoreContext.Builder followerDatastoreContextBuilder =
172             DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
173                 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
174     private final TransactionIdentifier tx1 = nextTransactionId();
175     private final TransactionIdentifier tx2 = nextTransactionId();
176
177     private AbstractDataStore followerDistributedDataStore;
178     private AbstractDataStore leaderDistributedDataStore;
179     private IntegrationTestKit followerTestKit;
180     private IntegrationTestKit leaderTestKit;
181
182     @Before
183     public void setUp() {
184         InMemoryJournal.clear();
185         InMemorySnapshotStore.clear();
186
187         leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
188         Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);
189
190         followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
191         Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);
192
193         follower2System = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member3"));
194         Cluster.get(follower2System).join(MEMBER_1_ADDRESS);
195     }
196
197     @After
198     public void tearDown() {
199         if (followerDistributedDataStore != null) {
200             leaderDistributedDataStore.close();
201         }
202         if (leaderDistributedDataStore != null) {
203             leaderDistributedDataStore.close();
204         }
205
206         TestKit.shutdownActorSystem(leaderSystem);
207         TestKit.shutdownActorSystem(followerSystem);
208         TestKit.shutdownActorSystem(follower2System);
209
210         InMemoryJournal.clear();
211         InMemorySnapshotStore.clear();
212     }
213
214     private void initDatastoresWithCars(final String type) throws Exception {
215         initDatastores(type, MODULE_SHARDS_CARS_ONLY_1_2, CARS);
216     }
217
218     private void initDatastoresWithCarsAndPeople(final String type) throws Exception {
219         initDatastores(type, MODULE_SHARDS_CARS_PEOPLE_1_2, CARS_AND_PEOPLE);
220     }
221
222     private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards)
223             throws Exception {
224         initDatastores(type, moduleShardsConfig, shards, leaderDatastoreContextBuilder,
225                 followerDatastoreContextBuilder);
226     }
227
228     private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards,
229             final DatastoreContext.Builder leaderBuilder, final DatastoreContext.Builder followerBuilder)
230                     throws Exception {
231         leaderTestKit = new IntegrationTestKit(leaderSystem, leaderBuilder, commitTimeout);
232
233         leaderDistributedDataStore = leaderTestKit.setupAbstractDataStore(
234                 testParameter, type, moduleShardsConfig, false, shards);
235
236         followerTestKit = new IntegrationTestKit(followerSystem, followerBuilder, commitTimeout);
237         followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
238                 testParameter, type, moduleShardsConfig, false, shards);
239
240         leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(), shards);
241
242         leaderTestKit.waitForMembersUp("member-2");
243         followerTestKit.waitForMembersUp("member-1");
244     }
245
246     private static void verifyCars(final DOMStoreReadTransaction readTx, final MapEntryNode... entries)
247             throws Exception {
248         final Optional<NormalizedNode<?, ?>> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
249         assertTrue("isPresent", optional.isPresent());
250
251         final CollectionNodeBuilder<MapEntryNode, MapNode> listBuilder = ImmutableNodes.mapNodeBuilder(
252                 CarsModel.CAR_QNAME);
253         for (final NormalizedNode<?, ?> entry: entries) {
254             listBuilder.withChild((MapEntryNode) entry);
255         }
256
257         assertEquals("Car list node", listBuilder.build(), optional.get());
258     }
259
260     private static void verifyNode(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path,
261             final NormalizedNode<?, ?> expNode) throws Exception {
262         final Optional<NormalizedNode<?, ?>> optional = readTx.read(path).get(5, TimeUnit.SECONDS);
263         assertTrue("isPresent", optional.isPresent());
264         assertEquals("Data node", expNode, optional.get());
265     }
266
267     private static void verifyExists(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path)
268             throws Exception {
269         final Boolean exists = readTx.exists(path).get(5, TimeUnit.SECONDS);
270         assertEquals("exists", Boolean.TRUE, exists);
271     }
272
273     @Test
274     public void testWriteTransactionWithSingleShard() throws Exception {
275         final String testName = "testWriteTransactionWithSingleShard";
276         initDatastoresWithCars(testName);
277
278         final String followerCarShardName = "member-2-shard-cars-" + testName;
279
280         DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
281         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
282
283         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
284         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
285
286         final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
287         final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
288         writeTx.merge(car1Path, car1);
289
290         final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
291         final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
292         writeTx.merge(car2Path, car2);
293
294         followerTestKit.doCommit(writeTx.ready());
295
296         verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1, car2);
297
298         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
299
300         // Test delete
301
302         writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
303
304         writeTx.delete(car1Path);
305
306         followerTestKit.doCommit(writeTx.ready());
307
308         verifyExists(followerDistributedDataStore.newReadOnlyTransaction(), car2Path);
309
310         verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car2);
311
312         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car2);
313
314         // Re-instate the follower member 2 as a single-node to verify replication and recovery.
315
316         // The following is a bit tricky. Before we reinstate the follower we need to ensure it has persisted and
317         // applied and all the log entries from the leader. Since we've verified the car data above we know that
318         // all the transactions have been applied on the leader so we first read and capture its lastAppliedIndex.
319         final AtomicLong leaderLastAppliedIndex = new AtomicLong();
320         IntegrationTestKit.verifyShardState(leaderDistributedDataStore, CARS[0],
321             state -> leaderLastAppliedIndex.set(state.getLastApplied()));
322
323         // Now we need to make sure the follower has persisted the leader's lastAppliedIndex via ApplyJournalEntries.
324         // However we don't know exactly how many ApplyJournalEntries messages there will be as it can differ between
325         // the tell-based and ask-based front-ends. For ask-based there will be exactly 2 ApplyJournalEntries but
326         // tell-based persists additional payloads which could be replicated and applied in a batch resulting in
327         // either 2 or 3 ApplyJournalEntries. To handle this we read the follower's persisted ApplyJournalEntries
328         // until we find the one that encompasses the leader's lastAppliedIndex.
329         Stopwatch sw = Stopwatch.createStarted();
330         boolean done = false;
331         while (!done) {
332             final List<ApplyJournalEntries> entries = InMemoryJournal.get(followerCarShardName,
333                     ApplyJournalEntries.class);
334             for (ApplyJournalEntries aje: entries) {
335                 if (aje.getToIndex() >= leaderLastAppliedIndex.get()) {
336                     done = true;
337                     break;
338                 }
339             }
340
341             assertTrue("Follower did not persist ApplyJournalEntries containing leader's lastAppliedIndex "
342                     + leaderLastAppliedIndex + ". Entries persisted: " + entries, sw.elapsed(TimeUnit.SECONDS) <= 5);
343
344             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
345         }
346
347         TestKit.shutdownActorSystem(leaderSystem, true);
348         TestKit.shutdownActorSystem(followerSystem, true);
349
350         final ActorSystem newSystem = newActorSystem("reinstated-member2", "Member2");
351
352         try (AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder,
353                 commitTimeout)
354                 .setupAbstractDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
355             verifyCars(member2Datastore.newReadOnlyTransaction(), car2);
356         }
357     }
358
359     @Test
360     public void testSingleTransactionsWritesInQuickSuccession() throws Exception {
361         final String testName = "testWriteTransactionWithSingleShard";
362         initDatastoresWithCars(testName);
363
364         final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
365
366         DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
367         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
368         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
369         followerTestKit.doCommit(writeTx.ready());
370
371         int numCars = 5;
372         for (int i = 0; i < numCars; i++) {
373             writeTx = txChain.newWriteOnlyTransaction();
374             writeTx.write(CarsModel.newCarPath("car" + i),
375                     CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
376
377             followerTestKit.doCommit(writeTx.ready());
378
379             DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
380             domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
381
382             domStoreReadTransaction.close();
383         }
384
385         // wait to let the shard catch up with purged
386         await("Range set leak test").atMost(5, TimeUnit.SECONDS)
387                 .pollInterval(500, TimeUnit.MILLISECONDS)
388                 .untilAsserted(() -> {
389                     Optional<ActorRef> localShard =
390                             leaderDistributedDataStore.getActorUtils().findLocalShard("cars");
391                     FrontendShardDataTreeSnapshotMetadata frontendMetadata =
392                             (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
393                                     .executeOperation(localShard.get(), new RequestFrontendMetadata());
394
395                     if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
396                         Iterator<FrontendHistoryMetadata> iterator =
397                                 frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
398                         FrontendHistoryMetadata metadata = iterator.next();
399                         while (iterator.hasNext() && metadata.getHistoryId() != 1) {
400                             metadata = iterator.next();
401                         }
402
403                         assertEquals(0, metadata.getClosedTransactions().size());
404
405                         final var purgedRanges = metadata.getPurgedTransactions().ranges();
406                         assertEquals(1, purgedRanges.size());
407                         final var purgedRange = purgedRanges.first();
408                         assertEquals(UnsignedLong.ZERO, purgedRange.lower());
409                         assertEquals(UnsignedLong.valueOf(10), purgedRange.upper());
410                     } else {
411                         // ask based should track no metadata
412                         assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
413                     }
414                 });
415
416         final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
417                 .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
418         assertTrue("isPresent", optional.isPresent());
419         assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
420     }
421
422     @Test
423     @Ignore("Flushes out tell based leak needs to be handled separately")
424     public void testCloseTransactionMetadataLeak() throws Exception {
425         // Ask based frontend seems to have some issues with back to back close
426         Assume.assumeTrue(testParameter.isAssignableFrom(TestClientBackedDataStore.class));
427
428         final String testName = "testWriteTransactionWithSingleShard";
429         initDatastoresWithCars(testName);
430
431         final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
432
433         DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
434         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
435         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
436         followerTestKit.doCommit(writeTx.ready());
437
438         int numCars = 5;
439         for (int i = 0; i < numCars; i++) {
440             writeTx = txChain.newWriteOnlyTransaction();
441             writeTx.close();
442
443             DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
444             domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
445
446             domStoreReadTransaction.close();
447         }
448
449         writeTx = txChain.newWriteOnlyTransaction();
450         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
451         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
452         followerTestKit.doCommit(writeTx.ready());
453
454         // wait to let the shard catch up with purged
455         await("Close transaction purge leak test.").atMost(5, TimeUnit.SECONDS)
456                 .pollInterval(500, TimeUnit.MILLISECONDS)
457                 .untilAsserted(() -> {
458                     Optional<ActorRef> localShard =
459                             leaderDistributedDataStore.getActorUtils().findLocalShard("cars");
460                     FrontendShardDataTreeSnapshotMetadata frontendMetadata =
461                             (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
462                                     .executeOperation(localShard.get(), new RequestFrontendMetadata());
463
464                     if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
465                         Iterator<FrontendHistoryMetadata> iterator =
466                                 frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
467                         FrontendHistoryMetadata metadata = iterator.next();
468                         while (iterator.hasNext() && metadata.getHistoryId() != 1) {
469                             metadata = iterator.next();
470                         }
471
472                         assertEquals(0, metadata.getClosedTransactions().size());
473                         assertEquals(1, metadata.getPurgedTransactions().size());
474                     } else {
475                         // ask based should track no metadata
476                         assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
477                     }
478                 });
479
480         final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
481                 .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
482         assertTrue("isPresent", optional.isPresent());
483         assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
484     }
485
486     @Test
487     public void testReadWriteTransactionWithSingleShard() throws Exception {
488         initDatastoresWithCars("testReadWriteTransactionWithSingleShard");
489
490         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
491         assertNotNull("newReadWriteTransaction returned null", rwTx);
492
493         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
494         rwTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
495
496         final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
497         rwTx.merge(CarsModel.newCarPath("optima"), car1);
498
499         verifyCars(rwTx, car1);
500
501         final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
502         final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
503         rwTx.merge(car2Path, car2);
504
505         verifyExists(rwTx, car2Path);
506
507         followerTestKit.doCommit(rwTx.ready());
508
509         verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1, car2);
510     }
511
512     @Test
513     public void testWriteTransactionWithMultipleShards() throws Exception {
514         initDatastoresWithCarsAndPeople("testWriteTransactionWithMultipleShards");
515
516         final DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
517         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
518
519         final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
520         final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
521         writeTx.write(carsPath, carsNode);
522
523         final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
524         final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
525         writeTx.write(peoplePath, peopleNode);
526
527         followerTestKit.doCommit(writeTx.ready());
528
529         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
530
531         verifyNode(readTx, carsPath, carsNode);
532         verifyNode(readTx, peoplePath, peopleNode);
533     }
534
535     @Test
536     public void testReadWriteTransactionWithMultipleShards() throws Exception {
537         initDatastoresWithCarsAndPeople("testReadWriteTransactionWithMultipleShards");
538
539         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
540         assertNotNull("newReadWriteTransaction returned null", rwTx);
541
542         final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
543         final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
544         rwTx.write(carsPath, carsNode);
545
546         final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
547         final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
548         rwTx.write(peoplePath, peopleNode);
549
550         followerTestKit.doCommit(rwTx.ready());
551
552         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
553
554         verifyNode(readTx, carsPath, carsNode);
555         verifyNode(readTx, peoplePath, peopleNode);
556     }
557
558     @Test
559     public void testTransactionChainWithSingleShard() throws Exception {
560         initDatastoresWithCars("testTransactionChainWithSingleShard");
561
562         final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
563
564         // Add the top-level cars container with write-only.
565
566         final DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
567         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
568
569         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
570
571         final DOMStoreThreePhaseCommitCohort writeTxReady = writeTx.ready();
572
573         // Verify the top-level cars container with read-only.
574
575         verifyNode(txChain.newReadOnlyTransaction(), CarsModel.BASE_PATH, CarsModel.emptyContainer());
576
577         // Perform car operations with read-write.
578
579         final DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
580
581         verifyNode(rwTx, CarsModel.BASE_PATH, CarsModel.emptyContainer());
582
583         rwTx.merge(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
584
585         final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
586         final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
587         rwTx.write(car1Path, car1);
588
589         verifyExists(rwTx, car1Path);
590
591         verifyCars(rwTx, car1);
592
593         final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
594         rwTx.merge(CarsModel.newCarPath("sportage"), car2);
595
596         rwTx.delete(car1Path);
597
598         followerTestKit.doCommit(writeTxReady);
599
600         followerTestKit.doCommit(rwTx.ready());
601
602         txChain.close();
603
604         verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car2);
605     }
606
607     @Test
608     public void testTransactionChainWithMultipleShards() throws Exception {
609         initDatastoresWithCarsAndPeople("testTransactionChainWithMultipleShards");
610
611         final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
612
613         DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
614         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
615
616         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
617         writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
618
619         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
620         writeTx.write(PeopleModel.PERSON_LIST_PATH, PeopleModel.newPersonMapNode());
621
622         followerTestKit.doCommit(writeTx.ready());
623
624         final DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction();
625
626         final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
627         final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
628         readWriteTx.write(carPath, car);
629
630         final MapEntryNode person = PeopleModel.newPersonEntry("jack");
631         final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
632         readWriteTx.merge(personPath, person);
633
634         Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
635         assertTrue("isPresent", optional.isPresent());
636         assertEquals("Data node", car, optional.get());
637
638         optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
639         assertTrue("isPresent", optional.isPresent());
640         assertEquals("Data node", person, optional.get());
641
642         final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();
643
644         writeTx = txChain.newWriteOnlyTransaction();
645
646         writeTx.delete(personPath);
647
648         final DOMStoreThreePhaseCommitCohort cohort3 = writeTx.ready();
649
650         followerTestKit.doCommit(cohort2);
651         followerTestKit.doCommit(cohort3);
652
653         txChain.close();
654
655         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
656         verifyCars(readTx, car);
657
658         optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
659         assertFalse("isPresent", optional.isPresent());
660     }
661
662     @Test
663     public void testChainedTransactionFailureWithSingleShard() throws Exception {
664         initDatastoresWithCars("testChainedTransactionFailureWithSingleShard");
665
666         final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
667                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
668                         LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
669                         MoreExecutors.directExecutor());
670
671         final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
672         final DOMTransactionChain txChain = broker.createTransactionChain(listener);
673
674         final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
675
676         final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
677                 new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
678                     .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
679
680         writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
681
682         try {
683             writeTx.commit().get(5, TimeUnit.SECONDS);
684             fail("Expected TransactionCommitFailedException");
685         } catch (final ExecutionException e) {
686             // Expected
687         }
688
689         verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
690
691         txChain.close();
692         broker.close();
693     }
694
695     @Test
696     public void testChainedTransactionFailureWithMultipleShards() throws Exception {
697         initDatastoresWithCarsAndPeople("testChainedTransactionFailureWithMultipleShards");
698
699         final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
700                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
701                         LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
702                         MoreExecutors.directExecutor());
703
704         final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
705         final DOMTransactionChain txChain = broker.createTransactionChain(listener);
706
707         final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
708
709         writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
710
711         final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
712                 new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
713                     .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
714
715         // Note that merge will validate the data and fail but put succeeds b/c deep validation is not
716         // done for put for performance reasons.
717         writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
718
719         try {
720             writeTx.commit().get(5, TimeUnit.SECONDS);
721             fail("Expected TransactionCommitFailedException");
722         } catch (final ExecutionException e) {
723             // Expected
724         }
725
726         verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
727
728         txChain.close();
729         broker.close();
730     }
731
732     @Test
733     public void testSingleShardTransactionsWithLeaderChanges() throws Exception {
734         followerDatastoreContextBuilder.backendAlivenessTimerIntervalInSeconds(2);
735         final String testName = "testSingleShardTransactionsWithLeaderChanges";
736         initDatastoresWithCars(testName);
737
738         final String followerCarShardName = "member-2-shard-cars-" + testName;
739         InMemoryJournal.addWriteMessagesCompleteLatch(followerCarShardName, 1, ApplyJournalEntries.class);
740
741         // Write top-level car container from the follower so it uses a remote Tx.
742
743         DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
744
745         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
746         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
747
748         followerTestKit.doCommit(writeTx.ready());
749
750         InMemoryJournal.waitForWriteMessagesComplete(followerCarShardName);
751
752         // Switch the leader to the follower
753
754         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
755                 .shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null));
756
757         TestKit.shutdownActorSystem(leaderSystem, true);
758         Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
759
760         followerTestKit.waitUntilNoLeader(followerDistributedDataStore.getActorUtils(), CARS);
761
762         leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
763         Cluster.get(leaderSystem).join(MEMBER_2_ADDRESS);
764
765         final DatastoreContext.Builder newMember1Builder = DatastoreContext.newBuilder()
766                 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
767         IntegrationTestKit newMember1TestKit = new IntegrationTestKit(leaderSystem, newMember1Builder, commitTimeout);
768
769         try (AbstractDataStore ds =
770                 newMember1TestKit.setupAbstractDataStore(
771                         testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) {
772
773             followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), CARS);
774
775             // Write a car entry to the new leader - should switch to local Tx
776
777             writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
778
779             MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
780             YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
781             writeTx.merge(car1Path, car1);
782
783             followerTestKit.doCommit(writeTx.ready());
784
785             verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1);
786         }
787     }
788
789     @SuppressWarnings("unchecked")
790     @Test
791     public void testReadyLocalTransactionForwardedToLeader() throws Exception {
792         initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader");
793         followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");
794
795         final Optional<ActorRef> carsFollowerShard =
796                 followerDistributedDataStore.getActorUtils().findLocalShard("cars");
797         assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
798
799         final DataTree dataTree = new InMemoryDataTreeFactory().create(
800             DataTreeConfiguration.DEFAULT_OPERATIONAL, SchemaContextHelper.full());
801
802         // Send a tx with immediate commit.
803
804         DataTreeModification modification = dataTree.takeSnapshot().newModification();
805         new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
806         new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);
807
808         final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
809         new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
810         modification.ready();
811
812         ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(tx1 , modification, true, Optional.empty());
813
814         carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
815         Object resp = followerTestKit.expectMsgClass(Object.class);
816         if (resp instanceof akka.actor.Status.Failure) {
817             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
818         }
819
820         assertEquals("Response type", CommitTransactionReply.class, resp.getClass());
821
822         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1);
823
824         // Send another tx without immediate commit.
825
826         modification = dataTree.takeSnapshot().newModification();
827         MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
828         new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
829         modification.ready();
830
831         readyLocal = new ReadyLocalTransaction(tx2 , modification, false, Optional.empty());
832
833         carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
834         resp = followerTestKit.expectMsgClass(Object.class);
835         if (resp instanceof akka.actor.Status.Failure) {
836             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
837         }
838
839         assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());
840
841         final ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
842                 ((ReadyTransactionReply)resp).getCohortPath());
843
844         final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
845         Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
846         ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
847                 leaderDistributedDataStore.getActorUtils(), Arrays.asList(
848                         new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
849         cohort.canCommit().get(5, TimeUnit.SECONDS);
850         cohort.preCommit().get(5, TimeUnit.SECONDS);
851         cohort.commit().get(5, TimeUnit.SECONDS);
852
853         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
854     }
855
856     @SuppressWarnings("unchecked")
857     @Test
858     public void testForwardedReadyTransactionForwardedToLeader() throws Exception {
859         initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader");
860         followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");
861
862         final Optional<ActorRef> carsFollowerShard =
863                 followerDistributedDataStore.getActorUtils().findLocalShard("cars");
864         assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
865
866         carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
867         final DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class);
868
869         // Send a tx with immediate commit.
870
871         DataTreeModification modification = dataTree.takeSnapshot().newModification();
872         new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
873         new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);
874
875         final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
876         new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
877
878         ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1,
879                 DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
880                         Mockito.mock(ShardDataTreeTransactionParent.class), tx1, modification), true,
881                 Optional.empty());
882
883         carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
884         Object resp = followerTestKit.expectMsgClass(Object.class);
885         if (resp instanceof akka.actor.Status.Failure) {
886             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
887         }
888
889         assertEquals("Response type", CommitTransactionReply.class, resp.getClass());
890
891         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1);
892
893         // Send another tx without immediate commit.
894
895         modification = dataTree.takeSnapshot().newModification();
896         MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
897         new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
898
899         forwardedReady = new ForwardedReadyTransaction(tx2,
900                 DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
901                         Mockito.mock(ShardDataTreeTransactionParent.class), tx2, modification), false,
902                 Optional.empty());
903
904         carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
905         resp = followerTestKit.expectMsgClass(Object.class);
906         if (resp instanceof akka.actor.Status.Failure) {
907             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
908         }
909
910         assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());
911
912         ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
913                 ((ReadyTransactionReply)resp).getCohortPath());
914
915         final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
916         Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
917         final ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
918                 leaderDistributedDataStore.getActorUtils(), Arrays.asList(
919                         new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
920         cohort.canCommit().get(5, TimeUnit.SECONDS);
921         cohort.preCommit().get(5, TimeUnit.SECONDS);
922         cohort.commit().get(5, TimeUnit.SECONDS);
923
924         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
925     }
926
927     @Test
928     public void testTransactionForwardedToLeaderAfterRetry() throws Exception {
929         // FIXME: remove when test passes also for ClientBackedDataStore
930         Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
931         followerDatastoreContextBuilder.shardBatchedModificationCount(2);
932         leaderDatastoreContextBuilder.shardBatchedModificationCount(2);
933         initDatastoresWithCarsAndPeople("testTransactionForwardedToLeaderAfterRetry");
934
935         // Do an initial write to get the primary shard info cached.
936
937         final DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction();
938         initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
939         initialWriteTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
940         followerTestKit.doCommit(initialWriteTx.ready());
941
942         // Wait for the commit to be replicated to the follower.
943
944         MemberNode.verifyRaftState(followerDistributedDataStore, "cars",
945             raftState -> assertEquals("getLastApplied", 1, raftState.getLastApplied()));
946
947         MemberNode.verifyRaftState(followerDistributedDataStore, "people",
948             raftState -> assertEquals("getLastApplied", 1, raftState.getLastApplied()));
949
950         // Prepare, ready and canCommit a WO tx that writes to 2 shards. This will become the current tx in
951         // the leader shard.
952
953         final DOMStoreWriteTransaction writeTx1 = followerDistributedDataStore.newWriteOnlyTransaction();
954         writeTx1.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
955         writeTx1.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
956         final DOMStoreThreePhaseCommitCohort writeTx1Cohort = writeTx1.ready();
957         final ListenableFuture<Boolean> writeTx1CanCommit = writeTx1Cohort.canCommit();
958         writeTx1CanCommit.get(5, TimeUnit.SECONDS);
959
960         // Prepare and ready another WO tx that writes to 2 shards but don't canCommit yet. This will be queued
961         // in the leader shard.
962
963         final DOMStoreWriteTransaction writeTx2 = followerDistributedDataStore.newWriteOnlyTransaction();
964         final LinkedList<MapEntryNode> cars = new LinkedList<>();
965         int carIndex = 1;
966         cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
967         writeTx2.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
968         carIndex++;
969         NormalizedNode<?, ?> people = ImmutableNodes.mapNodeBuilder(PeopleModel.PERSON_QNAME)
970                 .withChild(PeopleModel.newPersonEntry("Dude")).build();
971         writeTx2.write(PeopleModel.PERSON_LIST_PATH, people);
972         final DOMStoreThreePhaseCommitCohort writeTx2Cohort = writeTx2.ready();
973
974         // Prepare another WO that writes to a single shard and thus will be directly committed on ready. This
975         // tx writes 5 cars so 2 BatchedModidifications messages will be sent initially and cached in the
976         // leader shard (with shardBatchedModificationCount set to 2). The 3rd BatchedModidifications will be
977         // sent on ready.
978
979         final DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction();
980         for (int i = 1; i <= 5; i++, carIndex++) {
981             cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
982             writeTx3.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
983         }
984
985         // Prepare another WO that writes to a single shard. This will send a single BatchedModidifications
986         // message on ready.
987
988         final DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction();
989         cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
990         writeTx4.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
991         carIndex++;
992
993         // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaciton message to the
994         // leader shard on ready.
995
996         final DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction();
997         cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
998         readWriteTx.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
999
1000         IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
1001             stats -> assertEquals("getReadWriteTransactionCount", 5, stats.getReadWriteTransactionCount()));
1002
1003         // Disable elections on the leader so it switches to follower.
1004
1005         sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
1006                 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
1007                 .shardElectionTimeoutFactor(10));
1008
1009         leaderTestKit.waitUntilNoLeader(leaderDistributedDataStore.getActorUtils(), "cars");
1010
1011         // Submit all tx's - the messages should get queued for retry.
1012
1013         final ListenableFuture<Boolean> writeTx2CanCommit = writeTx2Cohort.canCommit();
1014         final DOMStoreThreePhaseCommitCohort writeTx3Cohort = writeTx3.ready();
1015         final DOMStoreThreePhaseCommitCohort writeTx4Cohort = writeTx4.ready();
1016         final DOMStoreThreePhaseCommitCohort rwTxCohort = readWriteTx.ready();
1017
1018         // Enable elections on the other follower so it becomes the leader, at which point the
1019         // tx's should get forwarded from the previous leader to the new leader to complete the commits.
1020
1021         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
1022                 .customRaftPolicyImplementation(null).shardElectionTimeoutFactor(1));
1023         IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorUtils(), "cars")
1024                 .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
1025         IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorUtils(), "people")
1026                 .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
1027
1028         followerTestKit.doCommit(writeTx1CanCommit, writeTx1Cohort);
1029         followerTestKit.doCommit(writeTx2CanCommit, writeTx2Cohort);
1030         followerTestKit.doCommit(writeTx3Cohort);
1031         followerTestKit.doCommit(writeTx4Cohort);
1032         followerTestKit.doCommit(rwTxCohort);
1033
1034         DOMStoreReadTransaction readTx = leaderDistributedDataStore.newReadOnlyTransaction();
1035         verifyCars(readTx, cars.toArray(new MapEntryNode[cars.size()]));
1036         verifyNode(readTx, PeopleModel.PERSON_LIST_PATH, people);
1037     }
1038
1039     @Test
1040     public void testLeadershipTransferOnShutdown() throws Exception {
1041         // FIXME: remove when test passes also for ClientBackedDataStore
1042         Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
1043         leaderDatastoreContextBuilder.shardBatchedModificationCount(1);
1044         followerDatastoreContextBuilder.shardElectionTimeoutFactor(10).customRaftPolicyImplementation(null);
1045         final String testName = "testLeadershipTransferOnShutdown";
1046         initDatastores(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, CARS_AND_PEOPLE);
1047
1048         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
1049                 DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500),
1050                 commitTimeout);
1051         try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
1052                 testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
1053
1054             followerTestKit.waitForMembersUp("member-3");
1055             follower2TestKit.waitForMembersUp("member-1", "member-2");
1056
1057             // Create and submit a couple tx's so they're pending.
1058
1059             DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
1060             writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1061             writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
1062             writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
1063             final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
1064
1065             IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
1066                 stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
1067
1068             writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
1069             final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
1070             writeTx.write(CarsModel.newCarPath("optima"), car);
1071             final DOMStoreThreePhaseCommitCohort cohort2 = writeTx.ready();
1072
1073             IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
1074                 stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
1075
1076             // Gracefully stop the leader via a Shutdown message.
1077
1078             sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
1079                 .shardElectionTimeoutFactor(100));
1080
1081             final FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS);
1082             final Future<ActorRef> future = leaderDistributedDataStore.getActorUtils().findLocalShardAsync("cars");
1083             final ActorRef leaderActor = Await.result(future, duration);
1084
1085             final Future<Boolean> stopFuture = Patterns.gracefulStop(leaderActor, duration, Shutdown.INSTANCE);
1086
1087             // Commit the 2 transactions. They should finish and succeed.
1088
1089             followerTestKit.doCommit(cohort1);
1090             followerTestKit.doCommit(cohort2);
1091
1092             // Wait for the leader actor stopped.
1093
1094             final Boolean stopped = Await.result(stopFuture, duration);
1095             assertEquals("Stopped", Boolean.TRUE, stopped);
1096
1097             // Verify leadership was transferred by reading the committed data from the other nodes.
1098
1099             verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car);
1100             verifyCars(follower2DistributedDataStore.newReadOnlyTransaction(), car);
1101         }
1102     }
1103
1104     @Test
1105     public void testTransactionWithIsolatedLeader() throws Exception {
1106         // FIXME: remove when test passes also for ClientBackedDataStore
1107         Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
1108         // Set the isolated leader check interval high so we can control the switch to IsolatedLeader.
1109         leaderDatastoreContextBuilder.shardIsolatedLeaderCheckIntervalInMillis(10000000);
1110         final String testName = "testTransactionWithIsolatedLeader";
1111         initDatastoresWithCars(testName);
1112
1113         // Tx that is submitted after the follower is stopped but before the leader transitions to IsolatedLeader.
1114         final DOMStoreWriteTransaction preIsolatedLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1115         preIsolatedLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1116
1117         // Tx that is submitted after the leader transitions to IsolatedLeader.
1118         final DOMStoreWriteTransaction noShardLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1119         noShardLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1120
1121         // Tx that is submitted after the follower is reinstated.
1122         final DOMStoreWriteTransaction successWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1123         successWriteTx.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1124
1125         // Stop the follower
1126         followerTestKit.watch(followerDistributedDataStore.getActorUtils().getShardManager());
1127         followerDistributedDataStore.close();
1128         followerTestKit.expectTerminated(followerDistributedDataStore.getActorUtils().getShardManager());
1129
1130         // Submit the preIsolatedLeaderWriteTx so it's pending
1131         final DOMStoreThreePhaseCommitCohort preIsolatedLeaderTxCohort = preIsolatedLeaderWriteTx.ready();
1132
1133         // Change the isolated leader check interval low so it changes to IsolatedLeader.
1134         sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
1135                 .shardIsolatedLeaderCheckIntervalInMillis(200));
1136
1137         MemberNode.verifyRaftState(leaderDistributedDataStore, "cars",
1138             raftState -> assertEquals("getRaftState", "IsolatedLeader", raftState.getRaftState()));
1139
1140         try {
1141             leaderTestKit.doCommit(noShardLeaderWriteTx.ready());
1142             fail("Expected NoShardLeaderException");
1143         } catch (final ExecutionException e) {
1144             assertEquals("getCause", NoShardLeaderException.class, Throwables.getRootCause(e).getClass());
1145         }
1146
1147         sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
1148                 .shardElectionTimeoutFactor(100));
1149
1150         final DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready();
1151
1152         followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
1153                 testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
1154
1155         leaderTestKit.doCommit(preIsolatedLeaderTxCohort);
1156         leaderTestKit.doCommit(successTxCohort);
1157     }
1158
1159     @Test
1160     public void testTransactionWithShardLeaderNotResponding() throws Exception {
1161         followerDatastoreContextBuilder.frontendRequestTimeoutInSeconds(2);
1162         followerDatastoreContextBuilder.shardElectionTimeoutFactor(50);
1163         initDatastoresWithCars("testTransactionWithShardLeaderNotResponding");
1164
1165         // Do an initial read to get the primary shard info cached.
1166
1167         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1168         readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1169
1170         // Shutdown the leader and try to create a new tx.
1171
1172         TestKit.shutdownActorSystem(leaderSystem, true);
1173
1174         followerDatastoreContextBuilder.operationTimeoutInMillis(50).shardElectionTimeoutFactor(1);
1175         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder);
1176
1177         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1178
1179         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1180
1181         try {
1182             followerTestKit.doCommit(rwTx.ready());
1183             fail("Exception expected");
1184         } catch (final ExecutionException e) {
1185             final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
1186             if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
1187                 assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException
1188                         || e.getCause() instanceof ShardLeaderNotRespondingException);
1189             } else {
1190                 assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
1191             }
1192         }
1193     }
1194
1195     @Test
1196     public void testTransactionWithCreateTxFailureDueToNoLeader() throws Exception {
1197         followerDatastoreContextBuilder.frontendRequestTimeoutInSeconds(2);
1198         initDatastoresWithCars("testTransactionWithCreateTxFailureDueToNoLeader");
1199
1200         // Do an initial read to get the primary shard info cached.
1201
1202         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1203         readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1204
1205         // Shutdown the leader and try to create a new tx.
1206
1207         TestKit.shutdownActorSystem(leaderSystem, true);
1208
1209         Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
1210
1211         Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
1212
1213         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
1214                 .operationTimeoutInMillis(10).shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null));
1215
1216         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1217
1218         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1219
1220         try {
1221             followerTestKit.doCommit(rwTx.ready());
1222             fail("Exception expected");
1223         } catch (final ExecutionException e) {
1224             final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
1225             if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
1226                 assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException);
1227             } else {
1228                 assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
1229             }
1230         }
1231     }
1232
1233     @Test
1234     public void testTransactionRetryWithInitialAskTimeoutExOnCreateTx() throws Exception {
1235         followerDatastoreContextBuilder.backendAlivenessTimerIntervalInSeconds(2);
1236         String testName = "testTransactionRetryWithInitialAskTimeoutExOnCreateTx";
1237         initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
1238
1239         final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder()
1240                 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10);
1241         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
1242                 follower2System, follower2DatastoreContextBuilder, commitTimeout);
1243
1244         try (AbstractDataStore ds =
1245                 follower2TestKit.setupAbstractDataStore(
1246                         testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
1247
1248             followerTestKit.waitForMembersUp("member-1", "member-3");
1249             follower2TestKit.waitForMembersUp("member-1", "member-2");
1250
1251             // Do an initial read to get the primary shard info cached.
1252
1253             final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1254             readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1255
1256             // Shutdown the leader and try to create a new tx.
1257
1258             TestKit.shutdownActorSystem(leaderSystem, true);
1259
1260             Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
1261
1262             sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
1263                 .operationTimeoutInMillis(500).shardElectionTimeoutFactor(5).customRaftPolicyImplementation(null));
1264
1265             final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1266
1267             rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1268
1269             followerTestKit.doCommit(rwTx.ready());
1270         }
1271     }
1272
1273     @Test
1274     public void testSemiReachableCandidateNotDroppingLeader() throws Exception {
1275         final String testName = "testSemiReachableCandidateNotDroppingLeader";
1276         initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
1277
1278         final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder()
1279                 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10);
1280         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
1281                 follower2System, follower2DatastoreContextBuilder, commitTimeout);
1282
1283         final AbstractDataStore ds2 =
1284                      follower2TestKit.setupAbstractDataStore(
1285                              testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS);
1286
1287         followerTestKit.waitForMembersUp("member-1", "member-3");
1288         follower2TestKit.waitForMembersUp("member-1", "member-2");
1289
1290         TestKit.shutdownActorSystem(follower2System);
1291
1292         ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").get();
1293         OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1294                 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
1295
1296         Cluster leaderCluster = Cluster.get(leaderSystem);
1297         Cluster followerCluster = Cluster.get(followerSystem);
1298         Cluster follower2Cluster = Cluster.get(follower2System);
1299
1300         Member follower2Member = follower2Cluster.readView().self();
1301
1302         await().atMost(10, TimeUnit.SECONDS)
1303                 .until(() -> containsUnreachable(leaderCluster, follower2Member));
1304         await().atMost(10, TimeUnit.SECONDS)
1305                 .until(() -> containsUnreachable(followerCluster, follower2Member));
1306
1307         ActorRef followerCars = followerDistributedDataStore.getActorUtils().findLocalShard("cars").get();
1308
1309         // to simulate a follower not being able to receive messages, but still being able to send messages and becoming
1310         // candidate, we can just send a couple of RequestVotes to both leader and follower.
1311         cars.tell(new RequestVote(initialState.getCurrentTerm() + 1, "member-3-shard-cars", -1, -1), null);
1312         followerCars.tell(new RequestVote(initialState.getCurrentTerm() + 1, "member-3-shard-cars", -1, -1), null);
1313         cars.tell(new RequestVote(initialState.getCurrentTerm() + 3, "member-3-shard-cars", -1, -1), null);
1314         followerCars.tell(new RequestVote(initialState.getCurrentTerm() + 3, "member-3-shard-cars", -1, -1), null);
1315
1316         OnDemandRaftState stateAfter = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1317                 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
1318         OnDemandRaftState followerState = (OnDemandRaftState) followerDistributedDataStore.getActorUtils()
1319                 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
1320
1321         assertEquals(initialState.getCurrentTerm(), stateAfter.getCurrentTerm());
1322         assertEquals(initialState.getCurrentTerm(), followerState.getCurrentTerm());
1323
1324         ds2.close();
1325     }
1326
1327     private static Boolean containsUnreachable(final Cluster cluster, final Member member) {
1328         // unreachableMembers() returns scala.collection.immutable.Set, but we are using scala.collection.Set to fix JDT
1329         // see https://bugs.eclipse.org/bugs/show_bug.cgi?id=468276#c32
1330         final Set<Member> members = cluster.readView().unreachableMembers();
1331         return members.contains(member);
1332     }
1333
1334     @Test
1335     public void testInstallSnapshot() throws Exception {
1336         final String testName = "testInstallSnapshot";
1337         final String leaderCarShardName = "member-1-shard-cars-" + testName;
1338         final String followerCarShardName = "member-2-shard-cars-" + testName;
1339
1340         // Setup a saved snapshot on the leader. The follower will startup with no data and the leader should
1341         // install a snapshot to sync the follower.
1342
1343         DataTree tree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_CONFIGURATION,
1344             SchemaContextHelper.full());
1345
1346         final ContainerNode carsNode = CarsModel.newCarsNode(
1347                 CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", Uint64.valueOf(20000))));
1348         AbstractShardTest.writeToStore(tree, CarsModel.BASE_PATH, carsNode);
1349
1350         final NormalizedNode<?, ?> snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.empty());
1351         final Snapshot initialSnapshot = Snapshot.create(
1352                 new ShardSnapshotState(new MetadataShardDataTreeSnapshot(snapshotRoot)),
1353                 Collections.emptyList(), 5, 1, 5, 1, 1, null, null);
1354         InMemorySnapshotStore.addSnapshot(leaderCarShardName, initialSnapshot);
1355
1356         InMemorySnapshotStore.addSnapshotSavedLatch(leaderCarShardName);
1357         InMemorySnapshotStore.addSnapshotSavedLatch(followerCarShardName);
1358
1359         initDatastoresWithCars(testName);
1360
1361         final Optional<NormalizedNode<?, ?>> readOptional = leaderDistributedDataStore.newReadOnlyTransaction().read(
1362                 CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1363         assertTrue("isPresent", readOptional.isPresent());
1364         assertEquals("Node", carsNode, readOptional.get());
1365
1366         verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(leaderCarShardName, Snapshot.class),
1367                 initialSnapshot, snapshotRoot);
1368
1369         verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(followerCarShardName, Snapshot.class),
1370                 initialSnapshot, snapshotRoot);
1371     }
1372
1373     @Test
1374     public void testReadWriteMessageSlicing() throws Exception {
1375         // The slicing is only implemented for tell-based protocol
1376         Assume.assumeTrue(ClientBackedDataStore.class.isAssignableFrom(testParameter));
1377
1378         leaderDatastoreContextBuilder.maximumMessageSliceSize(100);
1379         followerDatastoreContextBuilder.maximumMessageSliceSize(100);
1380         initDatastoresWithCars("testLargeReadReplySlicing");
1381
1382         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1383
1384         final NormalizedNode<?, ?> carsNode = CarsModel.create();
1385         rwTx.write(CarsModel.BASE_PATH, carsNode);
1386
1387         verifyNode(rwTx, CarsModel.BASE_PATH, carsNode);
1388     }
1389
1390     @SuppressWarnings("IllegalCatch")
1391     @Test
1392     public void testRaftCallbackDuringLeadershipDrop() throws Exception {
1393         final String testName = "testRaftCallbackDuringLeadershipDrop";
1394         initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
1395
1396         final ExecutorService executor = Executors.newSingleThreadExecutor();
1397
1398         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
1399                 DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500)
1400                         .shardLeaderElectionTimeoutInSeconds(3600),
1401                 commitTimeout);
1402
1403         final DOMStoreWriteTransaction initialWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1404         initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1405         leaderTestKit.doCommit(initialWriteTx.ready());
1406
1407         try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
1408                 testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false)) {
1409
1410             final ActorRef member3Cars = ((LocalShardStore) follower2DistributedDataStore).getLocalShards()
1411                     .getLocalShards().get("cars").getActor();
1412             final ActorRef member2Cars = ((LocalShardStore)followerDistributedDataStore).getLocalShards()
1413                     .getLocalShards().get("cars").getActor();
1414             member2Cars.tell(new StartDropMessages(AppendEntries.class), null);
1415             member3Cars.tell(new StartDropMessages(AppendEntries.class), null);
1416
1417             final DOMStoreWriteTransaction newTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1418             newTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
1419             final AtomicBoolean submitDone = new AtomicBoolean(false);
1420             executor.submit(() -> {
1421                 try {
1422                     leaderTestKit.doCommit(newTx.ready());
1423                     submitDone.set(true);
1424                 } catch (Exception e) {
1425                     throw new RuntimeException(e);
1426                 }
1427             });
1428             final ActorRef leaderCars = ((LocalShardStore) leaderDistributedDataStore).getLocalShards()
1429                     .getLocalShards().get("cars").getActor();
1430             await().atMost(10, TimeUnit.SECONDS)
1431                     .until(() -> ((OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1432                             .executeOperation(leaderCars, GetOnDemandRaftState.INSTANCE)).getLastIndex() >= 1);
1433
1434             final OnDemandRaftState raftState = (OnDemandRaftState)leaderDistributedDataStore.getActorUtils()
1435                     .executeOperation(leaderCars, GetOnDemandRaftState.INSTANCE);
1436
1437             // Simulate a follower not receiving heartbeats but still being able to send messages ie RequestVote with
1438             // new term(switching to candidate after election timeout)
1439             leaderCars.tell(new RequestVote(raftState.getCurrentTerm() + 1,
1440                     "member-3-shard-cars-testRaftCallbackDuringLeadershipDrop", -1,
1441                             -1), member3Cars);
1442
1443             member2Cars.tell(new StopDropMessages(AppendEntries.class), null);
1444             member3Cars.tell(new StopDropMessages(AppendEntries.class), null);
1445
1446             await("Is tx stuck in COMMIT_PENDING")
1447                     .atMost(10, TimeUnit.SECONDS).untilAtomic(submitDone, equalTo(true));
1448
1449         }
1450
1451         executor.shutdownNow();
1452     }
1453
1454     @Test
1455     public void testSnapshotOnRootOverwrite() throws Exception {
1456         if (!DistributedDataStore.class.isAssignableFrom(testParameter)) {
1457             // FIXME: ClientBackedDatastore does not have stable indexes/term, the snapshot index seems to fluctuate
1458             return;
1459         }
1460
1461         final String testName = "testSnapshotOnRootOverwrite";
1462         final String[] shards = {"cars", "default"};
1463         initDatastores(testName, "module-shards-default-cars-member1-and-2.conf", shards,
1464                 leaderDatastoreContextBuilder.snapshotOnRootOverwrite(true),
1465                 followerDatastoreContextBuilder.snapshotOnRootOverwrite(true));
1466
1467         leaderTestKit.waitForMembersUp("member-2");
1468         final ContainerNode rootNode = ImmutableContainerNodeBuilder.create()
1469                 .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(SchemaContext.NAME))
1470                 .withChild((ContainerNode) CarsModel.create())
1471                 .build();
1472
1473         leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.empty(), rootNode);
1474
1475         IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
1476             state -> assertEquals(1, state.getSnapshotIndex()));
1477
1478         IntegrationTestKit.verifyShardState(followerDistributedDataStore, "cars",
1479             state -> assertEquals(1, state.getSnapshotIndex()));
1480
1481         verifySnapshot("member-1-shard-cars-testSnapshotOnRootOverwrite", 1);
1482         verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 1);
1483
1484         for (int i = 0; i < 10; i++) {
1485             leaderTestKit.testWriteTransaction(leaderDistributedDataStore, CarsModel.newCarPath("car " + i),
1486                     CarsModel.newCarEntry("car " + i, Uint64.ONE));
1487         }
1488
1489         // fake snapshot causes the snapshotIndex to move
1490         IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
1491             state -> assertEquals(10, state.getSnapshotIndex()));
1492         IntegrationTestKit.verifyShardState(followerDistributedDataStore, "cars",
1493             state -> assertEquals(10, state.getSnapshotIndex()));
1494
1495         // however the real snapshot still has not changed and was taken at index 1
1496         verifySnapshot("member-1-shard-cars-testSnapshotOnRootOverwrite", 1);
1497         verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 1);
1498
1499         // root overwrite so expect a snapshot
1500         leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.empty(), rootNode);
1501
1502         // this was a real snapshot so everything should be in it(1(DisableTrackingPayload) + 1 + 10 + 1)
1503         IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
1504             state -> assertEquals(12, state.getSnapshotIndex()));
1505         IntegrationTestKit.verifyShardState(followerDistributedDataStore, "cars",
1506             state -> assertEquals(12, state.getSnapshotIndex()));
1507
1508         verifySnapshot("member-1-shard-cars-testSnapshotOnRootOverwrite", 12);
1509         verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 12);
1510     }
1511
1512     private void verifySnapshot(final String persistenceId, final long lastAppliedIndex) {
1513         await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> {
1514                 List<Snapshot> snap = InMemorySnapshotStore.getSnapshots(persistenceId, Snapshot.class);
1515                 assertEquals(1, snap.size());
1516                 assertEquals(lastAppliedIndex, snap.get(0).getLastAppliedIndex());
1517             }
1518         );
1519     }
1520
1521     private static void verifySnapshot(final Snapshot actual, final Snapshot expected,
1522                                        final NormalizedNode<?, ?> expRoot) {
1523         assertEquals("Snapshot getLastAppliedTerm", expected.getLastAppliedTerm(), actual.getLastAppliedTerm());
1524         assertEquals("Snapshot getLastAppliedIndex", expected.getLastAppliedIndex(), actual.getLastAppliedIndex());
1525         assertEquals("Snapshot getLastTerm", expected.getLastTerm(), actual.getLastTerm());
1526         assertEquals("Snapshot getLastIndex", expected.getLastIndex(), actual.getLastIndex());
1527         assertEquals("Snapshot state type", ShardSnapshotState.class, actual.getState().getClass());
1528         MetadataShardDataTreeSnapshot shardSnapshot =
1529                 (MetadataShardDataTreeSnapshot) ((ShardSnapshotState)actual.getState()).getSnapshot();
1530         assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().get());
1531     }
1532
1533     private static void sendDatastoreContextUpdate(final AbstractDataStore dataStore, final Builder builder) {
1534         final Builder newBuilder = DatastoreContext.newBuilderFrom(builder.build());
1535         final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
1536         final Answer<DatastoreContext> answer = invocation -> newBuilder.build();
1537         Mockito.doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext();
1538         Mockito.doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
1539         dataStore.onDatastoreContextUpdated(mockContextFactory);
1540     }
1541 }