0c74c71d832988b5959f1119da2c5e9016b98985
[controller.git] / opendaylight / md-sal / sal-distributed-datastore / src / test / java / org / opendaylight / controller / cluster / datastore / DistributedDataStoreRemotingIntegrationTest.java
1 /*
2  * Copyright (c) 2015, 2017 Brocade Communications Systems, Inc. and others.  All rights reserved.
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6  * and is available at http://www.eclipse.org/legal/epl-v10.html
7  */
8 package org.opendaylight.controller.cluster.datastore;
9
10 import static org.awaitility.Awaitility.await;
11 import static org.hamcrest.Matchers.equalTo;
12 import static org.junit.Assert.assertEquals;
13 import static org.junit.Assert.assertFalse;
14 import static org.junit.Assert.assertNotNull;
15 import static org.junit.Assert.assertTrue;
16 import static org.junit.Assert.fail;
17 import static org.mockito.ArgumentMatchers.any;
18 import static org.mockito.ArgumentMatchers.eq;
19 import static org.mockito.Mockito.timeout;
20 import static org.mockito.Mockito.verify;
21
22 import akka.actor.ActorRef;
23 import akka.actor.ActorSelection;
24 import akka.actor.ActorSystem;
25 import akka.actor.Address;
26 import akka.actor.AddressFromURIString;
27 import akka.cluster.Cluster;
28 import akka.cluster.Member;
29 import akka.dispatch.Futures;
30 import akka.pattern.Patterns;
31 import akka.testkit.javadsl.TestKit;
32 import com.google.common.base.Stopwatch;
33 import com.google.common.base.Throwables;
34 import com.google.common.collect.ImmutableMap;
35 import com.google.common.collect.Range;
36 import com.google.common.primitives.UnsignedLong;
37 import com.google.common.util.concurrent.ListenableFuture;
38 import com.google.common.util.concurrent.MoreExecutors;
39 import com.google.common.util.concurrent.Uninterruptibles;
40 import com.typesafe.config.ConfigFactory;
41 import java.util.Arrays;
42 import java.util.Collection;
43 import java.util.Collections;
44 import java.util.Iterator;
45 import java.util.LinkedList;
46 import java.util.List;
47 import java.util.Optional;
48 import java.util.Set;
49 import java.util.concurrent.ExecutionException;
50 import java.util.concurrent.ExecutorService;
51 import java.util.concurrent.Executors;
52 import java.util.concurrent.TimeUnit;
53 import java.util.concurrent.atomic.AtomicBoolean;
54 import java.util.concurrent.atomic.AtomicLong;
55 import java.util.function.Supplier;
56 import org.junit.After;
57 import org.junit.Assume;
58 import org.junit.Before;
59 import org.junit.Ignore;
60 import org.junit.Test;
61 import org.junit.runner.RunWith;
62 import org.junit.runners.Parameterized;
63 import org.junit.runners.Parameterized.Parameter;
64 import org.junit.runners.Parameterized.Parameters;
65 import org.mockito.Mockito;
66 import org.mockito.stubbing.Answer;
67 import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
68 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
69 import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
70 import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker;
71 import org.opendaylight.controller.cluster.databroker.TestClientBackedDataStore;
72 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
73 import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
74 import org.opendaylight.controller.cluster.datastore.TestShard.StartDropMessages;
75 import org.opendaylight.controller.cluster.datastore.TestShard.StopDropMessages;
76 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
77 import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
78 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
79 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
80 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
81 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
82 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
83 import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
84 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
85 import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
86 import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
87 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
88 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
89 import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
90 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
91 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
92 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
93 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
94 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
95 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
96 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
97 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
98 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
99 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
100 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
101 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
102 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
103 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
104 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
105 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
106 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
107 import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
108 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
109 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
110 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
111 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
112 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
113 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
114 import org.opendaylight.yangtools.yang.common.Uint64;
115 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
116 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
117 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
118 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
119 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
120 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
121 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
122 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
123 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
124 import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
125 import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
126 import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
127 import scala.concurrent.Await;
128 import scala.concurrent.Future;
129 import scala.concurrent.duration.FiniteDuration;
130
131 /**
132  * End-to-end distributed data store tests that exercise remote shards and transactions.
133  *
134  * @author Thomas Pantelis
135  */
136 @RunWith(Parameterized.class)
137 public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
138
139     @Parameters(name = "{0}")
140     public static Collection<Object[]> data() {
141         return Arrays.asList(new Object[][] {
142                 { TestDistributedDataStore.class, 7}, { TestClientBackedDataStore.class, 12 }
143         });
144     }
145
    // Injected by the Parameterized runner: the concrete datastore implementation under test.
    @Parameter(0)
    public Class<? extends AbstractDataStore> testParameter;
    // Per-implementation commit timeout passed to IntegrationTestKit (presumably seconds, given
    // values 7 and 12 — TODO confirm units against IntegrationTestKit).
    @Parameter(1)
    public int commitTimeout;

    // Shard-name sets used when initializing the datastores.
    private static final String[] CARS_AND_PEOPLE = {"cars", "people"};
    private static final String[] CARS = {"cars"};

    // Akka cluster addresses of the first two members; all three systems join via member-1.
    private static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse(
            "akka://cluster-test@127.0.0.1:2558");
    private static final Address MEMBER_2_ADDRESS = AddressFromURIString.parse(
            "akka://cluster-test@127.0.0.1:2559");

    // Module-shard configuration resources selecting which members replicate which shards.
    private static final String MODULE_SHARDS_CARS_ONLY_1_2 = "module-shards-cars-member-1-and-2.conf";
    private static final String MODULE_SHARDS_CARS_PEOPLE_1_2 = "module-shards-member1-and-2.conf";
    private static final String MODULE_SHARDS_CARS_PEOPLE_1_2_3 = "module-shards-member1-and-2-and-3.conf";
    private static final String MODULE_SHARDS_CARS_1_2_3 = "module-shards-cars-member-1-and-2-and-3.conf";

    // One ActorSystem per simulated cluster member; created in setUp(), shut down in tearDown().
    private ActorSystem leaderSystem;
    private ActorSystem followerSystem;
    private ActorSystem follower2System;

    // Leader context: fast heartbeat and short election timeout so member-1 wins elections quickly.
    private final DatastoreContext.Builder leaderDatastoreContextBuilder =
            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);

    // Follower context: elections disabled so leadership stays with member-1 unless a test
    // explicitly changes the raft policy.
    private final DatastoreContext.Builder followerDatastoreContextBuilder =
            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
                .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
    // Pre-created transaction identifiers available to tests in this class.
    private final TransactionIdentifier tx1 = nextTransactionId();
    private final TransactionIdentifier tx2 = nextTransactionId();

    // Datastores and test kits for the leader (member-1) and follower (member-2); populated by
    // initDatastores() and cleaned up in tearDown().
    private AbstractDataStore followerDistributedDataStore;
    private AbstractDataStore leaderDistributedDataStore;
    private IntegrationTestKit followerTestKit;
    private IntegrationTestKit leaderTestKit;
182     @Before
183     public void setUp() {
184         InMemoryJournal.clear();
185         InMemorySnapshotStore.clear();
186
187         leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
188         Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);
189
190         followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
191         Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);
192
193         follower2System = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member3"));
194         Cluster.get(follower2System).join(MEMBER_1_ADDRESS);
195     }
196
197     @After
198     public void tearDown() {
199         if (followerDistributedDataStore != null) {
200             leaderDistributedDataStore.close();
201         }
202         if (leaderDistributedDataStore != null) {
203             leaderDistributedDataStore.close();
204         }
205
206         TestKit.shutdownActorSystem(leaderSystem);
207         TestKit.shutdownActorSystem(followerSystem);
208         TestKit.shutdownActorSystem(follower2System);
209
210         InMemoryJournal.clear();
211         InMemorySnapshotStore.clear();
212     }
213
    // Convenience: set up leader and follower datastores hosting only the "cars" shard.
    private void initDatastoresWithCars(final String type) throws Exception {
        initDatastores(type, MODULE_SHARDS_CARS_ONLY_1_2, CARS);
    }
217
    // Convenience: set up leader and follower datastores hosting both "cars" and "people" shards.
    private void initDatastoresWithCarsAndPeople(final String type) throws Exception {
        initDatastores(type, MODULE_SHARDS_CARS_PEOPLE_1_2, CARS_AND_PEOPLE);
    }
221
    /**
     * Creates and wires up the leader (member-1) and follower (member-2) datastores for the given
     * shard set, then blocks until the leader's shards have an elected leader and both members
     * see each other as up.
     *
     * @param type datastore type name, used to namespace the per-test shards
     * @param moduleShardsConfig module-shard configuration resource to use
     * @param shards shard names to wait on
     */
    private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards)
            throws Exception {
        leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder, commitTimeout);

        leaderDistributedDataStore = leaderTestKit.setupAbstractDataStore(
                testParameter, type, moduleShardsConfig, false, shards);

        followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder, commitTimeout);
        followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
                testParameter, type, moduleShardsConfig, false, shards);

        // Don't let tests issue transactions before leadership is established.
        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(), shards);

        leaderTestKit.waitForMembersUp("member-2");
        followerTestKit.waitForMembersUp("member-1");
    }
238
239     private static void verifyCars(final DOMStoreReadTransaction readTx, final MapEntryNode... entries)
240             throws Exception {
241         final Optional<NormalizedNode<?, ?>> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
242         assertTrue("isPresent", optional.isPresent());
243
244         final CollectionNodeBuilder<MapEntryNode, MapNode> listBuilder = ImmutableNodes.mapNodeBuilder(
245                 CarsModel.CAR_QNAME);
246         for (final NormalizedNode<?, ?> entry: entries) {
247             listBuilder.withChild((MapEntryNode) entry);
248         }
249
250         assertEquals("Car list node", listBuilder.build(), optional.get());
251     }
252
253     private static void verifyNode(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path,
254             final NormalizedNode<?, ?> expNode) throws Exception {
255         final Optional<NormalizedNode<?, ?>> optional = readTx.read(path).get(5, TimeUnit.SECONDS);
256         assertTrue("isPresent", optional.isPresent());
257         assertEquals("Data node", expNode, optional.get());
258     }
259
260     private static void verifyExists(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path)
261             throws Exception {
262         final Boolean exists = readTx.exists(path).get(5, TimeUnit.SECONDS);
263         assertEquals("exists", Boolean.TRUE, exists);
264     }
265
    @Test
    public void testWriteTransactionWithSingleShard() throws Exception {
        // Exercises write-only transactions created on the follower: modifications are forwarded
        // to the remote shard leader, committed, and must then be readable on both members. The
        // test finishes by reinstating member-2 stand-alone to verify journal replication/recovery.
        final String testName = "testWriteTransactionWithSingleShard";
        initDatastoresWithCars(testName);

        final String followerCarShardName = "member-2-shard-cars-" + testName;

        DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
        assertNotNull("newWriteOnlyTransaction returned null", writeTx);

        writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
        writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());

        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
        final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
        writeTx.merge(car1Path, car1);

        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
        final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
        writeTx.merge(car2Path, car2);

        followerTestKit.doCommit(writeTx.ready());

        // The committed data must be visible from both the follower and the leader.
        verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1, car2);

        verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);

        // Test delete

        writeTx = followerDistributedDataStore.newWriteOnlyTransaction();

        writeTx.delete(car1Path);

        followerTestKit.doCommit(writeTx.ready());

        verifyExists(followerDistributedDataStore.newReadOnlyTransaction(), car2Path);

        verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car2);

        verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car2);

        // Re-instate the follower member 2 as a single-node to verify replication and recovery.

        // The following is a bit tricky. Before we reinstate the follower we need to ensure it has persisted and
        // applied and all the log entries from the leader. Since we've verified the car data above we know that
        // all the transactions have been applied on the leader so we first read and capture its lastAppliedIndex.
        final AtomicLong leaderLastAppliedIndex = new AtomicLong();
        IntegrationTestKit.verifyShardState(leaderDistributedDataStore, CARS[0],
            state -> leaderLastAppliedIndex.set(state.getLastApplied()));

        // Now we need to make sure the follower has persisted the leader's lastAppliedIndex via ApplyJournalEntries.
        // However we don't know exactly how many ApplyJournalEntries messages there will be as it can differ between
        // the tell-based and ask-based front-ends. For ask-based there will be exactly 2 ApplyJournalEntries but
        // tell-based persists additional payloads which could be replicated and applied in a batch resulting in
        // either 2 or 3 ApplyJournalEntries. To handle this we read the follower's persisted ApplyJournalEntries
        // until we find the one that encompasses the leader's lastAppliedIndex.
        Stopwatch sw = Stopwatch.createStarted();
        boolean done = false;
        while (!done) {
            final List<ApplyJournalEntries> entries = InMemoryJournal.get(followerCarShardName,
                    ApplyJournalEntries.class);
            for (ApplyJournalEntries aje: entries) {
                if (aje.getToIndex() >= leaderLastAppliedIndex.get()) {
                    done = true;
                    break;
                }
            }

            // Fail rather than loop forever if the follower never catches up. NOTE(review): once
            // 'done' flips true above, one extra assert and 50ms sleep still run before exiting.
            assertTrue("Follower did not persist ApplyJournalEntries containing leader's lastAppliedIndex "
                    + leaderLastAppliedIndex + ". Entries persisted: " + entries, sw.elapsed(TimeUnit.SECONDS) <= 5);

            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        }

        TestKit.shutdownActorSystem(leaderSystem, true);
        TestKit.shutdownActorSystem(followerSystem, true);

        final ActorSystem newSystem = newActorSystem("reinstated-member2", "Member2");

        // Recover member-2 from its persisted journal as a single-node store; the surviving car
        // entry must still be readable.
        try (AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder,
                commitTimeout)
                .setupAbstractDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
            verifyCars(member2Datastore.newReadOnlyTransaction(), car2);
        }
    }
351
352     @Test
353     public void testSingleTransactionsWritesInQuickSuccession() throws Exception {
354         final String testName = "testWriteTransactionWithSingleShard";
355         initDatastoresWithCars(testName);
356
357         final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
358
359         DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
360         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
361         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
362         followerTestKit.doCommit(writeTx.ready());
363
364         int numCars = 5;
365         for (int i = 0; i < numCars; i++) {
366             writeTx = txChain.newWriteOnlyTransaction();
367             writeTx.write(CarsModel.newCarPath("car" + i),
368                     CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
369
370             followerTestKit.doCommit(writeTx.ready());
371
372             DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
373             domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
374
375             domStoreReadTransaction.close();
376         }
377
378         // wait to let the shard catch up with purged
379         await("Range set leak test").atMost(5, TimeUnit.SECONDS)
380                 .pollInterval(500, TimeUnit.MILLISECONDS)
381                 .untilAsserted(() -> {
382                     Optional<ActorRef> localShard =
383                             leaderDistributedDataStore.getActorUtils().findLocalShard("cars");
384                     FrontendShardDataTreeSnapshotMetadata frontendMetadata =
385                             (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
386                                     .executeOperation(localShard.get(), new RequestFrontendMetadata());
387
388                     if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
389                         Iterator<FrontendHistoryMetadata> iterator =
390                                 frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
391                         FrontendHistoryMetadata metadata = iterator.next();
392                         while (iterator.hasNext() && metadata.getHistoryId() != 1) {
393                             metadata = iterator.next();
394                         }
395
396                         assertEquals(0, metadata.getClosedTransactions().size());
397                         assertEquals(Range.closedOpen(UnsignedLong.valueOf(0), UnsignedLong.valueOf(11)),
398                                 metadata.getPurgedTransactions().asRanges().iterator().next());
399                     } else {
400                         // ask based should track no metadata
401                         assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
402                     }
403                 });
404
405         final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
406                 .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
407         assertTrue("isPresent", optional.isPresent());
408         assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
409     }
410
411     @Test
412     @Ignore("Flushes out tell based leak needs to be handled separately")
413     public void testCloseTransactionMetadataLeak() throws Exception {
414         // Ask based frontend seems to have some issues with back to back close
415         Assume.assumeTrue(testParameter.isAssignableFrom(TestClientBackedDataStore.class));
416
417         final String testName = "testWriteTransactionWithSingleShard";
418         initDatastoresWithCars(testName);
419
420         final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
421
422         DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
423         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
424         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
425         followerTestKit.doCommit(writeTx.ready());
426
427         int numCars = 5;
428         for (int i = 0; i < numCars; i++) {
429             writeTx = txChain.newWriteOnlyTransaction();
430             writeTx.close();
431
432             DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
433             domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
434
435             domStoreReadTransaction.close();
436         }
437
438         writeTx = txChain.newWriteOnlyTransaction();
439         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
440         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
441         followerTestKit.doCommit(writeTx.ready());
442
443         // wait to let the shard catch up with purged
444         await("Close transaction purge leak test.").atMost(5, TimeUnit.SECONDS)
445                 .pollInterval(500, TimeUnit.MILLISECONDS)
446                 .untilAsserted(() -> {
447                     Optional<ActorRef> localShard =
448                             leaderDistributedDataStore.getActorUtils().findLocalShard("cars");
449                     FrontendShardDataTreeSnapshotMetadata frontendMetadata =
450                             (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
451                                     .executeOperation(localShard.get(), new RequestFrontendMetadata());
452
453                     if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
454                         Iterator<FrontendHistoryMetadata> iterator =
455                                 frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
456                         FrontendHistoryMetadata metadata = iterator.next();
457                         while (iterator.hasNext() && metadata.getHistoryId() != 1) {
458                             metadata = iterator.next();
459                         }
460
461                         Set<Range<UnsignedLong>> ranges = metadata.getPurgedTransactions().asRanges();
462
463                         assertEquals(0, metadata.getClosedTransactions().size());
464                         assertEquals(1, ranges.size());
465                     } else {
466                         // ask based should track no metadata
467                         assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
468                     }
469                 });
470
471         final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
472                 .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
473         assertTrue("isPresent", optional.isPresent());
474         assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
475     }
476
    @Test
    public void testReadWriteTransactionWithSingleShard() throws Exception {
        // A read-write transaction created on the follower must see its own uncommitted writes
        // and, after commit, the data must be readable through a fresh read-only transaction.
        initDatastoresWithCars("testReadWriteTransactionWithSingleShard");

        final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
        assertNotNull("newReadWriteTransaction returned null", rwTx);

        rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
        rwTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());

        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
        rwTx.merge(CarsModel.newCarPath("optima"), car1);

        // Read-your-own-writes within the same transaction.
        verifyCars(rwTx, car1);

        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
        final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
        rwTx.merge(car2Path, car2);

        verifyExists(rwTx, car2Path);

        followerTestKit.doCommit(rwTx.ready());

        verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1, car2);
    }
502
    @Test
    public void testWriteTransactionWithMultipleShards() throws Exception {
        // A single write-only transaction spanning two shards (cars and people) must commit and
        // leave both subtrees readable afterwards.
        initDatastoresWithCarsAndPeople("testWriteTransactionWithMultipleShards");

        final DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
        assertNotNull("newWriteOnlyTransaction returned null", writeTx);

        final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
        final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
        writeTx.write(carsPath, carsNode);

        final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
        final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
        writeTx.write(peoplePath, peopleNode);

        followerTestKit.doCommit(writeTx.ready());

        final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();

        verifyNode(readTx, carsPath, carsNode);
        verifyNode(readTx, peoplePath, peopleNode);
    }
525
    @Test
    public void testReadWriteTransactionWithMultipleShards() throws Exception {
        // Same multi-shard scenario as above but using a read-write transaction.
        initDatastoresWithCarsAndPeople("testReadWriteTransactionWithMultipleShards");

        final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
        assertNotNull("newReadWriteTransaction returned null", rwTx);

        final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
        final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
        rwTx.write(carsPath, carsNode);

        final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
        final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
        rwTx.write(peoplePath, peopleNode);

        followerTestKit.doCommit(rwTx.ready());

        final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();

        verifyNode(readTx, carsPath, carsNode);
        verifyNode(readTx, peoplePath, peopleNode);
    }
548
    @Test
    public void testTransactionChainWithSingleShard() throws Exception {
        // Exercises ordering guarantees of a transaction chain on one shard: a later read-only or
        // read-write transaction must see the effects of an earlier, not-yet-committed write.
        initDatastoresWithCars("testTransactionChainWithSingleShard");

        final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();

        // Add the top-level cars container with write-only.

        final DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
        assertNotNull("newWriteOnlyTransaction returned null", writeTx);

        writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());

        // Ready but do not commit yet — subsequent chained transactions must still see this write.
        final DOMStoreThreePhaseCommitCohort writeTxReady = writeTx.ready();

        // Verify the top-level cars container with read-only.

        verifyNode(txChain.newReadOnlyTransaction(), CarsModel.BASE_PATH, CarsModel.emptyContainer());

        // Perform car operations with read-write.

        final DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();

        verifyNode(rwTx, CarsModel.BASE_PATH, CarsModel.emptyContainer());

        rwTx.merge(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());

        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
        final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
        rwTx.write(car1Path, car1);

        verifyExists(rwTx, car1Path);

        verifyCars(rwTx, car1);

        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
        rwTx.merge(CarsModel.newCarPath("sportage"), car2);

        rwTx.delete(car1Path);

        // Commit in chain order: first the initial write, then the read-write transaction.
        followerTestKit.doCommit(writeTxReady);

        followerTestKit.doCommit(rwTx.ready());

        txChain.close();

        verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car2);
    }
597
    @Test
    public void testTransactionChainWithMultipleShards() throws Exception {
        // Same chain-ordering guarantees as the single-shard variant, but with transactions that
        // span both the cars and people shards.
        initDatastoresWithCarsAndPeople("testTransactionChainWithMultipleShards");

        final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();

        DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
        assertNotNull("newWriteOnlyTransaction returned null", writeTx);

        writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
        writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());

        writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
        writeTx.write(PeopleModel.PERSON_LIST_PATH, PeopleModel.newPersonMapNode());

        followerTestKit.doCommit(writeTx.ready());

        final DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction();

        final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
        final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
        readWriteTx.write(carPath, car);

        final MapEntryNode person = PeopleModel.newPersonEntry("jack");
        final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
        readWriteTx.merge(personPath, person);

        // Read-your-own-writes across both shards within the same transaction.
        Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
        assertTrue("isPresent", optional.isPresent());
        assertEquals("Data node", car, optional.get());

        optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
        assertTrue("isPresent", optional.isPresent());
        assertEquals("Data node", person, optional.get());

        final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();

        writeTx = txChain.newWriteOnlyTransaction();

        writeTx.delete(personPath);

        final DOMStoreThreePhaseCommitCohort cohort3 = writeTx.ready();

        // Commit both cohorts in chain order; the delete in cohort3 must win over the merge.
        followerTestKit.doCommit(cohort2);
        followerTestKit.doCommit(cohort3);

        txChain.close();

        final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
        verifyCars(readTx, car);

        optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
        assertFalse("isPresent", optional.isPresent());
    }
652
653     @Test
654     public void testChainedTransactionFailureWithSingleShard() throws Exception {
655         initDatastoresWithCars("testChainedTransactionFailureWithSingleShard");
656
657         final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
658                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
659                         LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
660                         MoreExecutors.directExecutor());
661
662         final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
663         final DOMTransactionChain txChain = broker.createTransactionChain(listener);
664
665         final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
666
667         final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
668                 new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
669                     .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
670
671         writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
672
673         try {
674             writeTx.commit().get(5, TimeUnit.SECONDS);
675             fail("Expected TransactionCommitFailedException");
676         } catch (final ExecutionException e) {
677             // Expected
678         }
679
680         verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
681
682         txChain.close();
683         broker.close();
684     }
685
686     @Test
687     public void testChainedTransactionFailureWithMultipleShards() throws Exception {
688         initDatastoresWithCarsAndPeople("testChainedTransactionFailureWithMultipleShards");
689
690         final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
691                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
692                         LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
693                         MoreExecutors.directExecutor());
694
695         final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
696         final DOMTransactionChain txChain = broker.createTransactionChain(listener);
697
698         final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
699
700         writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
701
702         final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
703                 new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
704                     .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
705
706         // Note that merge will validate the data and fail but put succeeds b/c deep validation is not
707         // done for put for performance reasons.
708         writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
709
710         try {
711             writeTx.commit().get(5, TimeUnit.SECONDS);
712             fail("Expected TransactionCommitFailedException");
713         } catch (final ExecutionException e) {
714             // Expected
715         }
716
717         verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
718
719         txChain.close();
720         broker.close();
721     }
722
    /**
     * Verifies single-shard transactions keep working across a leadership change: the original leader
     * (member-1) is shut down and removed, the follower (member-2) becomes leader, then member-1 is
     * restarted and re-joins as a follower.
     */
    @Test
    public void testSingleShardTransactionsWithLeaderChanges() throws Exception {
        followerDatastoreContextBuilder.backendAlivenessTimerIntervalInSeconds(2);
        final String testName = "testSingleShardTransactionsWithLeaderChanges";
        initDatastoresWithCars(testName);

        // Latch on the follower's journal so we can wait for the commit below to be persisted there.
        final String followerCarShardName = "member-2-shard-cars-" + testName;
        InMemoryJournal.addWriteMessagesCompleteLatch(followerCarShardName, 1, ApplyJournalEntries.class);

        // Write top-level car container from the follower so it uses a remote Tx.

        DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();

        writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
        writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());

        followerTestKit.doCommit(writeTx.ready());

        InMemoryJournal.waitForWriteMessagesComplete(followerCarShardName);

        // Switch the leader to the follower

        sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
                .shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null));

        // Take down member-1 and remove it from the cluster so member-2 can win an election.
        TestKit.shutdownActorSystem(leaderSystem, true);
        Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);

        followerTestKit.waitUntilNoLeader(followerDistributedDataStore.getActorUtils(), CARS);

        // Restart member-1 with a fresh actor system and have it join the cluster via member-2.
        leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
        Cluster.get(leaderSystem).join(MEMBER_2_ADDRESS);

        final DatastoreContext.Builder newMember1Builder = DatastoreContext.newBuilder()
                .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
        IntegrationTestKit newMember1TestKit = new IntegrationTestKit(leaderSystem, newMember1Builder, commitTimeout);

        try (AbstractDataStore ds =
                newMember1TestKit.setupAbstractDataStore(
                        testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) {

            followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), CARS);

            // Write a car entry to the new leader - should switch to local Tx

            writeTx = followerDistributedDataStore.newWriteOnlyTransaction();

            MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
            YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
            writeTx.merge(car1Path, car1);

            followerTestKit.doCommit(writeTx.ready());

            verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1);
        }
    }
779
    /**
     * Sends {@link ReadyLocalTransaction} messages directly to the follower's cars shard actor and
     * verifies the shard forwards them to the leader - first with immediate commit, then through the
     * three-phase commit protocol via a cohort proxy.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testReadyLocalTransactionForwardedToLeader() throws Exception {
        initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader");
        followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");

        final Optional<ActorRef> carsFollowerShard =
                followerDistributedDataStore.getActorUtils().findLocalShard("cars");
        assertTrue("Cars follower shard found", carsFollowerShard.isPresent());

        // Build modifications on a standalone in-memory data tree rather than the transaction front-end.
        final DataTree dataTree = new InMemoryDataTreeFactory().create(
            DataTreeConfiguration.DEFAULT_OPERATIONAL, SchemaContextHelper.full());

        // Send a tx with immediate commit.

        DataTreeModification modification = dataTree.takeSnapshot().newModification();
        new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
        new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);

        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
        new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
        modification.ready();

        ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(tx1 , modification, true, Optional.empty());

        carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
        Object resp = followerTestKit.expectMsgClass(Object.class);
        if (resp instanceof akka.actor.Status.Failure) {
            throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
        }

        // Immediate commit: the reply indicates the tx was fully committed by the leader.
        assertEquals("Response type", CommitTransactionReply.class, resp.getClass());

        verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1);

        // Send another tx without immediate commit.

        modification = dataTree.takeSnapshot().newModification();
        MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
        new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
        modification.ready();

        readyLocal = new ReadyLocalTransaction(tx2 , modification, false, Optional.empty());

        carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
        resp = followerTestKit.expectMsgClass(Object.class);
        if (resp instanceof akka.actor.Status.Failure) {
            throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
        }

        // Non-immediate commit: the reply carries the cohort path for the 3PC protocol.
        assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());

        // Drive canCommit/preCommit/commit through a cohort proxy pointed at the returned cohort actor.
        final ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
                ((ReadyTransactionReply)resp).getCohortPath());

        final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
        Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
        ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
                leaderDistributedDataStore.getActorUtils(), Arrays.asList(
                        new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
        cohort.canCommit().get(5, TimeUnit.SECONDS);
        cohort.preCommit().get(5, TimeUnit.SECONDS);
        cohort.commit().get(5, TimeUnit.SECONDS);

        verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
    }
846
    /**
     * Sends {@link ForwardedReadyTransaction} messages directly to the follower's cars shard actor and
     * verifies they are forwarded to and committed by the leader - first with immediate commit, then
     * through the three-phase commit protocol via a cohort proxy.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testForwardedReadyTransactionForwardedToLeader() throws Exception {
        initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader");
        followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");

        final Optional<ActorRef> carsFollowerShard =
                followerDistributedDataStore.getActorUtils().findLocalShard("cars");
        assertTrue("Cars follower shard found", carsFollowerShard.isPresent());

        // Obtain the follower shard's own data tree so the modification is built against live state.
        carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
        final DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class);

        // Send a tx with immediate commit.

        DataTreeModification modification = dataTree.takeSnapshot().newModification();
        new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
        new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);

        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
        new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);

        ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1,
                DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
                        Mockito.mock(ShardDataTreeTransactionParent.class), tx1, modification), true,
                Optional.empty());

        carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
        Object resp = followerTestKit.expectMsgClass(Object.class);
        if (resp instanceof akka.actor.Status.Failure) {
            throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
        }

        // Immediate commit: the reply indicates the tx was fully committed by the leader.
        assertEquals("Response type", CommitTransactionReply.class, resp.getClass());

        verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1);

        // Send another tx without immediate commit.

        modification = dataTree.takeSnapshot().newModification();
        MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
        new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);

        forwardedReady = new ForwardedReadyTransaction(tx2,
                DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
                        Mockito.mock(ShardDataTreeTransactionParent.class), tx2, modification), false,
                Optional.empty());

        carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
        resp = followerTestKit.expectMsgClass(Object.class);
        if (resp instanceof akka.actor.Status.Failure) {
            throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
        }

        // Non-immediate commit: the reply carries the cohort path for the 3PC protocol.
        assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());

        // Drive canCommit/preCommit/commit through a cohort proxy pointed at the returned cohort actor.
        ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
                ((ReadyTransactionReply)resp).getCohortPath());

        final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
        Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
        final ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
                leaderDistributedDataStore.getActorUtils(), Arrays.asList(
                        new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
        cohort.canCommit().get(5, TimeUnit.SECONDS);
        cohort.preCommit().get(5, TimeUnit.SECONDS);
        cohort.commit().get(5, TimeUnit.SECONDS);

        verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
    }
917
    /**
     * Verifies that transactions pending or in-flight when shard leadership moves from member-1 to
     * member-2 are retried and forwarded to the new leader, and all eventually commit successfully.
     */
    @Test
    public void testTransactionForwardedToLeaderAfterRetry() throws Exception {
        // FIXME: remove when test passes also for ClientBackedDataStore
        Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
        // Small batch size so multi-write transactions span several BatchedModifications messages.
        followerDatastoreContextBuilder.shardBatchedModificationCount(2);
        leaderDatastoreContextBuilder.shardBatchedModificationCount(2);
        initDatastoresWithCarsAndPeople("testTransactionForwardedToLeaderAfterRetry");

        // Do an initial write to get the primary shard info cached.

        final DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction();
        initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
        initialWriteTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
        followerTestKit.doCommit(initialWriteTx.ready());

        // Wait for the commit to be replicated to the follower.

        MemberNode.verifyRaftState(followerDistributedDataStore, "cars",
            raftState -> assertEquals("getLastApplied", 1, raftState.getLastApplied()));

        MemberNode.verifyRaftState(followerDistributedDataStore, "people",
            raftState -> assertEquals("getLastApplied", 1, raftState.getLastApplied()));

        // Prepare, ready and canCommit a WO tx that writes to 2 shards. This will become the current tx in
        // the leader shard.

        final DOMStoreWriteTransaction writeTx1 = followerDistributedDataStore.newWriteOnlyTransaction();
        writeTx1.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
        writeTx1.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
        final DOMStoreThreePhaseCommitCohort writeTx1Cohort = writeTx1.ready();
        final ListenableFuture<Boolean> writeTx1CanCommit = writeTx1Cohort.canCommit();
        writeTx1CanCommit.get(5, TimeUnit.SECONDS);

        // Prepare and ready another WO tx that writes to 2 shards but don't canCommit yet. This will be queued
        // in the leader shard.

        final DOMStoreWriteTransaction writeTx2 = followerDistributedDataStore.newWriteOnlyTransaction();
        // Accumulates every car written by the tx's below so the final state can be verified in one shot.
        final LinkedList<MapEntryNode> cars = new LinkedList<>();
        int carIndex = 1;
        cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
        writeTx2.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
        carIndex++;
        NormalizedNode<?, ?> people = ImmutableNodes.mapNodeBuilder(PeopleModel.PERSON_QNAME)
                .withChild(PeopleModel.newPersonEntry("Dude")).build();
        writeTx2.write(PeopleModel.PERSON_LIST_PATH, people);
        final DOMStoreThreePhaseCommitCohort writeTx2Cohort = writeTx2.ready();

        // Prepare another WO that writes to a single shard and thus will be directly committed on ready. This
        // tx writes 5 cars so 2 BatchedModifications messages will be sent initially and cached in the
        // leader shard (with shardBatchedModificationCount set to 2). The 3rd BatchedModifications will be
        // sent on ready.

        final DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction();
        for (int i = 1; i <= 5; i++, carIndex++) {
            cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
            writeTx3.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
        }

        // Prepare another WO that writes to a single shard. This will send a single BatchedModifications
        // message on ready.

        final DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction();
        cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
        writeTx4.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
        carIndex++;

        // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaction message to the
        // leader shard on ready.

        final DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction();
        cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
        readWriteTx.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());

        IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
            stats -> assertEquals("getReadWriteTransactionCount", 5, stats.getReadWriteTransactionCount()));

        // Disable elections on the leader so it switches to follower.

        sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
                .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
                .shardElectionTimeoutFactor(10));

        leaderTestKit.waitUntilNoLeader(leaderDistributedDataStore.getActorUtils(), "cars");

        // Submit all tx's - the messages should get queued for retry.

        final ListenableFuture<Boolean> writeTx2CanCommit = writeTx2Cohort.canCommit();
        final DOMStoreThreePhaseCommitCohort writeTx3Cohort = writeTx3.ready();
        final DOMStoreThreePhaseCommitCohort writeTx4Cohort = writeTx4.ready();
        final DOMStoreThreePhaseCommitCohort rwTxCohort = readWriteTx.ready();

        // Enable elections on the other follower so it becomes the leader, at which point the
        // tx's should get forwarded from the previous leader to the new leader to complete the commits.

        sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
                .customRaftPolicyImplementation(null).shardElectionTimeoutFactor(1));
        IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorUtils(), "cars")
                .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
        IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorUtils(), "people")
                .tell(TimeoutNow.INSTANCE, ActorRef.noSender());

        // All commits must complete successfully against the new leader.
        followerTestKit.doCommit(writeTx1CanCommit, writeTx1Cohort);
        followerTestKit.doCommit(writeTx2CanCommit, writeTx2Cohort);
        followerTestKit.doCommit(writeTx3Cohort);
        followerTestKit.doCommit(writeTx4Cohort);
        followerTestKit.doCommit(rwTxCohort);

        DOMStoreReadTransaction readTx = leaderDistributedDataStore.newReadOnlyTransaction();
        verifyCars(readTx, cars.toArray(new MapEntryNode[cars.size()]));
        verifyNode(readTx, PeopleModel.PERSON_LIST_PATH, people);
    }
1029
    /**
     * Verifies that a gracefully stopped leader (via a Shutdown message) first completes its pending
     * transactions and transfers leadership before terminating.
     */
    @Test
    public void testLeadershipTransferOnShutdown() throws Exception {
        // FIXME: remove when test passes also for ClientBackedDataStore
        Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
        leaderDatastoreContextBuilder.shardBatchedModificationCount(1);
        followerDatastoreContextBuilder.shardElectionTimeoutFactor(10).customRaftPolicyImplementation(null);
        final String testName = "testLeadershipTransferOnShutdown";
        initDatastores(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, CARS_AND_PEOPLE);

        // Bring up a third member so leadership has somewhere to transfer to.
        final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
                DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500),
                commitTimeout);
        try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
                testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {

            followerTestKit.waitForMembersUp("member-3");
            follower2TestKit.waitForMembersUp("member-1", "member-2");

            // Create and submit a couple tx's so they're pending.

            DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
            writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
            writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
            writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
            final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();

            IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
                stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));

            writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
            final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
            writeTx.write(CarsModel.newCarPath("optima"), car);
            final DOMStoreThreePhaseCommitCohort cohort2 = writeTx.ready();

            IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
                stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));

            // Gracefully stop the leader via a Shutdown message.

            sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
                .shardElectionTimeoutFactor(100));

            final FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS);
            final Future<ActorRef> future = leaderDistributedDataStore.getActorUtils().findLocalShardAsync("cars");
            final ActorRef leaderActor = Await.result(future, duration);

            final Future<Boolean> stopFuture = Patterns.gracefulStop(leaderActor, duration, Shutdown.INSTANCE);

            // Commit the 2 transactions. They should finish and succeed.

            followerTestKit.doCommit(cohort1);
            followerTestKit.doCommit(cohort2);

            // Wait for the leader actor stopped.

            final Boolean stopped = Await.result(stopFuture, duration);
            assertEquals("Stopped", Boolean.TRUE, stopped);

            // Verify leadership was transferred by reading the committed data from the other nodes.

            verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car);
            verifyCars(follower2DistributedDataStore.newReadOnlyTransaction(), car);
        }
    }
1094
    /**
     * Verifies leader behavior when it becomes isolated from its follower: a tx submitted before the
     * IsolatedLeader transition stays pending, one submitted after fails with NoShardLeaderException,
     * and both the pending and a new tx commit once the follower is reinstated.
     */
    @Test
    public void testTransactionWithIsolatedLeader() throws Exception {
        // FIXME: remove when test passes also for ClientBackedDataStore
        Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
        // Set the isolated leader check interval high so we can control the switch to IsolatedLeader.
        leaderDatastoreContextBuilder.shardIsolatedLeaderCheckIntervalInMillis(10000000);
        final String testName = "testTransactionWithIsolatedLeader";
        initDatastoresWithCars(testName);

        // Tx that is submitted after the follower is stopped but before the leader transitions to IsolatedLeader.
        final DOMStoreWriteTransaction preIsolatedLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
        preIsolatedLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());

        // Tx that is submitted after the leader transitions to IsolatedLeader.
        final DOMStoreWriteTransaction noShardLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
        noShardLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());

        // Tx that is submitted after the follower is reinstated.
        final DOMStoreWriteTransaction successWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
        successWriteTx.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer());

        // Stop the follower
        followerTestKit.watch(followerDistributedDataStore.getActorUtils().getShardManager());
        followerDistributedDataStore.close();
        followerTestKit.expectTerminated(followerDistributedDataStore.getActorUtils().getShardManager());

        // Submit the preIsolatedLeaderWriteTx so it's pending
        final DOMStoreThreePhaseCommitCohort preIsolatedLeaderTxCohort = preIsolatedLeaderWriteTx.ready();

        // Change the isolated leader check interval low so it changes to IsolatedLeader.
        sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
                .shardIsolatedLeaderCheckIntervalInMillis(200));

        MemberNode.verifyRaftState(leaderDistributedDataStore, "cars",
            raftState -> assertEquals("getRaftState", "IsolatedLeader", raftState.getRaftState()));

        // An isolated leader must reject new commits outright.
        try {
            leaderTestKit.doCommit(noShardLeaderWriteTx.ready());
            fail("Expected NoShardLeaderException");
        } catch (final ExecutionException e) {
            assertEquals("getCause", NoShardLeaderException.class, Throwables.getRootCause(e).getClass());
        }

        sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
                .shardElectionTimeoutFactor(100));

        final DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready();

        // Reinstate the follower - the pending and new tx's should now both commit.
        followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
                testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);

        leaderTestKit.doCommit(preIsolatedLeaderTxCohort);
        leaderTestKit.doCommit(successTxCohort);
    }
1149
    /**
     * Verifies commit failure when the shard leader stops responding: the follower still has the primary
     * shard info cached, so the commit should fail with NoShardLeaderException or
     * ShardLeaderNotRespondingException (DistributedDataStore) or RequestTimeoutException
     * (ClientBackedDataStore).
     */
    @Test
    public void testTransactionWithShardLeaderNotResponding() throws Exception {
        followerDatastoreContextBuilder.frontendRequestTimeoutInSeconds(2);
        followerDatastoreContextBuilder.shardElectionTimeoutFactor(50);
        initDatastoresWithCars("testTransactionWithShardLeaderNotResponding");

        // Do an initial read to get the primary shard info cached.

        final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
        readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);

        // Shutdown the leader and try to create a new tx.

        TestKit.shutdownActorSystem(leaderSystem, true);

        // Shorten the operation timeout so the failure surfaces quickly.
        followerDatastoreContextBuilder.operationTimeoutInMillis(50).shardElectionTimeoutFactor(1);
        sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder);

        final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();

        rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());

        try {
            followerTestKit.doCommit(rwTx.ready());
            fail("Exception expected");
        } catch (final ExecutionException e) {
            final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
            if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
                assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException
                        || e.getCause() instanceof ShardLeaderNotRespondingException);
            } else {
                assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
            }
        }
    }
1185
1186     @Test
1187     public void testTransactionWithCreateTxFailureDueToNoLeader() throws Exception {
1188         followerDatastoreContextBuilder.frontendRequestTimeoutInSeconds(2);
1189         initDatastoresWithCars("testTransactionWithCreateTxFailureDueToNoLeader");
1190
1191         // Do an initial read to get the primary shard info cached.
1192
1193         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1194         readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1195
1196         // Shutdown the leader and try to create a new tx.
1197
1198         TestKit.shutdownActorSystem(leaderSystem, true);
1199
1200         Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
1201
1202         Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
1203
1204         sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
1205                 .operationTimeoutInMillis(10).shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null));
1206
1207         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1208
1209         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1210
1211         try {
1212             followerTestKit.doCommit(rwTx.ready());
1213             fail("Exception expected");
1214         } catch (final ExecutionException e) {
1215             final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
1216             if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
1217                 assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException);
1218             } else {
1219                 assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
1220             }
1221         }
1222     }
1223
1224     @Test
1225     public void testTransactionRetryWithInitialAskTimeoutExOnCreateTx() throws Exception {
1226         followerDatastoreContextBuilder.backendAlivenessTimerIntervalInSeconds(2);
1227         String testName = "testTransactionRetryWithInitialAskTimeoutExOnCreateTx";
1228         initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
1229
1230         final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder()
1231                 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10);
1232         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
1233                 follower2System, follower2DatastoreContextBuilder, commitTimeout);
1234
1235         try (AbstractDataStore ds =
1236                 follower2TestKit.setupAbstractDataStore(
1237                         testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
1238
1239             followerTestKit.waitForMembersUp("member-1", "member-3");
1240             follower2TestKit.waitForMembersUp("member-1", "member-2");
1241
1242             // Do an initial read to get the primary shard info cached.
1243
1244             final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1245             readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1246
1247             // Shutdown the leader and try to create a new tx.
1248
1249             TestKit.shutdownActorSystem(leaderSystem, true);
1250
1251             Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
1252
1253             sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
1254                 .operationTimeoutInMillis(500).shardElectionTimeoutFactor(5).customRaftPolicyImplementation(null));
1255
1256             final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1257
1258             rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1259
1260             followerTestKit.doCommit(rwTx.ready());
1261         }
1262     }
1263
1264     @Test
1265     public void testSemiReachableCandidateNotDroppingLeader() throws Exception {
1266         final String testName = "testSemiReachableCandidateNotDroppingLeader";
1267         initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
1268
1269         final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder()
1270                 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10);
1271         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
1272                 follower2System, follower2DatastoreContextBuilder, commitTimeout);
1273
1274         final AbstractDataStore ds2 =
1275                      follower2TestKit.setupAbstractDataStore(
1276                              testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS);
1277
1278         followerTestKit.waitForMembersUp("member-1", "member-3");
1279         follower2TestKit.waitForMembersUp("member-1", "member-2");
1280
1281         TestKit.shutdownActorSystem(follower2System);
1282
1283         ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").get();
1284         OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1285                 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
1286
1287         Cluster leaderCluster = Cluster.get(leaderSystem);
1288         Cluster followerCluster = Cluster.get(followerSystem);
1289         Cluster follower2Cluster = Cluster.get(follower2System);
1290
1291         Member follower2Member = follower2Cluster.readView().self();
1292
1293         await().atMost(10, TimeUnit.SECONDS)
1294                 .until(() -> leaderCluster.readView().unreachableMembers().contains(follower2Member));
1295         await().atMost(10, TimeUnit.SECONDS)
1296                 .until(() -> followerCluster.readView().unreachableMembers().contains(follower2Member));
1297
1298         ActorRef followerCars = followerDistributedDataStore.getActorUtils().findLocalShard("cars").get();
1299
1300         // to simulate a follower not being able to receive messages, but still being able to send messages and becoming
1301         // candidate, we can just send a couple of RequestVotes to both leader and follower.
1302         cars.tell(new RequestVote(initialState.getCurrentTerm() + 1, "member-3-shard-cars", -1, -1), null);
1303         followerCars.tell(new RequestVote(initialState.getCurrentTerm() + 1, "member-3-shard-cars", -1, -1), null);
1304         cars.tell(new RequestVote(initialState.getCurrentTerm() + 3, "member-3-shard-cars", -1, -1), null);
1305         followerCars.tell(new RequestVote(initialState.getCurrentTerm() + 3, "member-3-shard-cars", -1, -1), null);
1306
1307         OnDemandRaftState stateAfter = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1308                 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
1309         OnDemandRaftState followerState = (OnDemandRaftState) followerDistributedDataStore.getActorUtils()
1310                 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
1311
1312         assertEquals(initialState.getCurrentTerm(), stateAfter.getCurrentTerm());
1313         assertEquals(initialState.getCurrentTerm(), followerState.getCurrentTerm());
1314
1315         ds2.close();
1316     }
1317
1318     @Test
1319     public void testInstallSnapshot() throws Exception {
1320         final String testName = "testInstallSnapshot";
1321         final String leaderCarShardName = "member-1-shard-cars-" + testName;
1322         final String followerCarShardName = "member-2-shard-cars-" + testName;
1323
1324         // Setup a saved snapshot on the leader. The follower will startup with no data and the leader should
1325         // install a snapshot to sync the follower.
1326
1327         DataTree tree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_CONFIGURATION,
1328             SchemaContextHelper.full());
1329
1330         final ContainerNode carsNode = CarsModel.newCarsNode(
1331                 CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", Uint64.valueOf(20000))));
1332         AbstractShardTest.writeToStore(tree, CarsModel.BASE_PATH, carsNode);
1333
1334         final NormalizedNode<?, ?> snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.empty());
1335         final Snapshot initialSnapshot = Snapshot.create(
1336                 new ShardSnapshotState(new MetadataShardDataTreeSnapshot(snapshotRoot)),
1337                 Collections.emptyList(), 5, 1, 5, 1, 1, null, null);
1338         InMemorySnapshotStore.addSnapshot(leaderCarShardName, initialSnapshot);
1339
1340         InMemorySnapshotStore.addSnapshotSavedLatch(leaderCarShardName);
1341         InMemorySnapshotStore.addSnapshotSavedLatch(followerCarShardName);
1342
1343         initDatastoresWithCars(testName);
1344
1345         final Optional<NormalizedNode<?, ?>> readOptional = leaderDistributedDataStore.newReadOnlyTransaction().read(
1346                 CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1347         assertTrue("isPresent", readOptional.isPresent());
1348         assertEquals("Node", carsNode, readOptional.get());
1349
1350         verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(leaderCarShardName, Snapshot.class),
1351                 initialSnapshot, snapshotRoot);
1352
1353         verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(followerCarShardName, Snapshot.class),
1354                 initialSnapshot, snapshotRoot);
1355     }
1356
1357     @Test
1358     public void testReadWriteMessageSlicing() throws Exception {
1359         // The slicing is only implemented for tell-based protocol
1360         Assume.assumeTrue(ClientBackedDataStore.class.isAssignableFrom(testParameter));
1361
1362         leaderDatastoreContextBuilder.maximumMessageSliceSize(100);
1363         followerDatastoreContextBuilder.maximumMessageSliceSize(100);
1364         initDatastoresWithCars("testLargeReadReplySlicing");
1365
1366         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1367
1368         final NormalizedNode<?, ?> carsNode = CarsModel.create();
1369         rwTx.write(CarsModel.BASE_PATH, carsNode);
1370
1371         verifyNode(rwTx, CarsModel.BASE_PATH, carsNode);
1372     }
1373
1374     @SuppressWarnings("IllegalCatch")
1375     @Test
1376     public void testRaftCallbackDuringLeadershipDrop() throws Exception {
1377         final String testName = "testRaftCallbackDuringLeadershipDrop";
1378         initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
1379
1380         final ExecutorService executor = Executors.newSingleThreadExecutor();
1381
1382         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
1383                 DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500)
1384                         .shardLeaderElectionTimeoutInSeconds(3600),
1385                 commitTimeout);
1386
1387         final DOMStoreWriteTransaction initialWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1388         initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1389         leaderTestKit.doCommit(initialWriteTx.ready());
1390
1391         try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
1392                 testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false)) {
1393
1394             final ActorRef member3Cars = ((LocalShardStore) follower2DistributedDataStore).getLocalShards()
1395                     .getLocalShards().get("cars").getActor();
1396             final ActorRef member2Cars = ((LocalShardStore)followerDistributedDataStore).getLocalShards()
1397                     .getLocalShards().get("cars").getActor();
1398             member2Cars.tell(new StartDropMessages(AppendEntries.class), null);
1399             member3Cars.tell(new StartDropMessages(AppendEntries.class), null);
1400
1401             final DOMStoreWriteTransaction newTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1402             newTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
1403             final AtomicBoolean submitDone = new AtomicBoolean(false);
1404             executor.submit(() -> {
1405                 try {
1406                     leaderTestKit.doCommit(newTx.ready());
1407                     submitDone.set(true);
1408                 } catch (Exception e) {
1409                     throw new RuntimeException(e);
1410                 }
1411             });
1412             final ActorRef leaderCars = ((LocalShardStore) leaderDistributedDataStore).getLocalShards()
1413                     .getLocalShards().get("cars").getActor();
1414             await().atMost(10, TimeUnit.SECONDS)
1415                     .until(() -> ((OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1416                             .executeOperation(leaderCars, GetOnDemandRaftState.INSTANCE)).getLastIndex() >= 1);
1417
1418             final OnDemandRaftState raftState = (OnDemandRaftState)leaderDistributedDataStore.getActorUtils()
1419                     .executeOperation(leaderCars, GetOnDemandRaftState.INSTANCE);
1420
1421             // Simulate a follower not receiving heartbeats but still being able to send messages ie RequestVote with
1422             // new term(switching to candidate after election timeout)
1423             leaderCars.tell(new RequestVote(raftState.getCurrentTerm() + 1,
1424                     "member-3-shard-cars-testRaftCallbackDuringLeadershipDrop", -1,
1425                             -1), member3Cars);
1426
1427             member2Cars.tell(new StopDropMessages(AppendEntries.class), null);
1428             member3Cars.tell(new StopDropMessages(AppendEntries.class), null);
1429
1430             await("Is tx stuck in COMMIT_PENDING")
1431                     .atMost(10, TimeUnit.SECONDS).untilAtomic(submitDone, equalTo(true));
1432
1433         }
1434
1435         executor.shutdownNow();
1436     }
1437
1438     private static void verifySnapshot(final Snapshot actual, final Snapshot expected,
1439                                        final NormalizedNode<?, ?> expRoot) {
1440         assertEquals("Snapshot getLastAppliedTerm", expected.getLastAppliedTerm(), actual.getLastAppliedTerm());
1441         assertEquals("Snapshot getLastAppliedIndex", expected.getLastAppliedIndex(), actual.getLastAppliedIndex());
1442         assertEquals("Snapshot getLastTerm", expected.getLastTerm(), actual.getLastTerm());
1443         assertEquals("Snapshot getLastIndex", expected.getLastIndex(), actual.getLastIndex());
1444         assertEquals("Snapshot state type", ShardSnapshotState.class, actual.getState().getClass());
1445         MetadataShardDataTreeSnapshot shardSnapshot =
1446                 (MetadataShardDataTreeSnapshot) ((ShardSnapshotState)actual.getState()).getSnapshot();
1447         assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().get());
1448     }
1449
1450     private static void sendDatastoreContextUpdate(final AbstractDataStore dataStore, final Builder builder) {
1451         final Builder newBuilder = DatastoreContext.newBuilderFrom(builder.build());
1452         final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
1453         final Answer<DatastoreContext> answer = invocation -> newBuilder.build();
1454         Mockito.doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext();
1455         Mockito.doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
1456         dataStore.onDatastoreContextUpdated(mockContextFactory);
1457     }
1458 }