/*
 * Copyright (c) 2015, 2017 Brocade Communications Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
8 package org.opendaylight.controller.cluster.datastore;
10 import static org.awaitility.Awaitility.await;
11 import static org.hamcrest.Matchers.equalTo;
12 import static org.junit.Assert.assertEquals;
13 import static org.junit.Assert.assertFalse;
14 import static org.junit.Assert.assertNotNull;
15 import static org.junit.Assert.assertTrue;
16 import static org.junit.Assert.fail;
17 import static org.mockito.ArgumentMatchers.any;
18 import static org.mockito.ArgumentMatchers.eq;
19 import static org.mockito.Mockito.timeout;
20 import static org.mockito.Mockito.verify;
22 import akka.actor.ActorRef;
23 import akka.actor.ActorSelection;
24 import akka.actor.ActorSystem;
25 import akka.actor.Address;
26 import akka.actor.AddressFromURIString;
27 import akka.cluster.Cluster;
28 import akka.cluster.Member;
29 import akka.dispatch.Futures;
30 import akka.pattern.Patterns;
31 import akka.testkit.javadsl.TestKit;
32 import com.google.common.base.Stopwatch;
33 import com.google.common.base.Throwables;
34 import com.google.common.collect.ImmutableMap;
35 import com.google.common.collect.Range;
36 import com.google.common.primitives.UnsignedLong;
37 import com.google.common.util.concurrent.ListenableFuture;
38 import com.google.common.util.concurrent.MoreExecutors;
39 import com.google.common.util.concurrent.Uninterruptibles;
40 import com.typesafe.config.ConfigFactory;
41 import java.util.Arrays;
42 import java.util.Collection;
43 import java.util.Collections;
44 import java.util.Iterator;
45 import java.util.LinkedList;
46 import java.util.List;
47 import java.util.Optional;
49 import java.util.concurrent.ExecutionException;
50 import java.util.concurrent.ExecutorService;
51 import java.util.concurrent.Executors;
52 import java.util.concurrent.TimeUnit;
53 import java.util.concurrent.atomic.AtomicBoolean;
54 import java.util.concurrent.atomic.AtomicLong;
55 import java.util.function.Supplier;
56 import org.junit.After;
57 import org.junit.Assume;
58 import org.junit.Before;
59 import org.junit.Ignore;
60 import org.junit.Test;
61 import org.junit.runner.RunWith;
62 import org.junit.runners.Parameterized;
63 import org.junit.runners.Parameterized.Parameter;
64 import org.junit.runners.Parameterized.Parameters;
65 import org.mockito.Mockito;
66 import org.mockito.stubbing.Answer;
67 import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
68 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
69 import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
70 import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker;
71 import org.opendaylight.controller.cluster.databroker.TestClientBackedDataStore;
72 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
73 import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
74 import org.opendaylight.controller.cluster.datastore.TestShard.StartDropMessages;
75 import org.opendaylight.controller.cluster.datastore.TestShard.StopDropMessages;
76 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
77 import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
78 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
79 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
80 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
81 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
82 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
83 import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
84 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
85 import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
86 import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
87 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
88 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
89 import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
90 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
91 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
92 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
93 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
94 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
95 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
96 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
97 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
98 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
99 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
100 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
101 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
102 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
103 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
104 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
105 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
106 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
107 import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
108 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
109 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
110 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
111 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
112 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
113 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
114 import org.opendaylight.yangtools.yang.common.Uint64;
115 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
116 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
117 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
118 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
119 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
120 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
121 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
122 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
123 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
124 import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
125 import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
126 import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
127 import scala.concurrent.Await;
128 import scala.concurrent.Future;
129 import scala.concurrent.duration.FiniteDuration;
/**
 * End-to-end distributed data store tests that exercise remote shards and transactions.
 *
 * @author Thomas Pantelis
 */
136 @RunWith(Parameterized.class)
137 public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
139 @Parameters(name = "{0}")
140 public static Collection<Object[]> data() {
141 return Arrays.asList(new Object[][] {
142 { TestDistributedDataStore.class, 7}, { TestClientBackedDataStore.class, 12 }
// Parameterized-runner inputs: the concrete datastore implementation under test and the commit
// timeout value handed to the IntegrationTestKits.
// NOTE(review): the @Parameter(0)/@Parameter(1) annotations that JUnit requires on these public
// fields are not visible in this capture - confirm against VCS.
public Class<? extends AbstractDataStore> testParameter;
public int commitTimeout;

// Shard-name sets used when bringing up the datastores.
private static final String[] CARS_AND_PEOPLE = {"cars", "people"};
private static final String[] CARS = {"cars"};

// Cluster addresses of member-1 (initial seed/leader) and member-2 (follower).
private static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse(
"akka://cluster-test@127.0.0.1:2558");
private static final Address MEMBER_2_ADDRESS = AddressFromURIString.parse(
"akka://cluster-test@127.0.0.1:2559");

// Module-shards configuration resources selecting which members host which shards.
private static final String MODULE_SHARDS_CARS_ONLY_1_2 = "module-shards-cars-member-1-and-2.conf";
private static final String MODULE_SHARDS_CARS_PEOPLE_1_2 = "module-shards-member1-and-2.conf";
private static final String MODULE_SHARDS_CARS_PEOPLE_1_2_3 = "module-shards-member1-and-2-and-3.conf";
private static final String MODULE_SHARDS_CARS_1_2_3 = "module-shards-cars-member-1-and-2-and-3.conf";

// One actor system per cluster member; created in setUp(), torn down in tearDown().
private ActorSystem leaderSystem;
private ActorSystem followerSystem;
private ActorSystem follower2System;

// Leader context: fast heartbeat so elections settle quickly in tests.
private final DatastoreContext.Builder leaderDatastoreContextBuilder =
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);

// Follower context: elections disabled (DisableElectionsRaftPolicy) so leadership stays with
// member-1 unless a test explicitly re-enables elections.
private final DatastoreContext.Builder followerDatastoreContextBuilder =
DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
.customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());

// Pre-allocated transaction identifiers for tests that construct transactions manually.
private final TransactionIdentifier tx1 = nextTransactionId();
private final TransactionIdentifier tx2 = nextTransactionId();

// Datastores and testkits for the two primary members; initialized by initDatastores().
private AbstractDataStore followerDistributedDataStore;
private AbstractDataStore leaderDistributedDataStore;
private IntegrationTestKit followerTestKit;
private IntegrationTestKit leaderTestKit;
183 public void setUp() {
184 InMemoryJournal.clear();
185 InMemorySnapshotStore.clear();
187 leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
188 Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);
190 followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
191 Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);
193 follower2System = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member3"));
194 Cluster.get(follower2System).join(MEMBER_1_ADDRESS);
198 public void tearDown() {
199 if (followerDistributedDataStore != null) {
200 leaderDistributedDataStore.close();
202 if (leaderDistributedDataStore != null) {
203 leaderDistributedDataStore.close();
206 TestKit.shutdownActorSystem(leaderSystem);
207 TestKit.shutdownActorSystem(followerSystem);
208 TestKit.shutdownActorSystem(follower2System);
210 InMemoryJournal.clear();
211 InMemorySnapshotStore.clear();
214 private void initDatastoresWithCars(final String type) throws Exception {
215 initDatastores(type, MODULE_SHARDS_CARS_ONLY_1_2, CARS);
218 private void initDatastoresWithCarsAndPeople(final String type) throws Exception {
219 initDatastores(type, MODULE_SHARDS_CARS_PEOPLE_1_2, CARS_AND_PEOPLE);
222 private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards)
224 leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder, commitTimeout);
226 leaderDistributedDataStore = leaderTestKit.setupAbstractDataStore(
227 testParameter, type, moduleShardsConfig, false, shards);
229 followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder, commitTimeout);
230 followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
231 testParameter, type, moduleShardsConfig, false, shards);
233 leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(), shards);
235 leaderTestKit.waitForMembersUp("member-2");
236 followerTestKit.waitForMembersUp("member-1");
239 private static void verifyCars(final DOMStoreReadTransaction readTx, final MapEntryNode... entries)
241 final Optional<NormalizedNode<?, ?>> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
242 assertTrue("isPresent", optional.isPresent());
244 final CollectionNodeBuilder<MapEntryNode, MapNode> listBuilder = ImmutableNodes.mapNodeBuilder(
245 CarsModel.CAR_QNAME);
246 for (final NormalizedNode<?, ?> entry: entries) {
247 listBuilder.withChild((MapEntryNode) entry);
250 assertEquals("Car list node", listBuilder.build(), optional.get());
253 private static void verifyNode(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path,
254 final NormalizedNode<?, ?> expNode) throws Exception {
255 final Optional<NormalizedNode<?, ?>> optional = readTx.read(path).get(5, TimeUnit.SECONDS);
256 assertTrue("isPresent", optional.isPresent());
257 assertEquals("Data node", expNode, optional.get());
260 private static void verifyExists(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path)
262 final Boolean exists = readTx.exists(path).get(5, TimeUnit.SECONDS);
263 assertEquals("exists", Boolean.TRUE, exists);
// Commits two cars through the follower (remote transactions), deletes one, then re-instates
// member-2 as a standalone node to verify the data was replicated and can be recovered.
// NOTE(review): this capture dropped short source lines (blanks, brace-only lines, the @Test
// annotation, the retry-loop scaffolding and the IntegrationTestKit constructor continuation);
// code below is annotated but otherwise left as captured - recover exact text from VCS.
public void testWriteTransactionWithSingleShard() throws Exception {
final String testName = "testWriteTransactionWithSingleShard";
initDatastoresWithCars(testName);
final String followerCarShardName = "member-2-shard-cars-" + testName;
// Write and commit two car entries through the follower.
DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
assertNotNull("newWriteOnlyTransaction returned null", writeTx);
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
writeTx.merge(car1Path, car1);
final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
writeTx.merge(car2Path, car2);
followerTestKit.doCommit(writeTx.ready());
// Both members must observe the committed data.
verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1, car2);
verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
// Delete car1 in a second transaction and re-verify on both members.
writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
writeTx.delete(car1Path);
followerTestKit.doCommit(writeTx.ready());
verifyExists(followerDistributedDataStore.newReadOnlyTransaction(), car2Path);
verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car2);
verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car2);
// Re-instate the follower member 2 as a single-node to verify replication and recovery.
// The following is a bit tricky. Before we reinstate the follower we need to ensure it has persisted and
// applied and all the log entries from the leader. Since we've verified the car data above we know that
// all the transactions have been applied on the leader so we first read and capture its lastAppliedIndex.
final AtomicLong leaderLastAppliedIndex = new AtomicLong();
IntegrationTestKit.verifyShardState(leaderDistributedDataStore, CARS[0],
state -> leaderLastAppliedIndex.set(state.getLastApplied()));
// Now we need to make sure the follower has persisted the leader's lastAppliedIndex via ApplyJournalEntries.
// However we don't know exactly how many ApplyJournalEntries messages there will be as it can differ between
// the tell-based and ask-based front-ends. For ask-based there will be exactly 2 ApplyJournalEntries but
// tell-based persists additional payloads which could be replicated and applied in a batch resulting in
// either 2 or 3 ApplyJournalEntries. To handle this we read the follower's persisted ApplyJournalEntries
// until we find the one that encompasses the leader's lastAppliedIndex.
Stopwatch sw = Stopwatch.createStarted();
boolean done = false;
// NOTE(review): the retry loop around the journal scan (a loop that sets 'done' and breaks when
// a matching entry is found) lost several lines in this capture - confirm against VCS.
final List<ApplyJournalEntries> entries = InMemoryJournal.get(followerCarShardName,
ApplyJournalEntries.class);
for (ApplyJournalEntries aje: entries) {
if (aje.getToIndex() >= leaderLastAppliedIndex.get()) {
// Overall budget of 5 seconds, polling every 50 ms.
assertTrue("Follower did not persist ApplyJournalEntries containing leader's lastAppliedIndex "
+ leaderLastAppliedIndex + ". Entries persisted: " + entries, sw.elapsed(TimeUnit.SECONDS) <= 5);
Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
// Stop both original actor systems and restart member-2 standalone.
TestKit.shutdownActorSystem(leaderSystem, true);
TestKit.shutdownActorSystem(followerSystem, true);
final ActorSystem newSystem = newActorSystem("reinstated-member2", "Member2");
// NOTE(review): other module-shards config names in this file end in ".conf" - confirm that
// "module-shards-member2" resolves as intended.
try (AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder,
.setupAbstractDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
verifyCars(member2Datastore.newReadOnlyTransaction(), car2);
// Commits many back-to-back single transactions on a chain and verifies (via awaitility) that the
// leader's frontend metadata has purged them - guards against a range-set leak in the tell-based
// protocol.
// NOTE(review): this capture dropped short lines (blanks, braces, the @Test annotation and the
// declaration of 'numCars'); code below is annotated but left as captured - recover from VCS.
// The test-name string reuses "testWriteTransactionWithSingleShard" - looks like copy-paste;
// confirm it is intentional/harmless.
public void testSingleTransactionsWritesInQuickSuccession() throws Exception {
final String testName = "testWriteTransactionWithSingleShard";
initDatastoresWithCars(testName);
final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
// Seed the cars container and empty car list.
DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
followerTestKit.doCommit(writeTx.ready());
// One committed write plus one opened-and-closed read per iteration.
// NOTE(review): the declaration of 'numCars' is not visible in this capture.
for (int i = 0; i < numCars; i++) {
writeTx = txChain.newWriteOnlyTransaction();
writeTx.write(CarsModel.newCarPath("car" + i),
CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
followerTestKit.doCommit(writeTx.ready());
DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
domStoreReadTransaction.close();
// wait to let the shard catch up with purged
await("Range set leak test").atMost(5, TimeUnit.SECONDS)
.pollInterval(500, TimeUnit.MILLISECONDS)
.untilAsserted(() -> {
Optional<ActorRef> localShard =
leaderDistributedDataStore.getActorUtils().findLocalShard("cars");
FrontendShardDataTreeSnapshotMetadata frontendMetadata =
(FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
.executeOperation(localShard.get(), new RequestFrontendMetadata());
if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
// Walk the client's current histories looking for history id 1.
Iterator<FrontendHistoryMetadata> iterator =
frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
FrontendHistoryMetadata metadata = iterator.next();
while (iterator.hasNext() && metadata.getHistoryId() != 1) {
metadata = iterator.next();
// All transactions must have been purged - none should remain merely closed.
assertEquals(0, metadata.getClosedTransactions().size());
assertEquals(Range.closedOpen(UnsignedLong.valueOf(0), UnsignedLong.valueOf(11)),
metadata.getPurgedTransactions().asRanges().iterator().next());
// ask based should track no metadata
assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
// Finally verify the expected number of committed cars.
final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
assertTrue("isPresent", optional.isPresent());
assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
// Opens and immediately closes transactions on a chain and verifies the leader's frontend
// metadata does not leak entries for them (tell-based protocol only).
// NOTE(review): this capture dropped short lines (blanks, braces, the @Test annotation, the
// 'numCars' declaration and likely a 'writeTx.close()' inside the loop); code below is annotated
// but left as captured - recover from VCS. Also 'Set' is referenced below but no java.util.Set
// import is visible in this capture - confirm the import exists.
@Ignore("Flushes out tell based leak needs to be handled separately")
public void testCloseTransactionMetadataLeak() throws Exception {
// Ask based frontend seems to have some issues with back to back close
Assume.assumeTrue(testParameter.isAssignableFrom(TestClientBackedDataStore.class));
// NOTE(review): the test-name string reuses "testWriteTransactionWithSingleShard" - confirm.
final String testName = "testWriteTransactionWithSingleShard";
initDatastoresWithCars(testName);
final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
// Seed the cars container and empty car list.
DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
followerTestKit.doCommit(writeTx.ready());
// Repeatedly open transactions and close them without committing.
for (int i = 0; i < numCars; i++) {
writeTx = txChain.newWriteOnlyTransaction();
DOMStoreReadTransaction domStoreReadTransaction = txChain.newReadOnlyTransaction();
domStoreReadTransaction.read(CarsModel.BASE_PATH).get();
domStoreReadTransaction.close();
// One real commit after the close/abort churn.
writeTx = txChain.newWriteOnlyTransaction();
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
followerTestKit.doCommit(writeTx.ready());
// wait to let the shard catch up with purged
await("Close transaction purge leak test.").atMost(5, TimeUnit.SECONDS)
.pollInterval(500, TimeUnit.MILLISECONDS)
.untilAsserted(() -> {
Optional<ActorRef> localShard =
leaderDistributedDataStore.getActorUtils().findLocalShard("cars");
FrontendShardDataTreeSnapshotMetadata frontendMetadata =
(FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
.executeOperation(localShard.get(), new RequestFrontendMetadata());
if (leaderDistributedDataStore.getActorUtils().getDatastoreContext().isUseTellBasedProtocol()) {
// Walk the client's current histories looking for history id 1.
Iterator<FrontendHistoryMetadata> iterator =
frontendMetadata.getClients().get(0).getCurrentHistories().iterator();
FrontendHistoryMetadata metadata = iterator.next();
while (iterator.hasNext() && metadata.getHistoryId() != 1) {
metadata = iterator.next();
// Purged transactions must have been coalesced into a single contiguous range.
Set<Range<UnsignedLong>> ranges = metadata.getPurgedTransactions().asRanges();
assertEquals(0, metadata.getClosedTransactions().size());
assertEquals(1, ranges.size());
// ask based should track no metadata
assertTrue(frontendMetadata.getClients().get(0).getCurrentHistories().isEmpty());
// Finally verify the expected number of committed cars.
final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
assertTrue("isPresent", optional.isPresent());
assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
478 public void testReadWriteTransactionWithSingleShard() throws Exception {
479 initDatastoresWithCars("testReadWriteTransactionWithSingleShard");
481 final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
482 assertNotNull("newReadWriteTransaction returned null", rwTx);
484 rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
485 rwTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
487 final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
488 rwTx.merge(CarsModel.newCarPath("optima"), car1);
490 verifyCars(rwTx, car1);
492 final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
493 final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
494 rwTx.merge(car2Path, car2);
496 verifyExists(rwTx, car2Path);
498 followerTestKit.doCommit(rwTx.ready());
500 verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1, car2);
504 public void testWriteTransactionWithMultipleShards() throws Exception {
505 initDatastoresWithCarsAndPeople("testWriteTransactionWithMultipleShards");
507 final DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
508 assertNotNull("newWriteOnlyTransaction returned null", writeTx);
510 final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
511 final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
512 writeTx.write(carsPath, carsNode);
514 final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
515 final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
516 writeTx.write(peoplePath, peopleNode);
518 followerTestKit.doCommit(writeTx.ready());
520 final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
522 verifyNode(readTx, carsPath, carsNode);
523 verifyNode(readTx, peoplePath, peopleNode);
527 public void testReadWriteTransactionWithMultipleShards() throws Exception {
528 initDatastoresWithCarsAndPeople("testReadWriteTransactionWithMultipleShards");
530 final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
531 assertNotNull("newReadWriteTransaction returned null", rwTx);
533 final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
534 final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
535 rwTx.write(carsPath, carsNode);
537 final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
538 final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
539 rwTx.write(peoplePath, peopleNode);
541 followerTestKit.doCommit(rwTx.ready());
543 final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
545 verifyNode(readTx, carsPath, carsNode);
546 verifyNode(readTx, peoplePath, peopleNode);
// Interleaves write-only, read-only and read-write transactions on one chain against the single
// cars shard, committing out of creation order (writeTx readied first, committed before rwTx).
// NOTE(review): this capture dropped short lines (blanks, braces, the @Test annotation and at
// least one statement near the end - likely 'txChain.close()'); code below is annotated but left
// as captured - recover from VCS.
public void testTransactionChainWithSingleShard() throws Exception {
initDatastoresWithCars("testTransactionChainWithSingleShard");
final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
// Add the top-level cars container with write-only.
final DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
assertNotNull("newWriteOnlyTransaction returned null", writeTx);
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
// Ready the write tx but do not commit yet - later chain transactions must still see its data.
final DOMStoreThreePhaseCommitCohort writeTxReady = writeTx.ready();
// Verify the top-level cars container with read-only.
verifyNode(txChain.newReadOnlyTransaction(), CarsModel.BASE_PATH, CarsModel.emptyContainer());
// Perform car operations with read-write.
final DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
verifyNode(rwTx, CarsModel.BASE_PATH, CarsModel.emptyContainer());
rwTx.merge(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
rwTx.write(car1Path, car1);
verifyExists(rwTx, car1Path);
verifyCars(rwTx, car1);
final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
rwTx.merge(CarsModel.newCarPath("sportage"), car2);
rwTx.delete(car1Path);
// Commit in chain order: the readied write tx first, then the read-write tx.
followerTestKit.doCommit(writeTxReady);
followerTestKit.doCommit(rwTx.ready());
// Only car2 should remain after the delete of car1.
verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car2);
// Exercises a transaction chain spanning both the cars and people shards, including in-chain
// reads of uncommitted data and multiple cohorts committed in order.
// NOTE(review): this capture dropped short lines (blanks, braces, the @Test annotation and at
// least one statement before the final verification - likely 'txChain.close()'); code below is
// annotated but left as captured - recover from VCS.
public void testTransactionChainWithMultipleShards() throws Exception {
initDatastoresWithCarsAndPeople("testTransactionChainWithMultipleShards");
final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
// Seed both shards with their top-level containers and empty lists.
DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
assertNotNull("newWriteOnlyTransaction returned null", writeTx);
writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
writeTx.write(PeopleModel.PERSON_LIST_PATH, PeopleModel.newPersonMapNode());
followerTestKit.doCommit(writeTx.ready());
// Write one entry into each shard via a read-write tx and read them back pre-commit.
final DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction();
final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
readWriteTx.write(carPath, car);
final MapEntryNode person = PeopleModel.newPersonEntry("jack");
final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
readWriteTx.merge(personPath, person);
Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
assertTrue("isPresent", optional.isPresent());
assertEquals("Data node", car, optional.get());
optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
assertTrue("isPresent", optional.isPresent());
assertEquals("Data node", person, optional.get());
final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();
// A later chain tx deletes the person again before either cohort commits.
writeTx = txChain.newWriteOnlyTransaction();
writeTx.delete(personPath);
final DOMStoreThreePhaseCommitCohort cohort3 = writeTx.ready();
followerTestKit.doCommit(cohort2);
followerTestKit.doCommit(cohort3);
// Final state: the car remains, the person was deleted by the last transaction.
final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
verifyCars(readTx, car);
optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
assertFalse("isPresent", optional.isPresent());
// Verifies that committing invalid data on a broker-level transaction chain fails the commit and
// notifies the chain listener.
// NOTE(review): this capture dropped short lines (blanks, braces, the @Test annotation, the
// 'try {' opener before the commit, and the catch body - given the Throwables import, the catch
// likely asserts on the root cause; confirm against VCS). Code below is annotated but left as
// captured.
public void testChainedTransactionFailureWithSingleShard() throws Exception {
initDatastoresWithCars("testChainedTransactionFailureWithSingleShard");
// Wrap the follower store in a ConcurrentDOMDataBroker so chain-failure callbacks fire.
final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
MoreExecutors.directExecutor());
final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
final DOMTransactionChain txChain = broker.createTransactionChain(listener);
final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
// Build a cars container holding a leaf that is not part of the cars schema.
final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
.withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
// NOTE(review): 'try {' line lost here in this capture.
writeTx.commit().get(5, TimeUnit.SECONDS);
fail("Expected TransactionCommitFailedException");
} catch (final ExecutionException e) {
// The chain listener must be told about the failed transaction.
verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
// Same as the single-shard chained-failure test but with a valid put to the people shard
// preceding the invalid merge to the cars shard, demonstrating that put skips deep validation.
// NOTE(review): this capture dropped short lines (blanks, braces, the @Test annotation, the
// 'try {' opener before the commit, and the catch body); code below is annotated but left as
// captured - recover from VCS.
public void testChainedTransactionFailureWithMultipleShards() throws Exception {
initDatastoresWithCarsAndPeople("testChainedTransactionFailureWithMultipleShards");
// Wrap the follower store in a ConcurrentDOMDataBroker so chain-failure callbacks fire.
final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
MoreExecutors.directExecutor());
final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
final DOMTransactionChain txChain = broker.createTransactionChain(listener);
final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
// Build a cars container holding a leaf that is not part of the cars schema.
final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
.withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
// Note that merge will validate the data and fail but put succeeds b/c deep validation is not
// done for put for performance reasons.
writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
// NOTE(review): 'try {' line lost here in this capture.
writeTx.commit().get(5, TimeUnit.SECONDS);
fail("Expected TransactionCommitFailedException");
} catch (final ExecutionException e) {
// The chain listener must be told about the failed transaction.
verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
// Verifies that single-shard transactions keep working across a leader change: a remote Tx is
// committed via the original leader, the leader is shut down, member-2 takes over, and a
// subsequent (now local) Tx commits successfully.
724 public void testSingleShardTransactionsWithLeaderChanges() throws Exception {
725 followerDatastoreContextBuilder.backendAlivenessTimerIntervalInSeconds(2);
726 final String testName = "testSingleShardTransactionsWithLeaderChanges";
727 initDatastoresWithCars(testName);
// Latch so we can wait for the follower's journal write (ApplyJournalEntries) to complete.
729 final String followerCarShardName = "member-2-shard-cars-" + testName;
730 InMemoryJournal.addWriteMessagesCompleteLatch(followerCarShardName, 1, ApplyJournalEntries.class);
732 // Write top-level car container from the follower so it uses a remote Tx.
734 DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
736 writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
737 writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
739 followerTestKit.doCommit(writeTx.ready());
// Ensure the commit has been replicated and applied on the follower before switching leaders.
741 InMemoryJournal.waitForWriteMessagesComplete(followerCarShardName);
743 // Switch the leader to the follower
745 sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
746 .shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null));
// Take down member-1 and remove it from the cluster so member-2 can win an election.
748 TestKit.shutdownActorSystem(leaderSystem, true);
749 Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
751 followerTestKit.waitUntilNoLeader(followerDistributedDataStore.getActorUtils(), CARS);
// Bring member-1 back as a fresh actor system joining via member-2.
753 leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
754 Cluster.get(leaderSystem).join(MEMBER_2_ADDRESS);
756 final DatastoreContext.Builder newMember1Builder = DatastoreContext.newBuilder()
757 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
758 IntegrationTestKit newMember1TestKit = new IntegrationTestKit(leaderSystem, newMember1Builder, commitTimeout);
760 try (AbstractDataStore ds =
761 newMember1TestKit.setupAbstractDataStore(
762 testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) {
764 followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), CARS);
766 // Write a car entry to the new leader - should switch to local Tx
768 writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
770 MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
771 YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
772 writeTx.merge(car1Path, car1);
774 followerTestKit.doCommit(writeTx.ready());
776 verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car1);
// Verifies that a ReadyLocalTransaction message sent to a follower shard is forwarded to the
// leader: first with immediate (3PC-less) commit, then with an explicit three-phase commit
// driven through a ThreePhaseCommitCohortProxy.
780 @SuppressWarnings("unchecked")
782 public void testReadyLocalTransactionForwardedToLeader() throws Exception {
783 initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader");
784 followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");
786 final Optional<ActorRef> carsFollowerShard =
787 followerDistributedDataStore.getActorUtils().findLocalShard("cars");
788 assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
// Build a local data tree modification to be wrapped in a ReadyLocalTransaction.
790 final DataTree dataTree = new InMemoryDataTreeFactory().create(
791 DataTreeConfiguration.DEFAULT_OPERATIONAL, SchemaContextHelper.full());
793 // Send a tx with immediate commit.
795 DataTreeModification modification = dataTree.takeSnapshot().newModification();
796 new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
797 new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);
799 final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
800 new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
801 modification.ready();
// doCommitOnReady = true: the shard should commit directly and reply with CommitTransactionReply.
803 ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(tx1 , modification, true, Optional.empty());
805 carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
806 Object resp = followerTestKit.expectMsgClass(Object.class);
807 if (resp instanceof akka.actor.Status.Failure) {
808 throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
811 assertEquals("Response type", CommitTransactionReply.class, resp.getClass());
// Data must be visible on the leader, proving the message was forwarded and committed there.
813 verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1);
815 // Send another tx without immediate commit.
817 modification = dataTree.takeSnapshot().newModification();
818 MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
819 new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
820 modification.ready();
// doCommitOnReady = false: expect ReadyTransactionReply and drive 3PC manually below.
822 readyLocal = new ReadyLocalTransaction(tx2 , modification, false, Optional.empty());
824 carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
825 resp = followerTestKit.expectMsgClass(Object.class);
826 if (resp instanceof akka.actor.Status.Failure) {
827 throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
830 assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());
832 final ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
833 ((ReadyTransactionReply)resp).getCohortPath());
835 final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
836 Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
// Run the full canCommit/preCommit/commit sequence against the cohort returned by the leader.
837 ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
838 leaderDistributedDataStore.getActorUtils(), Arrays.asList(
839 new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
840 cohort.canCommit().get(5, TimeUnit.SECONDS);
841 cohort.preCommit().get(5, TimeUnit.SECONDS);
842 cohort.commit().get(5, TimeUnit.SECONDS);
844 verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
// Verifies that a ForwardedReadyTransaction message sent to a follower shard is forwarded to the
// leader — the ForwardedReadyTransaction analogue of testReadyLocalTransactionForwardedToLeader:
// first with immediate commit, then with an explicit three-phase commit.
847 @SuppressWarnings("unchecked")
849 public void testForwardedReadyTransactionForwardedToLeader() throws Exception {
850 initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader");
851 followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");
853 final Optional<ActorRef> carsFollowerShard =
854 followerDistributedDataStore.getActorUtils().findLocalShard("cars");
855 assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
// Obtain the follower shard's actual DataTree so modifications are built against its state.
857 carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
858 final DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class);
860 // Send a tx with immediate commit.
862 DataTreeModification modification = dataTree.takeSnapshot().newModification();
863 new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
864 new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);
866 final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
867 new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
// doImmediateCommit = true: expect CommitTransactionReply from the leader.
869 ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1,
870 DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
871 Mockito.mock(ShardDataTreeTransactionParent.class), tx1, modification), true,
874 carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
875 Object resp = followerTestKit.expectMsgClass(Object.class);
876 if (resp instanceof akka.actor.Status.Failure) {
877 throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
880 assertEquals("Response type", CommitTransactionReply.class, resp.getClass());
// Data must be visible on the leader, proving the message was forwarded and committed there.
882 verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1);
884 // Send another tx without immediate commit.
886 modification = dataTree.takeSnapshot().newModification();
887 MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
888 new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
// doImmediateCommit = false: expect ReadyTransactionReply and drive 3PC manually below.
890 forwardedReady = new ForwardedReadyTransaction(tx2,
891 DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
892 Mockito.mock(ShardDataTreeTransactionParent.class), tx2, modification), false,
895 carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
896 resp = followerTestKit.expectMsgClass(Object.class);
897 if (resp instanceof akka.actor.Status.Failure) {
898 throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
901 assertEquals("Response type", ReadyTransactionReply.class, resp.getClass());
903 ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
904 ((ReadyTransactionReply)resp).getCohortPath());
906 final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
907 Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
// Run the full canCommit/preCommit/commit sequence against the cohort returned by the leader.
908 final ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
909 leaderDistributedDataStore.getActorUtils(), Arrays.asList(
910 new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
911 cohort.canCommit().get(5, TimeUnit.SECONDS);
912 cohort.preCommit().get(5, TimeUnit.SECONDS);
913 cohort.commit().get(5, TimeUnit.SECONDS);
915 verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
// Verifies that pending transactions queued on a leader that loses leadership are forwarded to
// the new leader after retry, and that all of them eventually commit successfully.
919 public void testTransactionForwardedToLeaderAfterRetry() throws Exception {
920 // FIXME: remove when test passes also for ClientBackedDataStore
921 Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
// Small batch count so multi-write transactions span several BatchedModifications messages.
922 followerDatastoreContextBuilder.shardBatchedModificationCount(2);
923 leaderDatastoreContextBuilder.shardBatchedModificationCount(2);
924 initDatastoresWithCarsAndPeople("testTransactionForwardedToLeaderAfterRetry");
926 // Do an initial write to get the primary shard info cached.
928 final DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction();
929 initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
930 initialWriteTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
931 followerTestKit.doCommit(initialWriteTx.ready());
933 // Wait for the commit to be replicated to the follower.
935 MemberNode.verifyRaftState(followerDistributedDataStore, "cars",
936 raftState -> assertEquals("getLastApplied", 1, raftState.getLastApplied()));
938 MemberNode.verifyRaftState(followerDistributedDataStore, "people",
939 raftState -> assertEquals("getLastApplied", 1, raftState.getLastApplied()));
941 // Prepare, ready and canCommit a WO tx that writes to 2 shards. This will become the current tx in
944 final DOMStoreWriteTransaction writeTx1 = followerDistributedDataStore.newWriteOnlyTransaction();
945 writeTx1.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
946 writeTx1.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
947 final DOMStoreThreePhaseCommitCohort writeTx1Cohort = writeTx1.ready();
948 final ListenableFuture<Boolean> writeTx1CanCommit = writeTx1Cohort.canCommit();
949 writeTx1CanCommit.get(5, TimeUnit.SECONDS);
951 // Prepare and ready another WO tx that writes to 2 shards but don't canCommit yet. This will be queued
952 // in the leader shard.
954 final DOMStoreWriteTransaction writeTx2 = followerDistributedDataStore.newWriteOnlyTransaction();
// Accumulates every car written across all tx's for the final verification.
955 final LinkedList<MapEntryNode> cars = new LinkedList<>();
957 cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
958 writeTx2.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
960 NormalizedNode<?, ?> people = ImmutableNodes.mapNodeBuilder(PeopleModel.PERSON_QNAME)
961 .withChild(PeopleModel.newPersonEntry("Dude")).build();
962 writeTx2.write(PeopleModel.PERSON_LIST_PATH, people);
963 final DOMStoreThreePhaseCommitCohort writeTx2Cohort = writeTx2.ready();
965 // Prepare another WO that writes to a single shard and thus will be directly committed on ready. This
966 // tx writes 5 cars so 2 BatchedModifications messages will be sent initially and cached in the
967 // leader shard (with shardBatchedModificationCount set to 2). The 3rd BatchedModifications will be
970 final DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction();
971 for (int i = 1; i <= 5; i++, carIndex++) {
972 cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
973 writeTx3.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
976 // Prepare another WO that writes to a single shard. This will send a single BatchedModifications
979 final DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction();
980 cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
981 writeTx4.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
984 // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaction message to the
985 // leader shard on ready.
987 final DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction();
988 cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
989 readWriteTx.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
991 IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
992 stats -> assertEquals("getReadWriteTransactionCount", 5, stats.getReadWriteTransactionCount()));
994 // Disable elections on the leader so it switches to follower.
996 sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
997 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
998 .shardElectionTimeoutFactor(10));
1000 leaderTestKit.waitUntilNoLeader(leaderDistributedDataStore.getActorUtils(), "cars");
1002 // Submit all tx's - the messages should get queued for retry.
1004 final ListenableFuture<Boolean> writeTx2CanCommit = writeTx2Cohort.canCommit();
1005 final DOMStoreThreePhaseCommitCohort writeTx3Cohort = writeTx3.ready();
1006 final DOMStoreThreePhaseCommitCohort writeTx4Cohort = writeTx4.ready();
1007 final DOMStoreThreePhaseCommitCohort rwTxCohort = readWriteTx.ready();
1009 // Enable elections on the other follower so it becomes the leader, at which point the
1010 // tx's should get forwarded from the previous leader to the new leader to complete the commits.
1012 sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
1013 .customRaftPolicyImplementation(null).shardElectionTimeoutFactor(1));
// TimeoutNow forces an immediate election timeout so the follower shards stand for election now.
1014 IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorUtils(), "cars")
1015 .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
1016 IntegrationTestKit.findLocalShard(followerDistributedDataStore.getActorUtils(), "people")
1017 .tell(TimeoutNow.INSTANCE, ActorRef.noSender());
// All five transactions must complete their commits against the new leader.
1019 followerTestKit.doCommit(writeTx1CanCommit, writeTx1Cohort);
1020 followerTestKit.doCommit(writeTx2CanCommit, writeTx2Cohort);
1021 followerTestKit.doCommit(writeTx3Cohort);
1022 followerTestKit.doCommit(writeTx4Cohort);
1023 followerTestKit.doCommit(rwTxCohort);
1025 DOMStoreReadTransaction readTx = leaderDistributedDataStore.newReadOnlyTransaction();
1026 verifyCars(readTx, cars.toArray(new MapEntryNode[cars.size()]));
1027 verifyNode(readTx, PeopleModel.PERSON_LIST_PATH, people);
// Verifies that a graceful Shutdown of the leader transfers leadership and that transactions
// pending at shutdown time still complete successfully.
1031 public void testLeadershipTransferOnShutdown() throws Exception {
1032 // FIXME: remove when test passes also for ClientBackedDataStore
1033 Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
1034 leaderDatastoreContextBuilder.shardBatchedModificationCount(1);
1035 followerDatastoreContextBuilder.shardElectionTimeoutFactor(10).customRaftPolicyImplementation(null);
1036 final String testName = "testLeadershipTransferOnShutdown";
// Three-member cluster so leadership has somewhere to transfer to.
1037 initDatastores(testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, CARS_AND_PEOPLE);
1039 final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
1040 DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500),
1042 try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
1043 testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
1045 followerTestKit.waitForMembersUp("member-3");
1046 follower2TestKit.waitForMembersUp("member-1", "member-2");
1048 // Create and submit a couple tx's so they're pending.
1050 DOMStoreWriteTransaction writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
1051 writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1052 writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
1053 writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
1054 final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
// Confirm the first tx is registered (cached) in the leader's cohort cache.
1056 IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
1057 stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
1059 writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
1060 final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
1061 writeTx.write(CarsModel.newCarPath("optima"), car);
1062 final DOMStoreThreePhaseCommitCohort cohort2 = writeTx.ready();
1064 IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
1065 stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
1067 // Gracefully stop the leader via a Shutdown message.
// Large election timeout factor keeps the remaining members from force-electing prematurely.
1069 sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
1070 .shardElectionTimeoutFactor(100));
1072 final FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS);
1073 final Future<ActorRef> future = leaderDistributedDataStore.getActorUtils().findLocalShardAsync("cars");
1074 final ActorRef leaderActor = Await.result(future, duration);
// gracefulStop completes only after the shard actor terminates in response to Shutdown.
1076 final Future<Boolean> stopFuture = Patterns.gracefulStop(leaderActor, duration, Shutdown.INSTANCE);
1078 // Commit the 2 transactions. They should finish and succeed.
1080 followerTestKit.doCommit(cohort1);
1081 followerTestKit.doCommit(cohort2);
1083 // Wait for the leader actor stopped.
1085 final Boolean stopped = Await.result(stopFuture, duration);
1086 assertEquals("Stopped", Boolean.TRUE, stopped);
1088 // Verify leadership was transferred by reading the committed data from the other nodes.
1090 verifyCars(followerDistributedDataStore.newReadOnlyTransaction(), car);
1091 verifyCars(follower2DistributedDataStore.newReadOnlyTransaction(), car);
// Verifies leader behavior when isolated from its follower: a tx submitted before isolation is
// detected stays pending, a tx submitted while IsolatedLeader fails with NoShardLeaderException,
// and both the pending tx and a post-recovery tx commit once the follower is reinstated.
1096 public void testTransactionWithIsolatedLeader() throws Exception {
1097 // FIXME: remove when test passes also for ClientBackedDataStore
1098 Assume.assumeTrue(DistributedDataStore.class.isAssignableFrom(testParameter));
1099 // Set the isolated leader check interval high so we can control the switch to IsolatedLeader.
1100 leaderDatastoreContextBuilder.shardIsolatedLeaderCheckIntervalInMillis(10000000);
1101 final String testName = "testTransactionWithIsolatedLeader";
1102 initDatastoresWithCars(testName);
1104 // Tx that is submitted after the follower is stopped but before the leader transitions to IsolatedLeader.
1105 final DOMStoreWriteTransaction preIsolatedLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1106 preIsolatedLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1108 // Tx that is submitted after the leader transitions to IsolatedLeader.
1109 final DOMStoreWriteTransaction noShardLeaderWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1110 noShardLeaderWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1112 // Tx that is submitted after the follower is reinstated.
1113 final DOMStoreWriteTransaction successWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1114 successWriteTx.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1116 // Stop the follower
1117 followerTestKit.watch(followerDistributedDataStore.getActorUtils().getShardManager());
1118 followerDistributedDataStore.close();
1119 followerTestKit.expectTerminated(followerDistributedDataStore.getActorUtils().getShardManager());
1121 // Submit the preIsolatedLeaderWriteTx so it's pending
1122 final DOMStoreThreePhaseCommitCohort preIsolatedLeaderTxCohort = preIsolatedLeaderWriteTx.ready();
1124 // Change the isolated leader check interval low so it changes to IsolatedLeader.
1125 sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
1126 .shardIsolatedLeaderCheckIntervalInMillis(200));
1128 MemberNode.verifyRaftState(leaderDistributedDataStore, "cars",
1129 raftState -> assertEquals("getRaftState", "IsolatedLeader", raftState.getRaftState()));
// Committing while IsolatedLeader must fail; the ExecutionException is caught below.
// NOTE(review): the enclosing try statement is not visible in this excerpt — confirm against the full file.
1132 leaderTestKit.doCommit(noShardLeaderWriteTx.ready());
1133 fail("Expected NoShardLeaderException");
1134 } catch (final ExecutionException e) {
1135 assertEquals("getCause", NoShardLeaderException.class, Throwables.getRootCause(e).getClass());
1138 sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
1139 .shardElectionTimeoutFactor(100));
1141 final DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready();
// Reinstate the follower so the leader regains quorum and can commit again.
1143 followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
1144 testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
1146 leaderTestKit.doCommit(preIsolatedLeaderTxCohort);
1147 leaderTestKit.doCommit(successTxCohort);
// Verifies that a commit fails with an appropriate exception when the shard leader's actor
// system is shut down and stops responding (cached primary shard info points at a dead leader).
1151 public void testTransactionWithShardLeaderNotResponding() throws Exception {
1152 followerDatastoreContextBuilder.frontendRequestTimeoutInSeconds(2);
1153 followerDatastoreContextBuilder.shardElectionTimeoutFactor(50);
1154 initDatastoresWithCars("testTransactionWithShardLeaderNotResponding");
1156 // Do an initial read to get the primary shard info cached.
1158 final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1159 readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1161 // Shutdown the leader and try to create a new tx.
1163 TestKit.shutdownActorSystem(leaderSystem, true);
// Short operation timeout so the failure surfaces quickly.
1165 followerDatastoreContextBuilder.operationTimeoutInMillis(50).shardElectionTimeoutFactor(1);
1166 sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder);
1168 final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1170 rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
// Commit must fail; the ExecutionException is caught below.
// NOTE(review): the enclosing try statement is not visible in this excerpt — confirm against the full file.
1173 followerTestKit.doCommit(rwTx.ready());
1174 fail("Exception expected");
1175 } catch (final ExecutionException e) {
1176 final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
// Expected cause differs per implementation: ask-based (DistributedDataStore) vs tell-based.
1177 if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
1178 assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException
1179 || e.getCause() instanceof ShardLeaderNotRespondingException);
1181 assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
// Verifies that creating/committing a tx fails with an appropriate exception when there is no
// shard leader at all (leader shut down and removed from the cluster).
1187 public void testTransactionWithCreateTxFailureDueToNoLeader() throws Exception {
1188 followerDatastoreContextBuilder.frontendRequestTimeoutInSeconds(2);
1189 initDatastoresWithCars("testTransactionWithCreateTxFailureDueToNoLeader");
1191 // Do an initial read to get the primary shard info cached.
1193 final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1194 readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1196 // Shutdown the leader and try to create a new tx.
1198 TestKit.shutdownActorSystem(leaderSystem, true);
1200 Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
// Brief pause to let the membership change propagate before reconfiguring the follower.
1202 Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
1204 sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
1205 .operationTimeoutInMillis(10).shardElectionTimeoutFactor(1).customRaftPolicyImplementation(null));
1207 final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1209 rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
// Commit must fail; the ExecutionException is caught below.
// NOTE(review): the enclosing try statement is not visible in this excerpt — confirm against the full file.
1212 followerTestKit.doCommit(rwTx.ready());
1213 fail("Exception expected");
1214 } catch (final ExecutionException e) {
1215 final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
// Expected root cause differs per implementation: ask-based (DistributedDataStore) vs tell-based.
1216 if (DistributedDataStore.class.isAssignableFrom(testParameter)) {
1217 assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException)
1219 assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
// Verifies that a tx whose initial create fails (leader gone) is retried and succeeds once a
// new leader (member-3) is elected in a three-member cluster.
1225 public void testTransactionRetryWithInitialAskTimeoutExOnCreateTx() throws Exception {
1226 followerDatastoreContextBuilder.backendAlivenessTimerIntervalInSeconds(2);
1227 String testName = "testTransactionRetryWithInitialAskTimeoutExOnCreateTx";
1228 initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
1230 final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder()
1231 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10);
1232 final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
1233 follower2System, follower2DatastoreContextBuilder, commitTimeout);
1235 try (AbstractDataStore ds =
1236 follower2TestKit.setupAbstractDataStore(
1237 testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
1239 followerTestKit.waitForMembersUp("member-1", "member-3");
1240 follower2TestKit.waitForMembersUp("member-1", "member-2");
1242 // Do an initial read to get the primary shard info cached.
1244 final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
1245 readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1247 // Shutdown the leader and try to create a new tx.
1249 TestKit.shutdownActorSystem(leaderSystem, true);
1251 Cluster.get(followerSystem).leave(MEMBER_1_ADDRESS);
1253 sendDatastoreContextUpdate(followerDistributedDataStore, followerDatastoreContextBuilder
1254 .operationTimeoutInMillis(500).shardElectionTimeoutFactor(5).customRaftPolicyImplementation(null));
1256 final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1258 rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
// With member-3 still available, the retried tx should commit without error.
1260 followerTestKit.doCommit(rwTx.ready());
// Verifies that RequestVote messages from a semi-reachable member (can send but not receive)
// with higher terms do not cause the established leader/follower to bump their current term.
1265 public void testSemiReachableCandidateNotDroppingLeader() throws Exception {
1266 final String testName = "testSemiReachableCandidateNotDroppingLeader";
1267 initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
1269 final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder()
1270 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10);
1271 final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
1272 follower2System, follower2DatastoreContextBuilder, commitTimeout);
1274 final AbstractDataStore ds2 =
1275 follower2TestKit.setupAbstractDataStore(
1276 testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS);
1278 followerTestKit.waitForMembersUp("member-1", "member-3");
1279 follower2TestKit.waitForMembersUp("member-1", "member-2");
// Kill member-3's actor system so the remaining members see it as unreachable.
1281 TestKit.shutdownActorSystem(follower2System);
// Snapshot the leader's RAFT term before injecting the fake votes.
1283 ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").get();
1284 OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1285 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
1287 Cluster leaderCluster = Cluster.get(leaderSystem);
1288 Cluster followerCluster = Cluster.get(followerSystem);
1289 Cluster follower2Cluster = Cluster.get(follower2System);
1291 Member follower2Member = follower2Cluster.readView().self();
// Wait until both surviving members have marked member-3 unreachable.
1293 await().atMost(10, TimeUnit.SECONDS)
1294 .until(() -> leaderCluster.readView().unreachableMembers().contains(follower2Member));
1295 await().atMost(10, TimeUnit.SECONDS)
1296 .until(() -> followerCluster.readView().unreachableMembers().contains(follower2Member));
1298 ActorRef followerCars = followerDistributedDataStore.getActorUtils().findLocalShard("cars").get();
1300 // to simulate a follower not being able to receive messages, but still being able to send messages and becoming
1301 // candidate, we can just send a couple of RequestVotes to both leader and follower.
1302 cars.tell(new RequestVote(initialState.getCurrentTerm() + 1, "member-3-shard-cars", -1, -1), null);
1303 followerCars.tell(new RequestVote(initialState.getCurrentTerm() + 1, "member-3-shard-cars", -1, -1), null);
1304 cars.tell(new RequestVote(initialState.getCurrentTerm() + 3, "member-3-shard-cars", -1, -1), null);
1305 followerCars.tell(new RequestVote(initialState.getCurrentTerm() + 3, "member-3-shard-cars", -1, -1), null);
1307 OnDemandRaftState stateAfter = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1308 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
1309 OnDemandRaftState followerState = (OnDemandRaftState) followerDistributedDataStore.getActorUtils()
1310 .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
// Neither member may have adopted the higher terms from the unreachable candidate.
1312 assertEquals(initialState.getCurrentTerm(), stateAfter.getCurrentTerm());
1313 assertEquals(initialState.getCurrentTerm(), followerState.getCurrentTerm());
// Verifies snapshot installation: the leader starts from a pre-seeded saved snapshot and must
// install it on a follower that starts with no data; both sides' saved snapshots are checked.
1319 public void testInstallSnapshot() throws Exception {
1320 final String testName = "testInstallSnapshot";
1321 final String leaderCarShardName = "member-1-shard-cars-" + testName;
1322 final String followerCarShardName = "member-2-shard-cars-" + testName;
1324 // Setup a saved snapshot on the leader. The follower will startup with no data and the leader should
1325 // install a snapshot to sync the follower.
1327 DataTree tree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_CONFIGURATION,
1328 SchemaContextHelper.full());
1330 final ContainerNode carsNode = CarsModel.newCarsNode(
1331 CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", Uint64.valueOf(20000))));
1332 AbstractShardTest.writeToStore(tree, CarsModel.BASE_PATH, carsNode);
// Build the snapshot payload from the root of the populated data tree.
1334 final NormalizedNode<?, ?> snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.empty());
1335 final Snapshot initialSnapshot = Snapshot.create(
1336 new ShardSnapshotState(new MetadataShardDataTreeSnapshot(snapshotRoot)),
1337 Collections.emptyList(), 5, 1, 5, 1, 1, null, null);
1338 InMemorySnapshotStore.addSnapshot(leaderCarShardName, initialSnapshot);
// Latches let the test wait for each shard to persist a snapshot.
1340 InMemorySnapshotStore.addSnapshotSavedLatch(leaderCarShardName);
1341 InMemorySnapshotStore.addSnapshotSavedLatch(followerCarShardName);
1343 initDatastoresWithCars(testName);
// The leader must have recovered the cars data from the seeded snapshot.
1345 final Optional<NormalizedNode<?, ?>> readOptional = leaderDistributedDataStore.newReadOnlyTransaction().read(
1346 CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
1347 assertTrue("isPresent", readOptional.isPresent());
1348 assertEquals("Node", carsNode, readOptional.get());
// Both the leader's and the follower's saved snapshots must match the initial snapshot state.
1350 verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(leaderCarShardName, Snapshot.class),
1351 initialSnapshot, snapshotRoot);
1353 verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(followerCarShardName, Snapshot.class),
1354 initialSnapshot, snapshotRoot);
// Verifies that large read replies are sliced and reassembled correctly when the maximum
// message slice size is made small (100 bytes).
1358 public void testReadWriteMessageSlicing() throws Exception {
1359 // The slicing is only implemented for tell-based protocol
1360 Assume.assumeTrue(ClientBackedDataStore.class.isAssignableFrom(testParameter))
// Tiny slice size forces the cars payload to span multiple message slices.
1362 leaderDatastoreContextBuilder.maximumMessageSliceSize(100);
1363 followerDatastoreContextBuilder.maximumMessageSliceSize(100);
1364 initDatastoresWithCars("testLargeReadReplySlicing");
1366 final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
1368 final NormalizedNode<?, ?> carsNode = CarsModel.create();
1369 rwTx.write(CarsModel.BASE_PATH, carsNode);
// Read back through the same RW tx; the sliced reply must reassemble to the written node.
1371 verifyNode(rwTx, CarsModel.BASE_PATH, carsNode);
// Verifies that a commit in flight when the leader temporarily loses leadership (followers drop
// AppendEntries, then a higher-term RequestVote arrives) still completes once replication resumes.
1374 @SuppressWarnings("IllegalCatch")
1376 public void testRaftCallbackDuringLeadershipDrop() throws Exception {
1377 final String testName = "testRaftCallbackDuringLeadershipDrop";
1378 initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
// Executor used to drive the commit asynchronously while we manipulate RAFT state.
1380 final ExecutorService executor = Executors.newSingleThreadExecutor();
1382 final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
1383 DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500)
1384 .shardLeaderElectionTimeoutInSeconds(3600),
1387 final DOMStoreWriteTransaction initialWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1388 initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
1389 leaderTestKit.doCommit(initialWriteTx.ready());
1391 try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
1392 testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false)) {
1394 final ActorRef member3Cars = ((LocalShardStore) follower2DistributedDataStore).getLocalShards()
1395 .getLocalShards().get("cars").getActor();
1396 final ActorRef member2Cars = ((LocalShardStore)followerDistributedDataStore).getLocalShards()
1397 .getLocalShards().get("cars").getActor();
// Both followers stop processing AppendEntries, so the leader cannot replicate.
1398 member2Cars.tell(new StartDropMessages(AppendEntries.class), null);
1399 member3Cars.tell(new StartDropMessages(AppendEntries.class), null);
1401 final DOMStoreWriteTransaction newTx = leaderDistributedDataStore.newWriteOnlyTransaction();
1402 newTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
1403 final AtomicBoolean submitDone = new AtomicBoolean(false);
// Commit on a background thread; it will block until replication is possible again.
1404 executor.submit(() -> {
1406 leaderTestKit.doCommit(newTx.ready());
1407 submitDone.set(true);
1408 } catch (Exception e) {
1409 throw new RuntimeException(e);
1412 final ActorRef leaderCars = ((LocalShardStore) leaderDistributedDataStore).getLocalShards()
1413 .getLocalShards().get("cars").getActor();
// Wait until the leader has appended the new entry to its own log.
1414 await().atMost(10, TimeUnit.SECONDS)
1415 .until(() -> ((OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
1416 .executeOperation(leaderCars, GetOnDemandRaftState.INSTANCE)).getLastIndex() >= 1);
1418 final OnDemandRaftState raftState = (OnDemandRaftState)leaderDistributedDataStore.getActorUtils()
1419 .executeOperation(leaderCars, GetOnDemandRaftState.INSTANCE);
1421 // Simulate a follower not receiving heartbeats but still being able to send messages ie RequestVote with
1422 // new term(switching to candidate after election timeout)
1423 leaderCars.tell(new RequestVote(raftState.getCurrentTerm() + 1,
1424 "member-3-shard-cars-testRaftCallbackDuringLeadershipDrop", -1,
// Let AppendEntries flow again so replication and the pending commit can complete.
1427 member2Cars.tell(new StopDropMessages(AppendEntries.class), null);
1428 member3Cars.tell(new StopDropMessages(AppendEntries.class), null);
1430 await("Is tx stuck in COMMIT_PENDING")
1431 .atMost(10, TimeUnit.SECONDS).untilAtomic(submitDone, equalTo(true));
1435 executor.shutdownNow();
// Asserts that a persisted shard Snapshot matches the expected raft metadata
// (applied term/index, last term/index) and that its state payload is a
// MetadataShardDataTreeSnapshot whose root node equals expRoot.
1438 private static void verifySnapshot(final Snapshot actual, final Snapshot expected,
1439 final NormalizedNode<?, ?> expRoot) {
1440 assertEquals("Snapshot getLastAppliedTerm", expected.getLastAppliedTerm(), actual.getLastAppliedTerm());
1441 assertEquals("Snapshot getLastAppliedIndex", expected.getLastAppliedIndex(), actual.getLastAppliedIndex());
1442 assertEquals("Snapshot getLastTerm", expected.getLastTerm(), actual.getLastTerm());
1443 assertEquals("Snapshot getLastIndex", expected.getLastIndex(), actual.getLastIndex());
// The payload must be a ShardSnapshotState wrapping the serialized data tree.
1444 assertEquals("Snapshot state type", ShardSnapshotState.class, actual.getState().getClass());
1445 MetadataShardDataTreeSnapshot shardSnapshot =
1446 (MetadataShardDataTreeSnapshot) ((ShardSnapshotState)actual.getState()).getSnapshot();
// getRootNode() returns an Optional; .get() here intentionally fails if it is empty.
1447 assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().get());
// Pushes a new DatastoreContext into a running data store by feeding it a mocked
// DatastoreContextFactory whose base and per-shard contexts both resolve to a fresh
// build of the supplied builder's current settings.
1450 private static void sendDatastoreContextUpdate(final AbstractDataStore dataStore, final Builder builder) {
// Snapshot the builder's settings now so later mutation of 'builder' has no effect.
1451 final Builder newBuilder = DatastoreContext.newBuilderFrom(builder.build());
1452 final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
// build() is invoked per answer, so every lookup gets a fresh context instance.
1453 final Answer<DatastoreContext> answer = invocation -> newBuilder.build();
1454 Mockito.doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext();
1455 Mockito.doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
1456 dataStore.onDatastoreContextUpdated(mockContextFactory);