/*
 * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore.admin;

import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyNoShardPresent;
import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyRaftPeersPresent;
import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyRaftState;

import akka.actor.ActorRef;
import akka.actor.PoisonPill;
import akka.actor.Status.Success;
import akka.cluster.Cluster;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import java.io.File;
import java.io.FileInputStream;
import java.net.URI;
import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.SerializationUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
import org.opendaylight.controller.cluster.datastore.MemberNode;
import org.opendaylight.controller.cluster.datastore.MemberNode.RaftStateVerifier;
import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
/**
 * Unit tests for ClusterAdminRpcService.
 *
 * @author Thomas Pantelis
 */
92 public class ClusterAdminRpcServiceTest {
93 private static final MemberName MEMBER_1 = MemberName.forName("member-1");
94 private static final MemberName MEMBER_2 = MemberName.forName("member-2");
95 private static final MemberName MEMBER_3 = MemberName.forName("member-3");
96 private final List<MemberNode> memberNodes = new ArrayList<>();
100 InMemoryJournal.clear();
101 InMemorySnapshotStore.clear();
105 public void tearDown() {
106 for (MemberNode m : Lists.reverse(memberNodes)) {
113 public void testBackupDatastore() throws Exception {
114 MemberNode node = MemberNode.builder(memberNodes).akkaConfig("Member1")
115 .moduleShardsConfig("module-shards-member1.conf").waitForShardLeader("cars", "people")
116 .testName("testBackupDatastore").build();
118 String fileName = "target/testBackupDatastore";
119 new File(fileName).delete();
121 ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore());
123 RpcResult<Void> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder()
124 .setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
125 verifySuccessfulRpcResult(rpcResult);
127 try (FileInputStream fis = new FileInputStream(fileName)) {
128 List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
129 assertEquals("DatastoreSnapshot size", 2, snapshots.size());
131 ImmutableMap<String, DatastoreSnapshot> map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0),
132 snapshots.get(1).getType(), snapshots.get(1));
133 verifyDatastoreSnapshot(node.configDataStore().getActorContext().getDataStoreName(),
134 map.get(node.configDataStore().getActorContext().getDataStoreName()), "cars", "people");
136 new File(fileName).delete();
139 // Test failure by killing a shard.
141 node.configDataStore().getActorContext().getShardManager().tell(node.datastoreContextBuilder()
142 .shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender());
144 ActorRef carsShardActor = node.configDataStore().getActorContext().findLocalShard("cars").get();
145 node.kit().watch(carsShardActor);
146 carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
147 node.kit().expectTerminated(carsShardActor);
149 rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build())
150 .get(5, TimeUnit.SECONDS);
151 assertEquals("isSuccessful", false, rpcResult.isSuccessful());
152 assertEquals("getErrors", 1, rpcResult.getErrors().size());
155 private static void verifyDatastoreSnapshot(String type, DatastoreSnapshot datastoreSnapshot,
156 String... expShardNames) {
157 assertNotNull("Missing DatastoreSnapshot for type " + type, datastoreSnapshot);
158 Set<String> shardNames = new HashSet<>();
159 for (DatastoreSnapshot.ShardSnapshot s: datastoreSnapshot.getShardSnapshots()) {
160 shardNames.add(s.getName());
163 assertEquals("DatastoreSnapshot shard names", Sets.newHashSet(expShardNames), shardNames);
167 public void testAddShardReplica() throws Exception {
168 String name = "testAddShardReplica";
169 String moduleShardsConfig = "module-shards-cars-member-1.conf";
170 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name )
171 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build();
173 MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
174 .moduleShardsConfig(moduleShardsConfig).build();
176 leaderNode1.waitForMembersUp("member-2");
178 doAddShardReplica(newReplicaNode2, "cars", "member-1");
180 MemberNode newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
181 .moduleShardsConfig(moduleShardsConfig).build();
183 leaderNode1.waitForMembersUp("member-3");
184 newReplicaNode2.waitForMembersUp("member-3");
186 doAddShardReplica(newReplicaNode3, "cars", "member-1", "member-2");
188 verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "cars", "member-1", "member-3");
189 verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1", "member-3");
191 // Write data to member-2's config datastore and read/verify via member-3
192 final NormalizedNode<?, ?> configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(),
193 newReplicaNode3.configDataStore());
195 // Write data to member-3's oper datastore and read/verify via member-2
196 writeCarsNodeAndVerify(newReplicaNode3.operDataStore(), newReplicaNode2.operDataStore());
198 // Verify all data has been replicated. We expect 3 log entries and thus last applied index of 2 -
199 // 2 ServerConfigurationPayload entries and the transaction payload entry.
201 RaftStateVerifier verifier = raftState -> {
202 assertEquals("Commit index", 2, raftState.getCommitIndex());
203 assertEquals("Last applied index", 2, raftState.getLastApplied());
206 verifyRaftState(leaderNode1.configDataStore(), "cars", verifier);
207 verifyRaftState(leaderNode1.operDataStore(), "cars", verifier);
209 verifyRaftState(newReplicaNode2.configDataStore(), "cars", verifier);
210 verifyRaftState(newReplicaNode2.operDataStore(), "cars", verifier);
212 verifyRaftState(newReplicaNode3.configDataStore(), "cars", verifier);
213 verifyRaftState(newReplicaNode3.operDataStore(), "cars", verifier);
215 // Restart member-3 and verify the cars config shard is re-instated.
217 Cluster.get(leaderNode1.kit().getSystem()).down(Cluster.get(newReplicaNode3.kit().getSystem()).selfAddress());
218 newReplicaNode3.cleanup();
220 newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
221 .moduleShardsConfig(moduleShardsConfig).createOperDatastore(false).build();
223 verifyRaftState(newReplicaNode3.configDataStore(), "cars", verifier);
224 readCarsNodeAndVerify(newReplicaNode3.configDataStore(), configCarsNode);
228 public void testAddShardReplicaFailures() throws Exception {
229 String name = "testAddShardReplicaFailures";
230 MemberNode memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
231 .moduleShardsConfig("module-shards-cars-member-1.conf").build();
233 ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
234 memberNode.operDataStore());
236 RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
237 .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
238 verifyFailedRpcResult(rpcResult);
240 rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars")
241 .build()).get(10, TimeUnit.SECONDS);
242 verifyFailedRpcResult(rpcResult);
244 rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("people")
245 .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
246 verifyFailedRpcResult(rpcResult);
249 private static NormalizedNode<?, ?> writeCarsNodeAndVerify(DistributedDataStore writeToStore,
250 DistributedDataStore readFromStore) throws Exception {
251 DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
252 NormalizedNode<?, ?> carsNode = CarsModel.create();
253 writeTx.write(CarsModel.BASE_PATH, carsNode);
255 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
256 Boolean canCommit = cohort .canCommit().get(7, TimeUnit.SECONDS);
257 assertEquals("canCommit", true, canCommit);
258 cohort.preCommit().get(5, TimeUnit.SECONDS);
259 cohort.commit().get(5, TimeUnit.SECONDS);
261 readCarsNodeAndVerify(readFromStore, carsNode);
265 private static void readCarsNodeAndVerify(DistributedDataStore readFromStore,
266 NormalizedNode<?, ?> expCarsNode) throws Exception {
267 Optional<NormalizedNode<?, ?>> optional = readFromStore.newReadOnlyTransaction()
268 .read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS);
269 assertEquals("isPresent", true, optional.isPresent());
270 assertEquals("Data node", expCarsNode, optional.get());
273 private static void doAddShardReplica(MemberNode memberNode, String shardName, String... peerMemberNames)
275 memberNode.waitForMembersUp(peerMemberNames);
277 ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
278 memberNode.operDataStore());
280 RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
281 .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
282 verifySuccessfulRpcResult(rpcResult);
284 verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
286 Optional<ActorRef> optional = memberNode.operDataStore().getActorContext().findLocalShard(shardName);
287 assertEquals("Oper shard present", false, optional.isPresent());
289 rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
290 .setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS);
291 verifySuccessfulRpcResult(rpcResult);
293 verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames);
296 private static <T> T verifySuccessfulRpcResult(RpcResult<T> rpcResult) {
297 if (!rpcResult.isSuccessful()) {
298 if (rpcResult.getErrors().size() > 0) {
299 RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
300 throw new AssertionError("Rpc failed with error: " + error, error.getCause());
303 fail("Rpc failed with no error");
306 return rpcResult.getResult();
309 private static void verifyFailedRpcResult(RpcResult<Void> rpcResult) {
310 assertEquals("RpcResult", false, rpcResult.isSuccessful());
311 assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size());
312 RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
313 assertNotNull("RpcResult error message null", error.getMessage());
317 public void testRemoveShardReplica() throws Exception {
318 String name = "testRemoveShardReplica";
319 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
320 final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
321 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
322 DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
325 final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
326 .moduleShardsConfig(moduleShardsConfig).build();
328 final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
329 .moduleShardsConfig(moduleShardsConfig).build();
331 leaderNode1.configDataStore().waitTillReady();
332 replicaNode3.configDataStore().waitTillReady();
333 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
334 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
335 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
337 // Invoke RPC service on member-3 to remove it's local shard
339 ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
340 replicaNode3.operDataStore());
342 RpcResult<Void> rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
343 .setShardName("cars").setMemberName("member-3").setDataStoreType(DataStoreType.Config).build())
344 .get(10, TimeUnit.SECONDS);
345 verifySuccessfulRpcResult(rpcResult);
347 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2");
348 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1");
349 verifyNoShardPresent(replicaNode3.configDataStore(), "cars");
351 // Restart member-2 and verify member-3 isn't present.
353 Cluster.get(leaderNode1.kit().getSystem()).down(Cluster.get(replicaNode2.kit().getSystem()).selfAddress());
354 replicaNode2.cleanup();
356 MemberNode newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
357 .moduleShardsConfig(moduleShardsConfig).build();
359 newPeplicaNode2.configDataStore().waitTillReady();
360 verifyRaftPeersPresent(newPeplicaNode2.configDataStore(), "cars", "member-1");
362 // Invoke RPC service on member-1 to remove member-2
364 ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
365 leaderNode1.operDataStore());
367 rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().setShardName("cars")
368 .setMemberName("member-2").setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
369 verifySuccessfulRpcResult(rpcResult);
371 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars");
372 verifyNoShardPresent(newPeplicaNode2.configDataStore(), "cars");
376 public void testRemoveShardLeaderReplica() throws Exception {
377 String name = "testRemoveShardLeaderReplica";
378 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
379 final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
380 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
381 DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
384 final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
385 .moduleShardsConfig(moduleShardsConfig).build();
387 final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
388 .moduleShardsConfig(moduleShardsConfig).build();
390 leaderNode1.configDataStore().waitTillReady();
391 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
392 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
393 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
395 replicaNode2.waitForMembersUp("member-1", "member-3");
396 replicaNode3.waitForMembersUp("member-1", "member-2");
398 // Invoke RPC service on leader member-1 to remove it's local shard
400 ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
401 leaderNode1.operDataStore());
403 RpcResult<Void> rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
404 .setShardName("cars").setMemberName("member-1").setDataStoreType(DataStoreType.Config).build())
405 .get(10, TimeUnit.SECONDS);
406 verifySuccessfulRpcResult(rpcResult);
408 verifyRaftState(replicaNode2.configDataStore(), "cars", raftState ->
409 assertThat("Leader Id", raftState.getLeader(), anyOf(containsString("member-2"),
410 containsString("member-3"))));
412 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-3");
413 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-2");
414 verifyNoShardPresent(leaderNode1.configDataStore(), "cars");
418 public void testAddReplicasForAllShards() throws Exception {
419 String name = "testAddReplicasForAllShards";
420 String moduleShardsConfig = "module-shards-member1.conf";
421 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name )
422 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars", "people").build();
424 ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
425 "pets", null, Arrays.asList(MEMBER_1));
426 leaderNode1.configDataStore().getActorContext().getShardManager().tell(
427 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
428 leaderNode1.kit().expectMsgClass(Success.class);
429 leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorContext(), "pets");
431 MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
432 .moduleShardsConfig(moduleShardsConfig).build();
434 leaderNode1.waitForMembersUp("member-2");
435 newReplicaNode2.waitForMembersUp("member-1");
437 newReplicaNode2.configDataStore().getActorContext().getShardManager().tell(
438 new CreateShard(petsModuleConfig, Shard.builder(), null), newReplicaNode2.kit().getRef());
439 newReplicaNode2.kit().expectMsgClass(Success.class);
441 newReplicaNode2.operDataStore().getActorContext().getShardManager().tell(
442 new CreateShard(new ModuleShardConfiguration(URI.create("no-leader-ns"), "no-leader-module",
443 "no-leader", null, Arrays.asList(MEMBER_1)), Shard.builder(), null),
444 newReplicaNode2.kit().getRef());
445 newReplicaNode2.kit().expectMsgClass(Success.class);
447 ClusterAdminRpcService service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
448 newReplicaNode2.operDataStore());
450 RpcResult<AddReplicasForAllShardsOutput> rpcResult =
451 service.addReplicasForAllShards().get(10, TimeUnit.SECONDS);
452 AddReplicasForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
453 verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
454 successShardResult("people", DataStoreType.Config),
455 successShardResult("pets", DataStoreType.Config),
456 successShardResult("cars", DataStoreType.Operational),
457 successShardResult("people", DataStoreType.Operational),
458 failedShardResult("no-leader", DataStoreType.Operational));
460 verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "cars", "member-1");
461 verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "people", "member-1");
462 verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "pets", "member-1");
463 verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1");
464 verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "people", "member-1");
468 public void testRemoveAllShardReplicas() throws Exception {
469 String name = "testRemoveAllShardReplicas";
470 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
471 final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
472 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
473 DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
476 final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
477 .moduleShardsConfig(moduleShardsConfig).build();
479 final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
480 .moduleShardsConfig(moduleShardsConfig).build();
482 leaderNode1.configDataStore().waitTillReady();
483 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
484 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
485 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
487 ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
488 "pets", null, Arrays.asList(MEMBER_1, MEMBER_2, MEMBER_3));
489 leaderNode1.configDataStore().getActorContext().getShardManager().tell(
490 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
491 leaderNode1.kit().expectMsgClass(Success.class);
493 replicaNode2.configDataStore().getActorContext().getShardManager().tell(
494 new CreateShard(petsModuleConfig, Shard.builder(), null), replicaNode2.kit().getRef());
495 replicaNode2.kit().expectMsgClass(Success.class);
497 replicaNode3.configDataStore().getActorContext().getShardManager().tell(
498 new CreateShard(petsModuleConfig, Shard.builder(), null), replicaNode3.kit().getRef());
499 replicaNode3.kit().expectMsgClass(Success.class);
501 verifyRaftPeersPresent(leaderNode1.configDataStore(), "pets", "member-2", "member-3");
502 verifyRaftPeersPresent(replicaNode2.configDataStore(), "pets", "member-1", "member-3");
503 verifyRaftPeersPresent(replicaNode3.configDataStore(), "pets", "member-1", "member-2");
505 ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
506 replicaNode3.operDataStore());
508 RpcResult<RemoveAllShardReplicasOutput> rpcResult = service3.removeAllShardReplicas(
509 new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()).get(10, TimeUnit.SECONDS);
510 RemoveAllShardReplicasOutput result = verifySuccessfulRpcResult(rpcResult);
511 verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
512 successShardResult("people", DataStoreType.Config),
513 successShardResult("pets", DataStoreType.Config),
514 successShardResult("cars", DataStoreType.Operational),
515 successShardResult("people", DataStoreType.Operational));
517 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2");
518 verifyRaftPeersPresent(leaderNode1.configDataStore(), "people", "member-2");
519 verifyRaftPeersPresent(leaderNode1.configDataStore(), "pets", "member-2");
520 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1");
521 verifyRaftPeersPresent(replicaNode2.configDataStore(), "people", "member-1");
522 verifyRaftPeersPresent(replicaNode2.configDataStore(), "pets", "member-1");
523 verifyNoShardPresent(replicaNode3.configDataStore(), "cars");
524 verifyNoShardPresent(replicaNode3.configDataStore(), "people");
525 verifyNoShardPresent(replicaNode3.configDataStore(), "pets");
529 public void testChangeMemberVotingStatesForShard() throws Exception {
530 String name = "testChangeMemberVotingStatusForShard";
531 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
532 final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
533 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
534 DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
537 final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
538 .moduleShardsConfig(moduleShardsConfig).build();
540 final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
541 .moduleShardsConfig(moduleShardsConfig).build();
543 leaderNode1.configDataStore().waitTillReady();
544 replicaNode3.configDataStore().waitTillReady();
545 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
546 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
547 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
549 // Invoke RPC service on member-3 to change voting status
551 ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
552 replicaNode3.operDataStore());
554 RpcResult<Void> rpcResult = service3
555 .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
556 .setShardName("cars").setDataStoreType(DataStoreType.Config)
557 .setMemberVotingState(ImmutableList.of(
558 new MemberVotingStateBuilder().setMemberName("member-2").setVoting(false).build(),
559 new MemberVotingStateBuilder().setMemberName("member-3").setVoting(false).build()))
561 .get(10, TimeUnit.SECONDS);
562 verifySuccessfulRpcResult(rpcResult);
564 verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
565 new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
566 verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
567 new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
568 verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
569 new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
573 public void testChangeMemberVotingStatesForSingleNodeShard() throws Exception {
574 String name = "testChangeMemberVotingStatesForSingleNodeShard";
575 String moduleShardsConfig = "module-shards-member1.conf";
576 MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
577 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
578 DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
581 leaderNode.configDataStore().waitTillReady();
583 // Invoke RPC service on member-3 to change voting status
585 ClusterAdminRpcService service = new ClusterAdminRpcService(leaderNode.configDataStore(),
586 leaderNode.operDataStore());
588 RpcResult<Void> rpcResult = service
589 .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
590 .setShardName("cars").setDataStoreType(DataStoreType.Config)
591 .setMemberVotingState(ImmutableList
592 .of(new MemberVotingStateBuilder().setMemberName("member-1").setVoting(false).build()))
594 .get(10, TimeUnit.SECONDS);
595 verifyFailedRpcResult(rpcResult);
597 verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", true));
    // Changes member-2 and member-3 to non-voting on all shards ("cars" and "people") of both the
    // config and oper datastores via a single RPC, then verifies the new voting states are visible
    // on every node.
    public void testChangeMemberVotingStatesForAllShards() throws Exception {
        String name = "testChangeMemberVotingStatesForAllShards";
        String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
        // Member 1 uses a short heartbeat interval and election timeout factor so it quickly becomes
        // the shard leader.
        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                        DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                .moduleShardsConfig(moduleShardsConfig).build();
        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                .moduleShardsConfig(moduleShardsConfig).build();
        // Wait until both datastores on the leader and on member-3 have synced before invoking the RPC.
        leaderNode1.configDataStore().waitTillReady();
        leaderNode1.operDataStore().waitTillReady();
        replicaNode3.configDataStore().waitTillReady();
        replicaNode3.operDataStore().waitTillReady();
        verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
        verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
        verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
        // Invoke RPC service on member-3 to change voting status
        ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
                replicaNode3.operDataStore());
        RpcResult<ChangeMemberVotingStatesForAllShardsOutput> rpcResult = service3.changeMemberVotingStatesForAllShards(
                new ChangeMemberVotingStatesForAllShardsInputBuilder().setMemberVotingState(ImmutableList.of(
                        new MemberVotingStateBuilder().setMemberName("member-2").setVoting(false).build(),
                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(false).build())).build())
                .get(10, TimeUnit.SECONDS);
        // Expect one successful per-shard result for each shard in each datastore type.
        ChangeMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
        verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                successShardResult("people", DataStoreType.Config),
                successShardResult("cars", DataStoreType.Operational),
                successShardResult("people", DataStoreType.Operational));
        // The updated server config should be replicated to all three nodes for both datastores.
        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                new String[]{"cars", "people"}, new SimpleEntry<>("member-1", true),
                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false));
647 public void testFlipMemberVotingStates() throws Exception {
648 String name = "testFlipMemberVotingStates";
650 ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
651 new ServerInfo("member-1", true), new ServerInfo("member-2", true),
652 new ServerInfo("member-3", false)));
654 setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
655 setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
656 setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
658 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
659 final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
660 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
661 DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
664 final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
665 .moduleShardsConfig(moduleShardsConfig).build();
667 final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
668 .moduleShardsConfig(moduleShardsConfig).build();
670 leaderNode1.configDataStore().waitTillReady();
671 leaderNode1.operDataStore().waitTillReady();
672 replicaNode3.configDataStore().waitTillReady();
673 replicaNode3.operDataStore().waitTillReady();
674 verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
675 new SimpleEntry<>("member-2", true), new SimpleEntry<>("member-3", false));
677 ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
678 replicaNode3.operDataStore());
680 RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards()
681 .get(10, TimeUnit.SECONDS);
682 FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
683 verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
684 successShardResult("people", DataStoreType.Config),
685 successShardResult("cars", DataStoreType.Operational),
686 successShardResult("people", DataStoreType.Operational));
688 verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
689 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
690 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
691 new String[]{"cars", "people"},
692 new SimpleEntry<>("member-1", false), new SimpleEntry<>("member-2", false),
693 new SimpleEntry<>("member-3", true));
695 // Leadership should have transferred to member 3 since it is the only remaining voting member.
696 verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
697 assertNotNull("Expected non-null leader Id", raftState.getLeader());
698 assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
699 raftState.getLeader().contains("member-3"));
702 verifyRaftState(leaderNode1.operDataStore(), "cars", raftState -> {
703 assertNotNull("Expected non-null leader Id", raftState.getLeader());
704 assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
705 raftState.getLeader().contains("member-3"));
708 // Flip the voting states back to the original states.
710 rpcResult = service3.flipMemberVotingStatesForAllShards(). get(10, TimeUnit.SECONDS);
711 result = verifySuccessfulRpcResult(rpcResult);
712 verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
713 successShardResult("people", DataStoreType.Config),
714 successShardResult("cars", DataStoreType.Operational),
715 successShardResult("people", DataStoreType.Operational));
717 verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
718 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
719 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
720 new String[]{"cars", "people"},
721 new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", true),
722 new SimpleEntry<>("member-3", false));
724 // Leadership should have transferred to member 1 or 2.
725 verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
726 assertNotNull("Expected non-null leader Id", raftState.getLeader());
727 assertTrue("Expected leader member-1 or member-2. Actual: " + raftState.getLeader(),
728 raftState.getLeader().contains("member-1") || raftState.getLeader().contains("member-2"));
    // Verifies flipMemberVotingStatesForAllShards when there is initially no shard leader because all
    // of the running members are non-voting; after the flip the running members become voting and a
    // leader can be elected.
    public void testFlipMemberVotingStatesWithNoInitialLeader() throws Exception {
        String name = "testFlipMemberVotingStatesWithNoInitialLeader";
        // Members 1, 2, and 3 are initially started up as non-voting. Members 4, 5, and 6 are initially
        // voting but simulated as down by not starting them up.
        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
                new ServerInfo("member-1", false), new ServerInfo("member-2", false),
                new ServerInfo("member-3", false), new ServerInfo("member-4", true),
                new ServerInfo("member-5", true), new ServerInfo("member-6", true)));
        setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
        setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
        setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
        String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
        final MemberNode replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                        DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                .moduleShardsConfig(moduleShardsConfig).build();
        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                .moduleShardsConfig(moduleShardsConfig).build();
        // Initially there won't be a leader b/c all the up nodes are non-voting.
        replicaNode1.waitForMembersUp("member-2", "member-3");
        verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", false),
                new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false),
                new SimpleEntry<>("member-4", true), new SimpleEntry<>("member-5", true),
                new SimpleEntry<>("member-6", true));
        // With no leader elected, the local shard should remain in the Follower state.
        verifyRaftState(replicaNode1.configDataStore(), "cars", raftState ->
                assertEquals("Expected raft state", RaftState.Follower.toString(), raftState.getRaftState()));
        ClusterAdminRpcService service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(),
                replicaNode1.operDataStore());
        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards()
                .get(10, TimeUnit.SECONDS);
        FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
        verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                successShardResult("people", DataStoreType.Config),
                successShardResult("cars", DataStoreType.Operational),
                successShardResult("people", DataStoreType.Operational));
        // After the flip, the running members 1-3 are voting and the down members 4-6 are non-voting.
        verifyVotingStates(new DistributedDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                new String[]{"cars", "people"},
                new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", true),
                new SimpleEntry<>("member-3", true), new SimpleEntry<>("member-4", false),
                new SimpleEntry<>("member-5", false), new SimpleEntry<>("member-6", false));
        // Since member 1 was changed to voting and there was no leader, it should've started an election
        verifyRaftState(replicaNode1.configDataStore(), "cars", raftState -> {
            assertNotNull("Expected non-null leader Id", raftState.getLeader());
            assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
                    raftState.getLeader().contains("member-1"));
        verifyRaftState(replicaNode1.operDataStore(), "cars", raftState -> {
            assertNotNull("Expected non-null leader Id", raftState.getLeader());
            assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
                    raftState.getLeader().contains("member-1"));
    // Verifies flipMemberVotingStatesForAllShards when all members that become voting are down: the
    // leader cannot reach a majority of the new voting members, so it must stay leader even though it
    // was flipped to non-voting.
    public void testFlipMemberVotingStatesWithVotingMembersDown() throws Exception {
        String name = "testFlipMemberVotingStatesWithVotingMembersDown";
        // Members 4, 5, and 6 are initially non-voting and simulated as down by not starting them up.
        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
                new ServerInfo("member-1", true), new ServerInfo("member-2", true),
                new ServerInfo("member-3", true), new ServerInfo("member-4", false),
                new ServerInfo("member-5", false), new ServerInfo("member-6", false)));
        setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
        setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
        setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
        String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                        DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                .moduleShardsConfig(moduleShardsConfig).build();
        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                .moduleShardsConfig(moduleShardsConfig).build();
        leaderNode1.configDataStore().waitTillReady();
        leaderNode1.operDataStore().waitTillReady();
        // Sanity-check the recovered (pre-flip) voting states.
        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
                new SimpleEntry<>("member-2", true), new SimpleEntry<>("member-3", true),
                new SimpleEntry<>("member-4", false), new SimpleEntry<>("member-5", false),
                new SimpleEntry<>("member-6", false));
        ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
                leaderNode1.operDataStore());
        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards()
                .get(10, TimeUnit.SECONDS);
        FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
        verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                successShardResult("people", DataStoreType.Config),
                successShardResult("cars", DataStoreType.Operational),
                successShardResult("people", DataStoreType.Operational));
        // Members 2 and 3 are now non-voting but should get replicated with the new server config.
        verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
                new String[]{"cars", "people"},
                new SimpleEntry<>("member-1", false), new SimpleEntry<>("member-2", false),
                new SimpleEntry<>("member-3", false), new SimpleEntry<>("member-4", true),
                new SimpleEntry<>("member-5", true), new SimpleEntry<>("member-6", true));
        // The leader (member 1) was changed to non-voting but it shouldn't be able to step down as leader yet
        // b/c it can't get a majority consensus with all voting members down. So verify it remains the leader.
        verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
            assertNotNull("Expected non-null leader Id", raftState.getLeader());
            assertTrue("Expected leader member-1", raftState.getLeader().contains("member-1"));
    // Seeds the in-memory journal for the given member so that each listed shard, in both the
    // "config_" and "oper_" datastores, recovers with the supplied server (voting) configuration.
    // The plain member names in serverConfig are translated into fully-qualified shard identifiers
    // before being persisted.
    private void setupPersistedServerConfigPayload(ServerConfigurationPayload serverConfig,
            String member, String datastoreTypeSuffix, String... shards) {
        String[] datastoreTypes = {"config_", "oper_"};
        for (String type : datastoreTypes) {
            for (String shard : shards) {
                // Rebuild the server list with shard-qualified IDs, preserving each voting flag.
                List<ServerInfo> newServerInfo = new ArrayList<>(serverConfig.getServerConfig().size());
                for (ServerInfo info : serverConfig.getServerConfig()) {
                    newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.getId()),
                            type + datastoreTypeSuffix).toString(), info.isVoting()));
                String shardID = ShardIdentifier.create(shard, MemberName.forName(member),
                        type + datastoreTypeSuffix).toString();
                // Journal entries: an election-term update followed by the server config as the first
                // replicated log entry (index 0, term 1).
                InMemoryJournal.addEntry(shardID, 1, new UpdateElectionTerm(1, null));
                InMemoryJournal.addEntry(shardID, 2, new ReplicatedLogImplEntry(0, 1,
                        new ServerConfigurationPayload(newServerInfo)));
    // Asserts the expected member voting states for every listed shard on every given datastore by
    // delegating to the single-datastore overload.
    private static void verifyVotingStates(DistributedDataStore[] datastores, String[] shards,
            SimpleEntry<String, Boolean>... expStates) throws Exception {
        for (DistributedDataStore datastore: datastores) {
            for (String shard: shards) {
                verifyVotingStates(datastore, shard, expStates);
    // Asserts the voting states reported by the shard's raft state: the local member's state via
    // isVoting() and each peer's state via getPeerVotingStates(). Expected states are keyed by plain
    // member name and translated here to shard-qualified peer IDs.
    private static void verifyVotingStates(DistributedDataStore datastore, String shardName,
            SimpleEntry<String, Boolean>... expStates) throws Exception {
        String localMemberName = datastore.getActorContext().getCurrentMemberName().getName();
        Map<String, Boolean> expStateMap = new HashMap<>();
        for (Entry<String, Boolean> e: expStates) {
            expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(e.getKey()),
                    datastore.getActorContext().getDataStoreName()).toString(), e.getValue());
        verifyRaftState(datastore, shardName, raftState -> {
            // The local member's voting state is reported separately from the peers'.
            String localPeerId = ShardIdentifier.create(shardName, MemberName.forName(localMemberName),
                    datastore.getActorContext().getDataStoreName()).toString();
            assertEquals("Voting state for " + localPeerId, expStateMap.get(localPeerId), raftState.isVoting());
            for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
                assertEquals("Voting state for " + e.getKey(), expStateMap.get(e.getKey()), e.getValue());
    // Asserts that the actual per-shard RPC results exactly match the expected ones, independent of
    // order, keyed by shard name + datastore type. Fails on unexpected, mismatched, or missing results.
    private static void verifyShardResults(List<ShardResult> shardResults, ShardResult... expShardResults) {
        Map<String, ShardResult> expResultsMap = new HashMap<>();
        for (ShardResult r: expShardResults) {
            expResultsMap.put(r.getShardName() + "-" + r.getDataStoreType(), r);
        for (ShardResult result: shardResults) {
            // remove() so leftovers identify expected-but-missing results below.
            ShardResult exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
            assertNotNull(String.format("Unexpected result for shard %s, type %s", result.getShardName(),
                    result.getDataStoreType()), exp);
            assertEquals("isSucceeded", exp.isSucceeded(), result.isSucceeded());
            // A successful result must carry no error message; a failed one must carry one.
            if (exp.isSucceeded()) {
                assertNull("Expected null error message", result.getErrorMessage());
                assertNotNull("Expected error message", result.getErrorMessage());
        if (!expResultsMap.isEmpty()) {
            fail("Missing shard results for " + expResultsMap.keySet());
    // Builds an expected successful ShardResult for the given shard and datastore type.
    private static ShardResult successShardResult(String shardName, DataStoreType type) {
        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(true).build();
    // Builds an expected failed ShardResult for the given shard and datastore type.
    private static ShardResult failedShardResult(String shardName, DataStoreType type) {
        return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(false).build();