/*
 * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
8 package org.opendaylight.controller.cluster.datastore.admin;
10 import static org.hamcrest.CoreMatchers.anyOf;
11 import static org.hamcrest.CoreMatchers.containsString;
12 import static org.junit.Assert.assertEquals;
13 import static org.junit.Assert.assertNotNull;
14 import static org.junit.Assert.assertNull;
15 import static org.junit.Assert.assertThat;
16 import static org.junit.Assert.assertTrue;
17 import static org.junit.Assert.fail;
18 import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyNoShardPresent;
19 import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyRaftPeersPresent;
20 import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyRaftState;
21 import akka.actor.ActorRef;
22 import akka.actor.PoisonPill;
23 import akka.actor.Status.Success;
24 import akka.cluster.Cluster;
25 import com.google.common.base.Optional;
26 import com.google.common.collect.ImmutableList;
27 import com.google.common.collect.ImmutableMap;
28 import com.google.common.collect.Iterables;
29 import com.google.common.collect.Lists;
30 import com.google.common.collect.Sets;
32 import java.io.FileInputStream;
34 import java.util.AbstractMap.SimpleEntry;
35 import java.util.ArrayList;
36 import java.util.Arrays;
37 import java.util.HashMap;
38 import java.util.HashSet;
39 import java.util.List;
41 import java.util.Map.Entry;
43 import java.util.concurrent.TimeUnit;
44 import org.apache.commons.lang3.SerializationUtils;
45 import org.junit.After;
46 import org.junit.Before;
47 import org.junit.Test;
48 import org.opendaylight.controller.cluster.access.concepts.MemberName;
49 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
50 import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
51 import org.opendaylight.controller.cluster.datastore.MemberNode;
52 import org.opendaylight.controller.cluster.datastore.MemberNode.RaftStateVerifier;
53 import org.opendaylight.controller.cluster.datastore.Shard;
54 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
55 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
56 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
57 import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
58 import org.opendaylight.controller.cluster.raft.RaftState;
59 import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
60 import org.opendaylight.controller.cluster.raft.ServerConfigurationPayload;
61 import org.opendaylight.controller.cluster.raft.ServerConfigurationPayload.ServerInfo;
62 import org.opendaylight.controller.cluster.raft.base.messages.UpdateElectionTerm;
63 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
64 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
65 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
66 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
67 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
68 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
69 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
70 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
71 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
72 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
73 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
74 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
75 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
76 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
77 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
78 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
79 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
80 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
81 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
82 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
83 import org.opendaylight.yangtools.yang.common.RpcError;
84 import org.opendaylight.yangtools.yang.common.RpcResult;
85 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
/**
 * Unit tests for ClusterAdminRpcService.
 *
 * @author Thomas Pantelis
 */
92 public class ClusterAdminRpcServiceTest {
93 private static final MemberName MEMBER_1 = MemberName.forName("member-1");
94 private static final MemberName MEMBER_2 = MemberName.forName("member-2");
95 private static final MemberName MEMBER_3 = MemberName.forName("member-3");
96 private final List<MemberNode> memberNodes = new ArrayList<>();
100 InMemoryJournal.clear();
101 InMemorySnapshotStore.clear();
105 public void tearDown() {
106 for (MemberNode m : Lists.reverse(memberNodes)) {
113 public void testBackupDatastore() throws Exception {
114 MemberNode node = MemberNode.builder(memberNodes).akkaConfig("Member1").
115 moduleShardsConfig("module-shards-member1.conf").
116 waitForShardLeader("cars", "people").testName("testBackupDatastore").build();
118 String fileName = "target/testBackupDatastore";
119 new File(fileName).delete();
121 ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore());
123 RpcResult<Void> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder().
124 setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
125 verifySuccessfulRpcResult(rpcResult);
127 try(FileInputStream fis = new FileInputStream(fileName)) {
128 List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
129 assertEquals("DatastoreSnapshot size", 2, snapshots.size());
131 ImmutableMap<String, DatastoreSnapshot> map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0),
132 snapshots.get(1).getType(), snapshots.get(1));
133 verifyDatastoreSnapshot(node.configDataStore().getActorContext().getDataStoreName(),
134 map.get(node.configDataStore().getActorContext().getDataStoreName()), "cars", "people");
136 new File(fileName).delete();
139 // Test failure by killing a shard.
141 node.configDataStore().getActorContext().getShardManager().tell(node.datastoreContextBuilder().
142 shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender());
144 ActorRef carsShardActor = node.configDataStore().getActorContext().findLocalShard("cars").get();
145 node.kit().watch(carsShardActor);
146 carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
147 node.kit().expectTerminated(carsShardActor);
149 rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build()).
150 get(5, TimeUnit.SECONDS);
151 assertEquals("isSuccessful", false, rpcResult.isSuccessful());
152 assertEquals("getErrors", 1, rpcResult.getErrors().size());
155 private static void verifyDatastoreSnapshot(String type, DatastoreSnapshot datastoreSnapshot, String... expShardNames) {
156 assertNotNull("Missing DatastoreSnapshot for type " + type, datastoreSnapshot);
157 Set<String> shardNames = new HashSet<>();
158 for(DatastoreSnapshot.ShardSnapshot s: datastoreSnapshot.getShardSnapshots()) {
159 shardNames.add(s.getName());
162 assertEquals("DatastoreSnapshot shard names", Sets.newHashSet(expShardNames), shardNames);
166 public void testAddShardReplica() throws Exception {
167 String name = "testAddShardReplica";
168 String moduleShardsConfig = "module-shards-cars-member-1.conf";
169 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
170 moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build();
172 MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
173 moduleShardsConfig(moduleShardsConfig).build();
175 leaderNode1.waitForMembersUp("member-2");
177 doAddShardReplica(newReplicaNode2, "cars", "member-1");
179 MemberNode newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
180 moduleShardsConfig(moduleShardsConfig).build();
182 leaderNode1.waitForMembersUp("member-3");
183 newReplicaNode2.waitForMembersUp("member-3");
185 doAddShardReplica(newReplicaNode3, "cars", "member-1", "member-2");
187 verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "cars", "member-1", "member-3");
188 verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1", "member-3");
190 // Write data to member-2's config datastore and read/verify via member-3
191 NormalizedNode<?, ?> configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(),
192 newReplicaNode3.configDataStore());
194 // Write data to member-3's oper datastore and read/verify via member-2
195 writeCarsNodeAndVerify(newReplicaNode3.operDataStore(), newReplicaNode2.operDataStore());
197 // Verify all data has been replicated. We expect 3 log entries and thus last applied index of 2 -
198 // 2 ServerConfigurationPayload entries and the transaction payload entry.
200 RaftStateVerifier verifier = new RaftStateVerifier() {
202 public void verify(OnDemandRaftState raftState) {
203 assertEquals("Commit index", 2, raftState.getCommitIndex());
204 assertEquals("Last applied index", 2, raftState.getLastApplied());
208 verifyRaftState(leaderNode1.configDataStore(), "cars", verifier);
209 verifyRaftState(leaderNode1.operDataStore(), "cars", verifier);
211 verifyRaftState(newReplicaNode2.configDataStore(), "cars", verifier);
212 verifyRaftState(newReplicaNode2.operDataStore(), "cars", verifier);
214 verifyRaftState(newReplicaNode3.configDataStore(), "cars", verifier);
215 verifyRaftState(newReplicaNode3.operDataStore(), "cars", verifier);
217 // Restart member-3 and verify the cars config shard is re-instated.
219 Cluster.get(leaderNode1.kit().getSystem()).down(Cluster.get(newReplicaNode3.kit().getSystem()).selfAddress());
220 newReplicaNode3.cleanup();
222 newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
223 moduleShardsConfig(moduleShardsConfig).createOperDatastore(false).build();
225 verifyRaftState(newReplicaNode3.configDataStore(), "cars", verifier);
226 readCarsNodeAndVerify(newReplicaNode3.configDataStore(), configCarsNode);
230 public void testAddShardReplicaFailures() throws Exception {
231 String name = "testAddShardReplicaFailures";
232 MemberNode memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name).
233 moduleShardsConfig("module-shards-cars-member-1.conf").build();
235 ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
236 memberNode.operDataStore());
238 RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().
239 setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
240 verifyFailedRpcResult(rpcResult);
242 rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars").
243 build()).get(10, TimeUnit.SECONDS);
244 verifyFailedRpcResult(rpcResult);
246 rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("people").
247 setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
248 verifyFailedRpcResult(rpcResult);
251 private static NormalizedNode<?, ?> writeCarsNodeAndVerify(DistributedDataStore writeToStore,
252 DistributedDataStore readFromStore) throws Exception {
253 DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
254 NormalizedNode<?, ?> carsNode = CarsModel.create();
255 writeTx.write(CarsModel.BASE_PATH, carsNode);
257 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
258 Boolean canCommit = cohort .canCommit().get(7, TimeUnit.SECONDS);
259 assertEquals("canCommit", true, canCommit);
260 cohort.preCommit().get(5, TimeUnit.SECONDS);
261 cohort.commit().get(5, TimeUnit.SECONDS);
263 readCarsNodeAndVerify(readFromStore, carsNode);
267 private static void readCarsNodeAndVerify(DistributedDataStore readFromStore,
268 NormalizedNode<?, ?> expCarsNode) throws Exception {
269 Optional<NormalizedNode<?, ?>> optional = readFromStore.newReadOnlyTransaction().
270 read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS);
271 assertEquals("isPresent", true, optional.isPresent());
272 assertEquals("Data node", expCarsNode, optional.get());
275 private static void doAddShardReplica(MemberNode memberNode, String shardName, String... peerMemberNames)
277 memberNode.waitForMembersUp(peerMemberNames);
279 ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
280 memberNode.operDataStore());
282 RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName).
283 setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
284 verifySuccessfulRpcResult(rpcResult);
286 verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
288 Optional<ActorRef> optional = memberNode.operDataStore().getActorContext().findLocalShard(shardName);
289 assertEquals("Oper shard present", false, optional.isPresent());
291 rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName).
292 setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS);
293 verifySuccessfulRpcResult(rpcResult);
295 verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames);
298 private static <T> T verifySuccessfulRpcResult(RpcResult<T> rpcResult) {
299 if(!rpcResult.isSuccessful()) {
300 if(rpcResult.getErrors().size() > 0) {
301 RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
302 throw new AssertionError("Rpc failed with error: " + error, error.getCause());
305 fail("Rpc failed with no error");
308 return rpcResult.getResult();
311 private static void verifyFailedRpcResult(RpcResult<Void> rpcResult) {
312 assertEquals("RpcResult", false, rpcResult.isSuccessful());
313 assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size());
314 RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
315 assertNotNull("RpcResult error message null", error.getMessage());
319 public void testRemoveShardReplica() throws Exception {
320 String name = "testRemoveShardReplicaLocal";
321 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
322 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
323 moduleShardsConfig(moduleShardsConfig).
324 datastoreContextBuilder(DatastoreContext.newBuilder().
325 shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();
327 MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
328 moduleShardsConfig(moduleShardsConfig).build();
330 MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
331 moduleShardsConfig(moduleShardsConfig).build();
333 leaderNode1.configDataStore().waitTillReady();
334 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
335 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
336 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
338 // Invoke RPC service on member-3 to remove it's local shard
340 ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
341 replicaNode3.operDataStore());
343 RpcResult<Void> rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder().
344 setShardName("cars").setMemberName("member-3").setDataStoreType(DataStoreType.Config).build()).
345 get(10, TimeUnit.SECONDS);
346 verifySuccessfulRpcResult(rpcResult);
348 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2");
349 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1");
350 verifyNoShardPresent(replicaNode3.configDataStore(), "cars");
352 // Restart member-2 and verify member-3 isn't present.
354 Cluster.get(leaderNode1.kit().getSystem()).down(Cluster.get(replicaNode2.kit().getSystem()).selfAddress());
355 replicaNode2.cleanup();
357 replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
358 moduleShardsConfig(moduleShardsConfig).build();
360 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1");
362 // Invoke RPC service on member-1 to remove member-2
364 ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
365 leaderNode1.operDataStore());
367 rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().
368 setShardName("cars").setMemberName("member-2").setDataStoreType(DataStoreType.Config).build()).
369 get(10, TimeUnit.SECONDS);
370 verifySuccessfulRpcResult(rpcResult);
372 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars");
373 verifyNoShardPresent(replicaNode2.configDataStore(), "cars");
377 public void testRemoveShardLeaderReplica() throws Exception {
378 String name = "testRemoveShardLeaderReplica";
379 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
380 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
381 moduleShardsConfig(moduleShardsConfig).
382 datastoreContextBuilder(DatastoreContext.newBuilder().
383 shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();
385 MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
386 moduleShardsConfig(moduleShardsConfig).build();
388 MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
389 moduleShardsConfig(moduleShardsConfig).build();
391 leaderNode1.configDataStore().waitTillReady();
392 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
393 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
394 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
396 replicaNode2.waitForMembersUp("member-1", "member-3");
397 replicaNode3.waitForMembersUp("member-1", "member-2");
399 // Invoke RPC service on leader member-1 to remove it's local shard
401 ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
402 leaderNode1.operDataStore());
404 RpcResult<Void> rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().
405 setShardName("cars").setMemberName("member-1").setDataStoreType(DataStoreType.Config).build()).
406 get(10, TimeUnit.SECONDS);
407 verifySuccessfulRpcResult(rpcResult);
409 verifyRaftState(replicaNode2.configDataStore(), "cars", new RaftStateVerifier() {
411 public void verify(OnDemandRaftState raftState) {
412 assertThat("Leader Id", raftState.getLeader(), anyOf(containsString("member-2"),
413 containsString("member-3")));
417 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-3");
418 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-2");
419 verifyNoShardPresent(leaderNode1.configDataStore(), "cars");
423 public void testAddReplicasForAllShards() throws Exception {
424 String name = "testAddReplicasForAllShards";
425 String moduleShardsConfig = "module-shards-member1.conf";
426 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
427 moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars", "people").build();
429 ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
430 "pets", null, Arrays.asList(MEMBER_1));
431 leaderNode1.configDataStore().getActorContext().getShardManager().tell(
432 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
433 leaderNode1.kit().expectMsgClass(Success.class);
434 leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorContext(), "pets");
436 MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
437 moduleShardsConfig(moduleShardsConfig).build();
439 leaderNode1.waitForMembersUp("member-2");
440 newReplicaNode2.waitForMembersUp("member-1");
442 newReplicaNode2.configDataStore().getActorContext().getShardManager().tell(
443 new CreateShard(petsModuleConfig, Shard.builder(), null), newReplicaNode2.kit().getRef());
444 newReplicaNode2.kit().expectMsgClass(Success.class);
446 newReplicaNode2.operDataStore().getActorContext().getShardManager().tell(
447 new CreateShard(new ModuleShardConfiguration(URI.create("no-leader-ns"), "no-leader-module",
448 "no-leader", null, Arrays.asList(MEMBER_1)), Shard.builder(), null),
449 newReplicaNode2.kit().getRef());
450 newReplicaNode2.kit().expectMsgClass(Success.class);
452 ClusterAdminRpcService service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
453 newReplicaNode2.operDataStore());
455 RpcResult<AddReplicasForAllShardsOutput> rpcResult = service.addReplicasForAllShards().get(10, TimeUnit.SECONDS);
456 AddReplicasForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
457 verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
458 successShardResult("people", DataStoreType.Config),
459 successShardResult("pets", DataStoreType.Config),
460 successShardResult("cars", DataStoreType.Operational),
461 successShardResult("people", DataStoreType.Operational),
462 failedShardResult("no-leader", DataStoreType.Operational));
464 verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "cars", "member-1");
465 verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "people", "member-1");
466 verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "pets", "member-1");
467 verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1");
468 verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "people", "member-1");
472 public void testRemoveAllShardReplicas() throws Exception {
473 String name = "testRemoveAllShardReplicas";
474 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
475 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
476 moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder().
477 shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();
479 MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
480 moduleShardsConfig(moduleShardsConfig).build();
482 MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
483 moduleShardsConfig(moduleShardsConfig).build();
485 leaderNode1.configDataStore().waitTillReady();
486 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
487 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
488 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
490 ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
491 "pets", null, Arrays.asList(MEMBER_1, MEMBER_2, MEMBER_3));
492 leaderNode1.configDataStore().getActorContext().getShardManager().tell(
493 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
494 leaderNode1.kit().expectMsgClass(Success.class);
496 replicaNode2.configDataStore().getActorContext().getShardManager().tell(
497 new CreateShard(petsModuleConfig, Shard.builder(), null), replicaNode2.kit().getRef());
498 replicaNode2.kit().expectMsgClass(Success.class);
500 replicaNode3.configDataStore().getActorContext().getShardManager().tell(
501 new CreateShard(petsModuleConfig, Shard.builder(), null), replicaNode3.kit().getRef());
502 replicaNode3.kit().expectMsgClass(Success.class);
504 verifyRaftPeersPresent(leaderNode1.configDataStore(), "pets", "member-2", "member-3");
505 verifyRaftPeersPresent(replicaNode2.configDataStore(), "pets", "member-1", "member-3");
506 verifyRaftPeersPresent(replicaNode3.configDataStore(), "pets", "member-1", "member-2");
508 ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
509 replicaNode3.operDataStore());
511 RpcResult<RemoveAllShardReplicasOutput> rpcResult = service3.removeAllShardReplicas(
512 new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()).get(10, TimeUnit.SECONDS);
513 RemoveAllShardReplicasOutput result = verifySuccessfulRpcResult(rpcResult);
514 verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
515 successShardResult("people", DataStoreType.Config),
516 successShardResult("pets", DataStoreType.Config),
517 successShardResult("cars", DataStoreType.Operational),
518 successShardResult("people", DataStoreType.Operational));
520 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2");
521 verifyRaftPeersPresent(leaderNode1.configDataStore(), "people", "member-2");
522 verifyRaftPeersPresent(leaderNode1.configDataStore(), "pets", "member-2");
523 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1");
524 verifyRaftPeersPresent(replicaNode2.configDataStore(), "people", "member-1");
525 verifyRaftPeersPresent(replicaNode2.configDataStore(), "pets", "member-1");
526 verifyNoShardPresent(replicaNode3.configDataStore(), "cars");
527 verifyNoShardPresent(replicaNode3.configDataStore(), "people");
528 verifyNoShardPresent(replicaNode3.configDataStore(), "pets");
532 public void testChangeMemberVotingStatesForShard() throws Exception {
533 String name = "testChangeMemberVotingStatusForShard";
534 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
535 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
536 moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder().
537 shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();
539 MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
540 moduleShardsConfig(moduleShardsConfig).build();
542 MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
543 moduleShardsConfig(moduleShardsConfig).build();
545 leaderNode1.configDataStore().waitTillReady();
546 replicaNode3.configDataStore().waitTillReady();
547 verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
548 verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
549 verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
551 // Invoke RPC service on member-3 to change voting status
553 ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
554 replicaNode3.operDataStore());
556 RpcResult<Void> rpcResult = service3.changeMemberVotingStatesForShard(
557 new ChangeMemberVotingStatesForShardInputBuilder().setShardName("cars").
558 setDataStoreType(DataStoreType.Config).setMemberVotingState(ImmutableList.of(
559 new MemberVotingStateBuilder().setMemberName("member-2").setVoting(false).build(),
560 new MemberVotingStateBuilder().setMemberName("member-3").setVoting(false).build())).build()).
561 get(10, TimeUnit.SECONDS);
562 verifySuccessfulRpcResult(rpcResult);
564 verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", false),
565 new SimpleEntry<>("member-3", false));
566 verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", false),
567 new SimpleEntry<>("member-3", false));
568 verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", false),
569 new SimpleEntry<>("member-3", false));
573 public void testChangeMemberVotingStatesForSingleNodeShard() throws Exception {
574 String name = "testChangeMemberVotingStatesForSingleNodeShard";
575 String moduleShardsConfig = "module-shards-member1.conf";
576 MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
577 moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder().
578 shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();
580 leaderNode.configDataStore().waitTillReady();
582 // Invoke RPC service on member-3 to change voting status
584 ClusterAdminRpcService service = new ClusterAdminRpcService(leaderNode.configDataStore(),
585 leaderNode.operDataStore());
587 RpcResult<Void> rpcResult = service.changeMemberVotingStatesForShard(
588 new ChangeMemberVotingStatesForShardInputBuilder().setShardName("cars").
589 setDataStoreType(DataStoreType.Config).setMemberVotingState(ImmutableList.of(
590 new MemberVotingStateBuilder().setMemberName("member-1").setVoting(false).build())).build()).
591 get(10, TimeUnit.SECONDS);
592 verifyFailedRpcResult(rpcResult);
594 verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", true));
/**
 * Verifies that changeMemberVotingStatesForAllShards applies the requested voting-state changes
 * to every shard ("cars" and "people") of both the Config and Operational datastores, and that
 * the new server configuration is replicated to all three members.
 */
public void testChangeMemberVotingStatesForAllShards() throws Exception {
    String name = "testChangeMemberVotingStatesForAllShards";
    String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";

    // member-1 gets a short heartbeat interval and election timeout factor so it quickly
    // becomes the shard leader.
    MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name).
            moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder().
            shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();

    MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
            moduleShardsConfig(moduleShardsConfig).build();

    MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
            moduleShardsConfig(moduleShardsConfig).build();

    // Wait until the shards are ready and the full raft peer topology is visible from each node.
    leaderNode1.configDataStore().waitTillReady();
    leaderNode1.operDataStore().waitTillReady();
    replicaNode3.configDataStore().waitTillReady();
    replicaNode3.operDataStore().waitTillReady();
    verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2", "member-3");
    verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
    verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");

    // Invoke RPC service on member-3 to change voting status
    ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
            replicaNode3.operDataStore());

    // Change member-2 and member-3 to non-voting across all shards of both datastores.
    RpcResult<ChangeMemberVotingStatesForAllShardsOutput> rpcResult = service3.changeMemberVotingStatesForAllShards(
            new ChangeMemberVotingStatesForAllShardsInputBuilder().setMemberVotingState(ImmutableList.of(
                    new MemberVotingStateBuilder().setMemberName("member-2").setVoting(false).build(),
                    new MemberVotingStateBuilder().setMemberName("member-3").setVoting(false).build())).build()).
            get(10, TimeUnit.SECONDS);
    ChangeMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
    verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
            successShardResult("people", DataStoreType.Config),
            successShardResult("cars", DataStoreType.Operational),
            successShardResult("people", DataStoreType.Operational));

    // The new voting states should be visible from every member, for both shards and both
    // datastore types.
    verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
            replicaNode3.configDataStore(), replicaNode3.operDataStore()},
            new String[]{"cars", "people"}, new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", false),
            new SimpleEntry<>("member-3", false));
}
643 public void testFlipMemberVotingStates() throws Exception {
644 String name = "testFlipMemberVotingStates";
646 ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
647 new ServerInfo("member-1", true), new ServerInfo("member-2", true),
648 new ServerInfo("member-3", false)));
650 setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
651 setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
652 setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
654 String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
655 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
656 moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder().
657 shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();
659 MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
660 moduleShardsConfig(moduleShardsConfig).build();
662 MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
663 moduleShardsConfig(moduleShardsConfig).build();
665 leaderNode1.configDataStore().waitTillReady();
666 leaderNode1.operDataStore().waitTillReady();
667 replicaNode3.configDataStore().waitTillReady();
668 replicaNode3.operDataStore().waitTillReady();
669 verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
670 new SimpleEntry<>("member-2", true), new SimpleEntry<>("member-3", false));
672 ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
673 replicaNode3.operDataStore());
675 RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards().
676 get(10, TimeUnit.SECONDS);
677 FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
678 verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
679 successShardResult("people", DataStoreType.Config),
680 successShardResult("cars", DataStoreType.Operational),
681 successShardResult("people", DataStoreType.Operational));
683 verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
684 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
685 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
686 new String[]{"cars", "people"},
687 new SimpleEntry<>("member-1", false), new SimpleEntry<>("member-2", false),
688 new SimpleEntry<>("member-3", true));
690 // Leadership should have transferred to member 3 since it is the only remaining voting member.
691 verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
692 assertNotNull("Expected non-null leader Id", raftState.getLeader());
693 assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
694 raftState.getLeader().contains("member-3"));
697 verifyRaftState(leaderNode1.operDataStore(), "cars", raftState -> {
698 assertNotNull("Expected non-null leader Id", raftState.getLeader());
699 assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
700 raftState.getLeader().contains("member-3"));
703 // Flip the voting states back to the original states.
705 rpcResult = service3.flipMemberVotingStatesForAllShards(). get(10, TimeUnit.SECONDS);
706 result = verifySuccessfulRpcResult(rpcResult);
707 verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
708 successShardResult("people", DataStoreType.Config),
709 successShardResult("cars", DataStoreType.Operational),
710 successShardResult("people", DataStoreType.Operational));
712 verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
713 replicaNode2.configDataStore(), replicaNode2.operDataStore(),
714 replicaNode3.configDataStore(), replicaNode3.operDataStore()},
715 new String[]{"cars", "people"},
716 new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", true),
717 new SimpleEntry<>("member-3", false));
719 // Leadership should have transferred to member 1 or 2.
720 verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
721 assertNotNull("Expected non-null leader Id", raftState.getLeader());
722 assertTrue("Expected leader member-1 or member-2. Actual: " + raftState.getLeader(),
723 raftState.getLeader().contains("member-1") || raftState.getLeader().contains("member-2"));
/**
 * Verifies flipping voting states when there is initially no shard leader: the three running
 * members start as non-voting, so no election can succeed until the flip makes them voting.
 */
public void testFlipMemberVotingStatesWithNoInitialLeader() throws Exception {
    String name = "testFlipMemberVotingStatesWithNoInitialLeader";

    // Members 1, 2, and 3 are initially started up as non-voting. Members 4, 5, and 6 are
    // initially voting and simulated as down by not starting them up.
    ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
            new ServerInfo("member-1", false), new ServerInfo("member-2", false),
            new ServerInfo("member-3", false), new ServerInfo("member-4", true),
            new ServerInfo("member-5", true), new ServerInfo("member-6", true)));

    setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
    setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
    setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");

    String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
    MemberNode replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name).
            moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder().
            shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();

    MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
            moduleShardsConfig(moduleShardsConfig).build();

    MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
            moduleShardsConfig(moduleShardsConfig).build();

    // Initially there won't be a leader b/c all the up nodes are non-voting.

    replicaNode1.waitForMembersUp("member-2", "member-3");

    verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", false),
            new SimpleEntry<>("member-2", false), new SimpleEntry<>("member-3", false),
            new SimpleEntry<>("member-4", true), new SimpleEntry<>("member-5", true),
            new SimpleEntry<>("member-6", true));

    // With no leader elected, every running replica should remain a Follower.
    verifyRaftState(replicaNode1.configDataStore(), "cars", raftState ->
            assertEquals("Expected raft state", RaftState.Follower.toString(), raftState.getRaftState()));

    ClusterAdminRpcService service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(),
            replicaNode1.operDataStore());

    RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards().
            get(10, TimeUnit.SECONDS);
    FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
    verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
            successShardResult("people", DataStoreType.Config),
            successShardResult("cars", DataStoreType.Operational),
            successShardResult("people", DataStoreType.Operational));

    verifyVotingStates(new DistributedDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
            replicaNode3.configDataStore(), replicaNode3.operDataStore()},
            new String[]{"cars", "people"},
            new SimpleEntry<>("member-1", true), new SimpleEntry<>("member-2", true),
            new SimpleEntry<>("member-3", true), new SimpleEntry<>("member-4", false),
            new SimpleEntry<>("member-5", false), new SimpleEntry<>("member-6", false));

    // Since member 1 was changed to voting and there was no leader, it should've started an election
    // and become the leader for both datastores.
    verifyRaftState(replicaNode1.configDataStore(), "cars", raftState -> {
        assertNotNull("Expected non-null leader Id", raftState.getLeader());
        assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
                raftState.getLeader().contains("member-1"));
    });

    verifyRaftState(replicaNode1.operDataStore(), "cars", raftState -> {
        assertNotNull("Expected non-null leader Id", raftState.getLeader());
        assertTrue("Expected leader member-1. Actual: " + raftState.getLeader(),
                raftState.getLeader().contains("member-1"));
    });
}
/**
 * Verifies flipping voting states when the members that would become voting are all down: the
 * flip succeeds and is replicated, but the current leader cannot step down because no majority
 * of the new voting members is reachable.
 */
public void testFlipMemberVotingStatesWithVotingMembersDown() throws Exception {
    String name = "testFlipMemberVotingStatesWithVotingMembersDown";

    // Members 4, 5, and 6 are initially non-voting and simulated as down by not starting them up.
    ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
            new ServerInfo("member-1", true), new ServerInfo("member-2", true),
            new ServerInfo("member-3", true), new ServerInfo("member-4", false),
            new ServerInfo("member-5", false), new ServerInfo("member-6", false)));

    setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
    setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
    setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");

    String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
    MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name).
            moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder().
            shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1)).build();

    MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
            moduleShardsConfig(moduleShardsConfig).build();

    MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
            moduleShardsConfig(moduleShardsConfig).build();

    leaderNode1.configDataStore().waitTillReady();
    leaderNode1.operDataStore().waitTillReady();
    verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", true),
            new SimpleEntry<>("member-2", true), new SimpleEntry<>("member-3", true),
            new SimpleEntry<>("member-4", false), new SimpleEntry<>("member-5", false),
            new SimpleEntry<>("member-6", false));

    // Invoke the RPC service on the leader (member-1) to flip all voting states.
    ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
            leaderNode1.operDataStore());

    RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards().
            get(10, TimeUnit.SECONDS);
    FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
    verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
            successShardResult("people", DataStoreType.Config),
            successShardResult("cars", DataStoreType.Operational),
            successShardResult("people", DataStoreType.Operational));

    // Members 2 and 3 are now non-voting but should get replicated with the new server config.
    verifyVotingStates(new DistributedDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
            replicaNode3.configDataStore(), replicaNode3.operDataStore()},
            new String[]{"cars", "people"},
            new SimpleEntry<>("member-1", false), new SimpleEntry<>("member-2", false),
            new SimpleEntry<>("member-3", false), new SimpleEntry<>("member-4", true),
            new SimpleEntry<>("member-5", true), new SimpleEntry<>("member-6", true));

    // The leader (member 1) was changed to non-voting but it shouldn't be able to step down as leader yet
    // b/c it can't get a majority consensus with all voting members down. So verify it remains the leader.
    verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
        assertNotNull("Expected non-null leader Id", raftState.getLeader());
        assertTrue("Expected leader member-1", raftState.getLeader().contains("member-1"));
    });
}
/**
 * Seeds the in-memory journal for each of the given member's shards with a persisted server
 * configuration, so the shard actors recover with the desired voting states on startup.
 *
 * @param serverConfig the per-member voting states to persist; member ids are rewritten into
 *        fully-qualified shard ids for each shard/datastore combination
 * @param member the member whose journal entries are written
 * @param datastoreTypeSuffix suffix appended to the datastore type prefix (typically the test name)
 * @param shards the shard names to seed (e.g. "cars", "people")
 */
private void setupPersistedServerConfigPayload(ServerConfigurationPayload serverConfig,
        String member, String datastoreTypeSuffix, String... shards) {
    // Seed both the config and operational datastores.
    String[] datastoreTypes = {"config_", "oper_"};
    for(String type: datastoreTypes) {
        for(String shard: shards) {
            // Translate plain member names into the shard-specific peer ids used by raft.
            List<ServerInfo> newServerInfo = new ArrayList<>(serverConfig.getServerConfig().size());
            for(ServerInfo info: serverConfig.getServerConfig()) {
                newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.getId()),
                        type + datastoreTypeSuffix).toString(), info.isVoting()));
            }

            // Entry 1 sets the election term; entry 2 persists the server config at log index 0.
            String shardID = ShardIdentifier.create(shard, MemberName.forName(member),
                    type + datastoreTypeSuffix).toString();
            InMemoryJournal.addEntry(shardID, 1, new UpdateElectionTerm(1, null));
            InMemoryJournal.addEntry(shardID, 2, new ReplicatedLogImplEntry(0, 1,
                    new ServerConfigurationPayload(newServerInfo)));
        }
    }
}
880 private static void verifyVotingStates(DistributedDataStore[] datastores, String[] shards,
881 SimpleEntry<String, Boolean>... expStates) throws Exception {
882 for(DistributedDataStore datastore: datastores) {
883 for(String shard: shards) {
884 verifyVotingStates(datastore, shard, expStates);
890 private static void verifyVotingStates(DistributedDataStore datastore, String shardName,
891 SimpleEntry<String, Boolean>... expStates) throws Exception {
892 String localMemberName = datastore.getActorContext().getCurrentMemberName().getName();
893 Map<String, Boolean> expStateMap = new HashMap<>();
894 for(Entry<String, Boolean> e: expStates) {
895 expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(e.getKey()),
896 datastore.getActorContext().getDataStoreName()).toString(), e.getValue());
899 verifyRaftState(datastore, shardName, raftState -> {
900 String localPeerId = ShardIdentifier.create(shardName, MemberName.forName(localMemberName),
901 datastore.getActorContext().getDataStoreName()).toString();
902 assertEquals("Voting state for " + localPeerId, expStateMap.get(localPeerId), raftState.isVoting());
903 for(Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
904 assertEquals("Voting state for " + e.getKey(), expStateMap.get(e.getKey()), e.getValue());
/**
 * Asserts that the actual shard results match the expected ones exactly: same (shard, datastore
 * type) set, same success flags, and error messages present only on failures.
 *
 * @param shardResults the results returned by the RPC under test
 * @param expShardResults the expected results; order is irrelevant
 */
private static void verifyShardResults(List<ShardResult> shardResults, ShardResult... expShardResults) {
    // Index expected results by "shardName-dataStoreType" for order-independent matching.
    Map<String, ShardResult> expResultsMap = new HashMap<>();
    for(ShardResult r: expShardResults) {
        expResultsMap.put(r.getShardName() + "-" + r.getDataStoreType(), r);
    }

    for(ShardResult result: shardResults) {
        // remove() doubles as presence check and leaves only unmatched expectations behind.
        ShardResult exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
        assertNotNull(String.format("Unexpected result for shard %s, type %s", result.getShardName(),
                result.getDataStoreType()), exp);
        assertEquals("isSucceeded", exp.isSucceeded(), result.isSucceeded());
        if(exp.isSucceeded()) {
            assertNull("Expected null error message", result.getErrorMessage());
        } else {
            assertNotNull("Expected error message", result.getErrorMessage());
        }
    }

    // Anything left in the map was expected but never reported.
    if(!expResultsMap.isEmpty()) {
        fail("Missing shard results for " + expResultsMap.keySet());
    }
}
932 private static ShardResult successShardResult(String shardName, DataStoreType type) {
933 return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(true).build();
936 private static ShardResult failedShardResult(String shardName, DataStoreType type) {
937 return new ShardResultBuilder().setDataStoreType(type).setShardName(shardName).setSucceeded(false).build();