/*
 * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore.admin;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Address;
import akka.actor.AddressFromURIString;
import akka.actor.PoisonPill;
import akka.cluster.Cluster;
import akka.cluster.ClusterEvent.CurrentClusterState;
import akka.cluster.Member;
import akka.cluster.MemberStatus;
import akka.testkit.JavaTestKit;
import com.google.common.base.Optional;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.Uninterruptibles;
import com.typesafe.config.ConfigFactory;
import java.io.File;
import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.SerializationUtils;
import org.junit.After;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.ClusterWrapperImpl;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
/**
 * Unit tests for ClusterAdminRpcService.
 *
 * @author Thomas Pantelis
 */
67 public class ClusterAdminRpcServiceTest {
68 private static final Address MEMBER_1_ADDRESS = AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
70 private final List<MemberNode> memberNodes = new ArrayList<>();
73 public void tearDown() {
74 for(MemberNode m: memberNodes) {
80 public void testBackupDatastore() throws Exception {
81 MemberNode node = MemberNode.builder(memberNodes).akkaConfig("Member1").
82 moduleShardsConfig("module-shards-member1.conf").
83 waitForShardLeader("cars", "people").testName("testBackupDatastore").build();
85 String fileName = "target/testBackupDatastore";
86 new File(fileName).delete();
88 ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore, node.operDataStore);
90 RpcResult<Void> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder().
91 setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
92 checkSuccessfulRpcResult(rpcResult);
94 try(FileInputStream fis = new FileInputStream(fileName)) {
95 List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
96 assertEquals("DatastoreSnapshot size", 2, snapshots.size());
98 ImmutableMap<String, DatastoreSnapshot> map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0),
99 snapshots.get(1).getType(), snapshots.get(1));
100 verifyDatastoreSnapshot(node.configDataStore.getActorContext().getDataStoreType(),
101 map.get(node.configDataStore.getActorContext().getDataStoreType()), "cars", "people");
103 new File(fileName).delete();
106 // Test failure by killing a shard.
108 node.configDataStore.getActorContext().getShardManager().tell(node.datastoreContextBuilder.
109 shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender());
111 ActorRef carsShardActor = node.configDataStore.getActorContext().findLocalShard("cars").get();
112 node.kit.watch(carsShardActor);
113 carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
114 node.kit.expectTerminated(carsShardActor);
116 rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build()).
117 get(5, TimeUnit.SECONDS);
118 assertEquals("isSuccessful", false, rpcResult.isSuccessful());
119 assertEquals("getErrors", 1, rpcResult.getErrors().size());
124 private void verifyDatastoreSnapshot(String type, DatastoreSnapshot datastoreSnapshot, String... expShardNames) {
125 assertNotNull("Missing DatastoreSnapshot for type " + type, datastoreSnapshot);
126 Set<String> shardNames = new HashSet<>();
127 for(DatastoreSnapshot.ShardSnapshot s: datastoreSnapshot.getShardSnapshots()) {
128 shardNames.add(s.getName());
131 assertEquals("DatastoreSnapshot shard names", Sets.newHashSet(expShardNames), shardNames);
135 public void testAddShardReplica() throws Exception {
136 String name = "testAddShardReplica";
137 String moduleShardsConfig = "module-shards-cars-member-1.conf";
138 MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name ).
139 moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build();
141 MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name).
142 moduleShardsConfig(moduleShardsConfig).build();
144 leaderNode1.waitForMembersUp("member-2");
146 testAddShardReplica(newReplicaNode2, "cars", "member-1");
148 MemberNode newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
149 moduleShardsConfig(moduleShardsConfig).build();
151 leaderNode1.waitForMembersUp("member-3");
152 newReplicaNode2.waitForMembersUp("member-3");
154 testAddShardReplica(newReplicaNode3, "cars", "member-1", "member-2");
156 verifyRaftPeersPresent(newReplicaNode2.configDataStore, "cars", "member-1", "member-3");
157 verifyRaftPeersPresent(newReplicaNode2.operDataStore, "cars", "member-1", "member-3");
159 // Write data to member-2's config datastore and read/verify via member-3
160 NormalizedNode<?, ?> configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore,
161 newReplicaNode3.configDataStore);
163 // Write data to member-3's oper datastore and read/verify via member-2
164 writeCarsNodeAndVerify(newReplicaNode3.operDataStore, newReplicaNode2.operDataStore);
166 // Verify all data has been replicated. We expect 3 log entries and thus last applied index of 2 -
167 // 2 ServerConfigurationPayload entries and the transaction payload entry.
169 RaftStateVerifier verifier = new RaftStateVerifier() {
171 public void verify(OnDemandRaftState raftState) {
172 assertEquals("Commit index", 2, raftState.getCommitIndex());
173 assertEquals("Last applied index", 2, raftState.getLastApplied());
177 verifyRaftState(leaderNode1.configDataStore, "cars", verifier);
178 verifyRaftState(leaderNode1.operDataStore, "cars", verifier);
180 verifyRaftState(newReplicaNode2.configDataStore, "cars", verifier);
181 verifyRaftState(newReplicaNode2.operDataStore, "cars", verifier);
183 verifyRaftState(newReplicaNode3.configDataStore, "cars", verifier);
184 verifyRaftState(newReplicaNode3.operDataStore, "cars", verifier);
186 // Restart member-3 and verify the cars config shard is re-instated.
188 Cluster.get(leaderNode1.kit.getSystem()).down(Cluster.get(newReplicaNode3.kit.getSystem()).selfAddress());
189 newReplicaNode3.cleanup();
191 newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
192 moduleShardsConfig(moduleShardsConfig).createOperDatastore(false).build();
194 verifyRaftState(newReplicaNode3.configDataStore, "cars", verifier);
195 readCarsNodeAndVerify(newReplicaNode3.configDataStore, configCarsNode);
198 private NormalizedNode<?, ?> writeCarsNodeAndVerify(DistributedDataStore writeToStore,
199 DistributedDataStore readFromStore) throws Exception {
200 DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
201 NormalizedNode<?, ?> carsNode = CarsModel.create();
202 writeTx.write(CarsModel.BASE_PATH, carsNode);
204 DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
205 Boolean canCommit = cohort .canCommit().get(7, TimeUnit.SECONDS);
206 assertEquals("canCommit", true, canCommit);
207 cohort.preCommit().get(5, TimeUnit.SECONDS);
208 cohort.commit().get(5, TimeUnit.SECONDS);
210 readCarsNodeAndVerify(readFromStore, carsNode);
214 private void readCarsNodeAndVerify(DistributedDataStore readFromStore,
215 NormalizedNode<?, ?> expCarsNode) throws Exception {
216 Optional<NormalizedNode<?, ?>> optional = readFromStore.newReadOnlyTransaction().
217 read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS);
218 assertEquals("isPresent", true, optional.isPresent());
219 assertEquals("Data node", expCarsNode, optional.get());
222 private void testAddShardReplica(MemberNode memberNode, String shardName, String... peerMemberNames)
224 memberNode.waitForMembersUp(peerMemberNames);
226 ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore,
227 memberNode.operDataStore);
229 RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName).
230 build()).get(10, TimeUnit.SECONDS);
231 checkSuccessfulRpcResult(rpcResult);
233 verifyRaftPeersPresent(memberNode.configDataStore, shardName, peerMemberNames);
234 verifyRaftPeersPresent(memberNode.operDataStore, shardName, peerMemberNames);
239 private void verifyRaftPeersPresent(DistributedDataStore datastore, final String shardName, String... peerMemberNames)
241 final Set<String> peerIds = Sets.newHashSet();
242 for(String p: peerMemberNames) {
243 peerIds.add(ShardIdentifier.builder().memberName(p).shardName(shardName).
244 type(datastore.getActorContext().getDataStoreType()).build().toString());
247 verifyRaftState(datastore, shardName, new RaftStateVerifier() {
249 public void verify(OnDemandRaftState raftState) {
250 assertTrue("Peer(s) " + peerIds + " not found for shard " + shardName,
251 raftState.getPeerAddresses().keySet().containsAll(peerIds));
256 private void verifyRaftState(DistributedDataStore datastore, String shardName, RaftStateVerifier verifier)
258 ActorContext actorContext = datastore.getActorContext();
260 Future<ActorRef> future = actorContext.findLocalShardAsync(shardName);
261 ActorRef shardActor = Await.result(future, Duration.create(10, TimeUnit.SECONDS));
263 AssertionError lastError = null;
264 Stopwatch sw = Stopwatch.createStarted();
265 while(sw.elapsed(TimeUnit.SECONDS) <= 5) {
266 OnDemandRaftState raftState = (OnDemandRaftState)actorContext.
267 executeOperation(shardActor, GetOnDemandRaftState.INSTANCE);
270 verifier.verify(raftState);
272 } catch (AssertionError e) {
274 Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
281 private void checkSuccessfulRpcResult(RpcResult<Void> rpcResult) {
282 if(!rpcResult.isSuccessful()) {
283 if(rpcResult.getErrors().size() > 0) {
284 RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
285 throw new AssertionError("Rpc failed with error: " + error, error.getCause());
288 fail("Rpc failed with no error");
293 public void testRemoveShardReplica() {
298 public void testAddReplicasForAllShards() {
303 public void testRemoveAllShardReplicas() {
308 public void testConvertMembersToVotingForAllShards() {
313 public void testConvertMembersToNonvotingForAllShards() {
317 private static class MemberNode {
318 IntegrationTestKit kit;
319 DistributedDataStore configDataStore;
320 DistributedDataStore operDataStore;
321 final DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder().
322 shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(30);
325 static Builder builder(List<MemberNode> members) {
326 return new Builder(members);
329 void waitForMembersUp(String... otherMembers) {
330 Set<String> otherMembersSet = Sets.newHashSet(otherMembers);
331 Stopwatch sw = Stopwatch.createStarted();
332 while(sw.elapsed(TimeUnit.SECONDS) <= 10) {
333 CurrentClusterState state = Cluster.get(kit.getSystem()).state();
334 for(Member m: state.getMembers()) {
335 if(m.status() == MemberStatus.up() && otherMembersSet.remove(m.getRoles().iterator().next()) &&
336 otherMembersSet.isEmpty()) {
341 Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
344 fail("Member(s) " + otherMembersSet + " are not Up");
350 kit.cleanup(configDataStore);
351 kit.cleanup(operDataStore);
352 JavaTestKit.shutdownActorSystem(kit.getSystem());
356 static class Builder {
357 List<MemberNode> members;
358 String moduleShardsConfig;
360 String[] waitForshardLeader = new String[0];
362 boolean createOperDatastore = true;
364 Builder(List<MemberNode> members) {
365 this.members = members;
368 Builder moduleShardsConfig(String moduleShardsConfig) {
369 this.moduleShardsConfig = moduleShardsConfig;
373 Builder akkaConfig(String akkaConfig) {
374 this.akkaConfig = akkaConfig;
378 Builder testName(String testName) {
379 this.testName = testName;
383 Builder waitForShardLeader(String... shardNames) {
384 this.waitForshardLeader = shardNames;
388 Builder createOperDatastore(boolean value) {
389 this.createOperDatastore = value;
394 MemberNode node = new MemberNode();
395 ActorSystem system = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig(akkaConfig));
396 Cluster.get(system).join(MEMBER_1_ADDRESS);
398 node.kit = new IntegrationTestKit(system, node.datastoreContextBuilder);
400 String memberName = new ClusterWrapperImpl(system).getCurrentMemberName();
401 node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-config-" + memberName);
402 node.configDataStore = node.kit.setupDistributedDataStore("config_" + testName, moduleShardsConfig,
403 true, waitForshardLeader);
405 if(createOperDatastore) {
406 node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-oper-" + memberName);
407 node.operDataStore = node.kit.setupDistributedDataStore("oper_" + testName, moduleShardsConfig,
408 true, waitForshardLeader);
417 private static interface RaftStateVerifier {
418 void verify(OnDemandRaftState raftState);