import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ConvertMembersToNonvotingForAllShardsInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ConvertMembersToVotingForAllShardsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInput;
import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
import org.opendaylight.yangtools.yang.common.RpcResult;
return newFailedRpcResultBuilder("A valid shard name must be specified").buildFuture();
}
+ DataStoreType dataStoreType = input.getDataStoreType();
+ if(dataStoreType == null) {
+ return newFailedRpcResultBuilder("A valid DataStoreType must be specified").buildFuture();
+ }
+
LOG.info("Adding replica for shard {}", shardName);
final SettableFuture<RpcResult<Void>> returnFuture = SettableFuture.create();
- ListenableFuture<List<Success>> future = sendMessageToShardManagers(new AddShardReplica(shardName));
- Futures.addCallback(future, new FutureCallback<List<Success>>() {
+ ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName));
+ Futures.addCallback(future, new FutureCallback<Success>() {
@Override
- public void onSuccess(List<Success> snapshots) {
+ public void onSuccess(Success success) {
LOG.info("Successfully added replica for shard {}", shardName);
returnFuture.set(newSuccessfulResult());
}
return Futures.allAsList(configFuture, operFuture);
}
+ /**
+  * Sends the given message to the shard manager of the selected data store and returns a
+  * future holding the asynchronous reply.
+  *
+  * @param dataStoreType selects the config or operational data store whose shard manager
+  *        receives the message
+  * @param message the message to send
+  * @return a future completed with the shard manager's reply
+  */
+ private <T> ListenableFuture<T> sendMessageToShardManager(DataStoreType dataStoreType, Object message) {
+     ActorRef recipient;
+     if (dataStoreType == DataStoreType.Config) {
+         recipient = configDataStore.getActorContext().getShardManager();
+     } else {
+         recipient = operDataStore.getActorContext().getShardManager();
+     }
+     // Ask with a one-minute timeout for the reply.
+     return ask(recipient, message, new Timeout(1, TimeUnit.MINUTES));
+ }
+
private static void saveSnapshotsToFile(DatastoreSnapshotList snapshots, String fileName,
SettableFuture<RpcResult<Void>> returnFuture) {
try(FileOutputStream fos = new FileOutputStream(fileName)) {
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
RpcResult<Void> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder().
setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
- checkSuccessfulRpcResult(rpcResult);
+ verifySuccessfulRpcResult(rpcResult);
try(FileInputStream fis = new FileInputStream(fileName)) {
List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
leaderNode1.waitForMembersUp("member-2");
- testAddShardReplica(newReplicaNode2, "cars", "member-1");
+ doAddShardReplica(newReplicaNode2, "cars", "member-1");
MemberNode newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name).
moduleShardsConfig(moduleShardsConfig).build();
leaderNode1.waitForMembersUp("member-3");
newReplicaNode2.waitForMembersUp("member-3");
- testAddShardReplica(newReplicaNode3, "cars", "member-1", "member-2");
+ doAddShardReplica(newReplicaNode3, "cars", "member-1", "member-2");
verifyRaftPeersPresent(newReplicaNode2.configDataStore(), "cars", "member-1", "member-3");
verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1", "member-3");
readCarsNodeAndVerify(newReplicaNode3.configDataStore(), configCarsNode);
}
+ // Verifies addShardReplica returns a failed RpcResult (rather than throwing) for each
+ // invalid-input case, against a single live member node.
+ @Test
+ public void testAddShardReplicaFailures() throws Exception {
+ String name = "testAddShardReplicaFailures";
+ MemberNode memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name).
+ moduleShardsConfig("module-shards-cars-member-1.conf").build();
+
+ ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
+ memberNode.operDataStore());
+
+ // Case 1: missing shard name - the input only sets the data store type.
+ RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().
+ setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+ verifyFailedRpcResult(rpcResult);
+
+ // Case 2: missing data store type - the input only sets the shard name.
+ rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars").
+ build()).get(10, TimeUnit.SECONDS);
+ verifyFailedRpcResult(rpcResult);
+
+ // Case 3: unknown shard - presumably fails because only the "cars" shard is declared in
+ // module-shards-cars-member-1.conf; verify against that config if this test changes.
+ rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("people").
+ setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+ verifyFailedRpcResult(rpcResult);
+
+ service.close();
+ }
+
private NormalizedNode<?, ?> writeCarsNodeAndVerify(DistributedDataStore writeToStore,
DistributedDataStore readFromStore) throws Exception {
DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
assertEquals("Data node", expCarsNode, optional.get());
}
- private void testAddShardReplica(MemberNode memberNode, String shardName, String... peerMemberNames)
+ private void doAddShardReplica(MemberNode memberNode, String shardName, String... peerMemberNames)
throws Exception {
memberNode.waitForMembersUp(peerMemberNames);
memberNode.operDataStore());
RpcResult<Void> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName).
- build()).get(10, TimeUnit.SECONDS);
- checkSuccessfulRpcResult(rpcResult);
+ setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+ verifySuccessfulRpcResult(rpcResult);
verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
+
+ Optional<ActorRef> optional = memberNode.operDataStore().getActorContext().findLocalShard(shardName);
+ assertEquals("Oper shard present", false, optional.isPresent());
+
+ rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName).
+ setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS);
+ verifySuccessfulRpcResult(rpcResult);
+
verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames);
service.close();
}
- private void checkSuccessfulRpcResult(RpcResult<Void> rpcResult) {
+ private void verifySuccessfulRpcResult(RpcResult<Void> rpcResult) {
if(!rpcResult.isSuccessful()) {
if(rpcResult.getErrors().size() > 0) {
RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
}
}
+ private void verifyFailedRpcResult(RpcResult<Void> rpcResult) {
+ assertEquals("RpcResult", false, rpcResult.isSuccessful());
+ assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size());
+ RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
+ assertNotNull("RpcResult error message null", error.getMessage());
+ }
+
@Test
public void testRemoveShardReplica() {
// TODO implement