package org.opendaylight.controller.cluster.datastore;
+import static akka.pattern.Patterns.ask;
import akka.actor.ActorPath;
import akka.actor.ActorRef;
import akka.actor.Address;
import akka.actor.Cancellable;
import akka.actor.OneForOneStrategy;
+import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.cluster.ClusterEvent;
+import akka.dispatch.OnComplete;
import akka.japi.Creator;
import akka.japi.Function;
import akka.persistence.RecoveryCompleted;
import akka.serialization.Serialization;
+import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Optional;
import com.google.common.collect.Sets;
import java.io.Serializable;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
+import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfoMBean;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
+import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
+import org.opendaylight.controller.cluster.datastore.messages.CreateShardReply;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
+import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
+import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.SwitchShardBehavior;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
+import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.base.messages.SwitchBehavior;
+import org.opendaylight.controller.cluster.raft.messages.AddServer;
+import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
+import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
+import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
private static final Logger LOG = LoggerFactory.getLogger(ShardManager.class);
- // Stores a mapping between a member name and the address of the member
- // Member names look like "member-1", "member-2" etc and are as specified
- // in configuration
- private final Map<String, Address> memberNameToAddress = new HashMap<>();
-
    // Stores a mapping between a shard name and its corresponding information
// Shard names look like inventory, topology etc and are as specified in
// configuration
// A data store could be of type config/operational
private final String type;
- private final String shardManagerIdentifierString;
-
private final ClusterWrapper cluster;
private final Configuration configuration;
private final PrimaryShardInfoFutureCache primaryShardInfoCache;
+ private final ShardPeerAddressResolver peerAddressResolver;
+
+ private SchemaContext schemaContext;
+
/**
*/
protected ShardManager(ClusterWrapper cluster, Configuration configuration,
this.configuration = Preconditions.checkNotNull(configuration, "configuration should not be null");
this.datastoreContext = datastoreContext;
this.type = datastoreContext.getDataStoreType();
- this.shardManagerIdentifierString = ShardManagerIdentifier.builder().type(type).build().toString();
this.shardDispatcherPath =
new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
this.waitTillReadyCountdownLatch = waitTillReadyCountdownLatch;
this.primaryShardInfoCache = primaryShardInfoCache;
+ peerAddressResolver = new ShardPeerAddressResolver(type, cluster.getCurrentMemberName());
+ this.datastoreContext = DatastoreContext.newBuilderFrom(datastoreContext).shardPeerAddressResolver(
+ peerAddressResolver).build();
+
// Subscribe this actor to cluster member events
cluster.subscribeToMemberEvents(getSelf());
}
public static Props props(
- final ClusterWrapper cluster,
- final Configuration configuration,
- final DatastoreContext datastoreContext,
- final CountDownLatch waitTillReadyCountdownLatch,
- final PrimaryShardInfoFutureCache primaryShardInfoCache) {
+ final ClusterWrapper cluster,
+ final Configuration configuration,
+ final DatastoreContext datastoreContext,
+ final CountDownLatch waitTillReadyCountdownLatch,
+ final PrimaryShardInfoFutureCache primaryShardInfoCache) {
Preconditions.checkNotNull(cluster, "cluster should not be null");
Preconditions.checkNotNull(configuration, "configuration should not be null");
onActorInitialized(message);
} else if (message instanceof ClusterEvent.MemberUp){
memberUp((ClusterEvent.MemberUp) message);
+ } else if (message instanceof ClusterEvent.MemberExited){
+ memberExited((ClusterEvent.MemberExited) message);
} else if(message instanceof ClusterEvent.MemberRemoved) {
memberRemoved((ClusterEvent.MemberRemoved) message);
} else if(message instanceof ClusterEvent.UnreachableMember) {
onLeaderStateChanged((ShardLeaderStateChanged) message);
} else if(message instanceof SwitchShardBehavior){
onSwitchShardBehavior((SwitchShardBehavior) message);
+ } else if(message instanceof CreateShard) {
+ onCreateShard((CreateShard)message);
+ } else if(message instanceof AddShardReplica){
+ onAddShardReplica((AddShardReplica)message);
+ } else if(message instanceof RemoveShardReplica){
+ onRemoveShardReplica((RemoveShardReplica)message);
} else {
unknownMessage(message);
}
}
+ private void onCreateShard(CreateShard createShard) {
+ Object reply;
+ try {
+ ModuleShardConfiguration moduleShardConfig = createShard.getModuleShardConfig();
+ if(localShards.containsKey(moduleShardConfig.getShardName())) {
+ throw new IllegalStateException(String.format("Shard with name %s already exists",
+ moduleShardConfig.getShardName()));
+ }
+
+ configuration.addModuleShardConfiguration(moduleShardConfig);
+
+ ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), moduleShardConfig.getShardName());
+ Map<String, String> peerAddresses = getPeerAddresses(moduleShardConfig.getShardName()/*,
+ moduleShardConfig.getShardMemberNames()*/);
+
+ LOG.debug("onCreateShard: shardId: {}, memberNames: {}. peerAddresses: {}", shardId,
+ moduleShardConfig.getShardMemberNames(), peerAddresses);
+
+ DatastoreContext shardDatastoreContext = createShard.getDatastoreContext();
+ if(shardDatastoreContext == null) {
+ shardDatastoreContext = datastoreContext;
+ } else {
+ shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext).shardPeerAddressResolver(
+ peerAddressResolver).build();
+ }
+
+ ShardInformation info = new ShardInformation(moduleShardConfig.getShardName(), shardId, peerAddresses,
+ shardDatastoreContext, createShard.getShardPropsCreator(), peerAddressResolver);
+ localShards.put(info.getShardName(), info);
+
+ mBean.addLocalShard(shardId.toString());
+
+ if(schemaContext != null) {
+ info.setActor(newShardActor(schemaContext, info));
+ }
+
+ reply = new CreateShardReply();
+ } catch (Exception e) {
+ LOG.error("onCreateShard failed", e);
+ reply = new akka.actor.Status.Failure(e);
+ }
+
+ if(getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
+ getSender().tell(reply, getSelf());
+ }
+ }
+
private void checkReady(){
if (isReadyWithLeaderId()) {
LOG.info("{}: All Shards are ready - data store {} is ready, available count is {}",
getSender().tell(messageSupplier.get(), getSelf());
}
- private NoShardLeaderException createNoShardLeaderException(ShardIdentifier shardId) {
+ private static NoShardLeaderException createNoShardLeaderException(ShardIdentifier shardId) {
return new NoShardLeaderException(null, shardId.toString());
}
- private NotInitializedException createNotInitializedException(ShardIdentifier shardId) {
+ private static NotInitializedException createNotInitializedException(ShardIdentifier shardId) {
return new NotInitializedException(String.format(
"Found primary shard %s but it's not initialized yet. Please try again later", shardId));
}
LOG.debug("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
message.member().address());
- memberNameToAddress.remove(message.member().roles().head());
+ peerAddressResolver.removePeerAddress(memberName);
+
+ for(ShardInformation info : localShards.values()){
+ info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
+ }
+ }
+
+ private void memberExited(ClusterEvent.MemberExited message) {
+ String memberName = message.member().roles().head();
+
+ LOG.debug("{}: Received MemberExited: memberName: {}, address: {}", persistenceId(), memberName,
+ message.member().address());
+
+ peerAddressResolver.removePeerAddress(memberName);
+
+ for(ShardInformation info : localShards.values()){
+ info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
+ }
}
private void memberUp(ClusterEvent.MemberUp message) {
LOG.debug("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
message.member().address());
- memberNameToAddress.put(memberName, message.member().address());
+ addPeerAddress(memberName, message.member().address());
+
+ checkReady();
+ }
+
+ private void addPeerAddress(String memberName, Address address) {
+ peerAddressResolver.addPeerAddress(memberName, address);
for(ShardInformation info : localShards.values()){
String shardName = info.getShardName();
- info.updatePeerAddress(getShardIdentifier(memberName, shardName).toString(),
- getShardActorPath(shardName, memberName), getSelf());
- }
+ String peerId = getShardIdentifier(memberName, shardName).toString();
+ info.updatePeerAddress(peerId, peerAddressResolver.getShardActorAddress(shardName, memberName), getSelf());
- checkReady();
+ info.peerUp(memberName, peerId, getSelf());
+ }
}
private void memberReachable(ClusterEvent.ReachableMember message) {
String memberName = message.member().roles().head();
LOG.debug("Received ReachableMember: memberName {}, address: {}", memberName, message.member().address());
+ addPeerAddress(memberName, message.member().address());
+
markMemberAvailable(memberName);
}
primaryShardInfoCache.remove(info.getShardName());
}
+
+ info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
}
}
LOG.debug("Marking Leader {} as available.", leaderId);
info.setLeaderAvailable(true);
}
+
+ info.peerUp(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
}
}
private void onDatastoreContext(DatastoreContext context) {
- datastoreContext = context;
+ datastoreContext = DatastoreContext.newBuilderFrom(context).shardPeerAddressResolver(
+ peerAddressResolver).build();
for (ShardInformation info : localShards.values()) {
if (info.getActor() != null) {
info.getActor().tell(datastoreContext, getSelf());
* @param message
*/
private void updateSchemaContext(final Object message) {
- final SchemaContext schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+ schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getAllModuleIdentifiers().size());
@VisibleForTesting
protected ActorRef newShardActor(final SchemaContext schemaContext, ShardInformation info) {
- return getContext().actorOf(Shard.props(info.getShardId(),
- info.getPeerAddresses(), datastoreContext, schemaContext)
- .withDispatcher(shardDispatcherPath), info.getShardId().toString());
+ return getContext().actorOf(info.newProps(schemaContext)
+ .withDispatcher(shardDispatcherPath), info.getShardId().toString());
}
private void findPrimary(FindPrimary message) {
new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());
- if(LOG.isDebugEnabled()) {
- LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
- }
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
+ }
- return found;
+ return found;
}
});
return;
}
- for(Map.Entry<String, Address> entry: memberNameToAddress.entrySet()) {
- if(!cluster.getCurrentMemberName().equals(entry.getKey())) {
- String path = getShardManagerActorPathBuilder(entry.getValue()).toString();
+ for(String address: peerAddressResolver.getShardManagerPeerActorAddresses()) {
+ LOG.debug("{}: findPrimary for {} forwarding to remote ShardManager {}", persistenceId(),
+ shardName, address);
- LOG.debug("{}: findPrimary for {} forwarding to remote ShardManager {}", persistenceId(),
- shardName, path);
-
- getContext().actorSelection(path).forward(new RemoteFindPrimary(shardName,
- message.isWaitUntilReady()), getContext());
- return;
- }
+ getContext().actorSelection(address).forward(new RemoteFindPrimary(shardName,
+ message.isWaitUntilReady()), getContext());
+ return;
}
LOG.debug("{}: No shard found for {}", persistenceId(), shardName);
String.format("No primary shard found for %s.", shardName)), getSelf());
}
- private StringBuilder getShardManagerActorPathBuilder(Address address) {
- StringBuilder builder = new StringBuilder();
- builder.append(address.toString()).append("/user/").append(shardManagerIdentifierString);
- return builder;
- }
-
- private String getShardActorPath(String shardName, String memberName) {
- Address address = memberNameToAddress.get(memberName);
- if(address != null) {
- StringBuilder builder = getShardManagerActorPathBuilder(address);
- builder.append("/")
- .append(getShardIdentifier(memberName, shardName));
- return builder.toString();
- }
- return null;
- }
-
    /**
     * Constructs the ShardIdentifier for a shard, given the name of the member on
     * which the shard resides and the name of the shard.
     *
     * @return the ShardIdentifier for the given member and shard names
     */
private ShardIdentifier getShardIdentifier(String memberName, String shardName){
- return ShardIdentifier.builder().memberName(memberName).shardName(shardName).type(type).build();
+ return peerAddressResolver.getShardIdentifier(memberName, shardName);
}
/**
*/
private void createLocalShards() {
String memberName = this.cluster.getCurrentMemberName();
- List<String> memberShardNames =
- this.configuration.getMemberShardNames(memberName);
+ Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);
+ ShardPropsCreator shardPropsCreator = new DefaultShardPropsCreator();
List<String> localShardActorNames = new ArrayList<>();
for(String shardName : memberShardNames){
ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
Map<String, String> peerAddresses = getPeerAddresses(shardName);
localShardActorNames.add(shardId.toString());
- localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses));
+ localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses, datastoreContext,
+ shardPropsCreator, peerAddressResolver));
}
- mBean = ShardManagerInfo.createShardManagerMBean("shard-manager-" + this.type,
- datastoreContext.getDataStoreMXBeanType(), localShardActorNames);
+ mBean = ShardManagerInfo.createShardManagerMBean(memberName, "shard-manager-" + this.type,
+ datastoreContext.getDataStoreMXBeanType(), localShardActorNames);
mBean.setShardManager(this);
}
     * Given the name of a shard, finds the addresses of all of its peers.
     *
     * @param shardName the name of the shard
     */
- private Map<String, String> getPeerAddresses(String shardName){
-
+ private Map<String, String> getPeerAddresses(String shardName) {
+ Collection<String> members = configuration.getMembersFromShardName(shardName);
Map<String, String> peerAddresses = new HashMap<>();
- List<String> members = this.configuration.getMembersFromShardName(shardName);
-
String currentMemberName = this.cluster.getCurrentMemberName();
- for(String memberName : members){
- if(!currentMemberName.equals(memberName)){
+ for(String memberName : members) {
+ if(!currentMemberName.equals(memberName)) {
ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
- String path = getShardActorPath(shardName, currentMemberName);
- peerAddresses.put(shardId.toString(), path);
+ String address = peerAddressResolver.getShardActorAddress(shardName, memberName);
+ peerAddresses.put(shardId.toString(), address);
}
}
return peerAddresses;
public SupervisorStrategy supervisorStrategy() {
return new OneForOneStrategy(10, Duration.create("1 minute"),
- new Function<Throwable, SupervisorStrategy.Directive>() {
- @Override
- public SupervisorStrategy.Directive apply(Throwable t) {
- LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
- return SupervisorStrategy.resume();
- }
+ new Function<Throwable, SupervisorStrategy.Directive>() {
+ @Override
+ public SupervisorStrategy.Directive apply(Throwable t) {
+ LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
+ return SupervisorStrategy.resume();
}
- );
+ }
+ );
}
return mBean;
}
+ private DatastoreContext getInitShardDataStoreContext() {
+ return (DatastoreContext.newBuilderFrom(datastoreContext)
+ .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
+ .build());
+ }
+
+ private void onAddShardReplica (AddShardReplica shardReplicaMsg) {
+ final String shardName = shardReplicaMsg.getShardName();
+
+ // verify the local shard replica is already available in the controller node
+ LOG.debug ("received AddShardReplica for shard {}", shardName);
+ if (localShards.containsKey(shardName)) {
+ LOG.debug ("Local shard {} already available in the controller node", shardName);
+ getSender().tell(new akka.actor.Status.Failure(
+ new IllegalArgumentException(String.format("Local shard %s already exists",
+ shardName))), getSelf());
+ return;
+ }
+ // verify the shard with the specified name is present in the cluster configuration
+ if (!(this.configuration.isShardConfigured(shardName))) {
+ LOG.debug ("No module configuration exists for shard {}", shardName);
+ getSender().tell(new akka.actor.Status.Failure(new IllegalArgumentException(
+ String.format("No module configuration exists for shard %s",
+ shardName))), getSelf());
+ return;
+ }
+
+ // Create the localShard
+ if (schemaContext == null) {
+ LOG.debug ("schemaContext is not updated to create localShardActor");
+ getSender().tell(new akka.actor.Status.Failure(
+ new IllegalStateException(String.format(
+ "schemaContext not available to create localShardActor for %s",
+ shardName))), getSelf());
+ return;
+ }
+
+ Map<String, String> peerAddresses = getPeerAddresses(shardName);
+ if (peerAddresses.isEmpty()) {
+ LOG.debug ("Shard peers not available for replicating shard data from leader");
+ getSender().tell(new akka.actor.Status.Failure(
+ new IllegalStateException(String.format(
+ "Cannot add replica for shard %s because no peer is available",
+ shardName))), getSelf());
+ return;
+ }
+
+ Timeout findPrimaryTimeout = new Timeout(datastoreContext
+ .getShardInitializationTimeout().duration().$times(2));
+
+ final ActorRef sender = getSender();
+ Future<Object> futureObj = ask(getSelf(), new RemoteFindPrimary(shardName, true),
+ findPrimaryTimeout);
+ futureObj.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(Throwable failure, Object response) {
+ if (failure != null) {
+ LOG.debug ("Failed to receive response for FindPrimary of shard {}",
+ shardName, failure);
+ sender.tell(new akka.actor.Status.Failure(new RuntimeException(
+ String.format("Failed to find leader for shard %s", shardName), failure)),
+ getSelf());
+ } else {
+ if (!(response instanceof RemotePrimaryShardFound)) {
+ LOG.debug ("Shard leader not available for creating local shard replica {}",
+ shardName);
+ sender.tell(new akka.actor.Status.Failure(
+ new IllegalStateException(String.format(
+ "Invalid response type, %s, received from FindPrimary for shard %s",
+ response.getClass().getName(), shardName))), getSelf());
+ return;
+ }
+ RemotePrimaryShardFound message = (RemotePrimaryShardFound)response;
+ addShard (shardName, message, sender);
+ }
+ }
+ }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
+ }
+
+ private void addShard(final String shardName, final RemotePrimaryShardFound response,
+ final ActorRef sender) {
+ ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
+ shardName);
+ String localShardAddress = peerAddressResolver.getShardActorAddress(shardName,
+ cluster.getCurrentMemberName());
+ final ShardInformation shardInfo = new ShardInformation(shardName, shardId,
+ getPeerAddresses(shardName), getInitShardDataStoreContext(),
+ new DefaultShardPropsCreator(), peerAddressResolver);
+ localShards.put(shardName, shardInfo);
+ shardInfo.setActor(newShardActor(schemaContext, shardInfo));
+
+ //inform ShardLeader to add this shard as a replica by sending an AddServer message
+ LOG.debug ("sending AddServer message to peer {} for shard {}",
+ response.getPrimaryPath(), shardId);
+
+ Timeout addServerTimeout = new Timeout(datastoreContext
+ .getShardLeaderElectionTimeout().duration().$times(4));
+ Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
+ new AddServer(shardId.toString(), localShardAddress, true), addServerTimeout);
+
+ futureObj.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(Throwable failure, Object addServerResponse) {
+ if (failure != null) {
+ LOG.debug ("AddServer request to {} for {} failed",
+ response.getPrimaryPath(), shardName, failure);
+ // Remove the shard
+ localShards.remove(shardName);
+ if (shardInfo.getActor() != null) {
+ shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());
+ }
+ sender.tell(new akka.actor.Status.Failure(new RuntimeException(
+ String.format("AddServer request to leader %s for shard %s failed",
+ response.getPrimaryPath(), shardName), failure)), getSelf());
+ } else {
+ AddServerReply reply = (AddServerReply)addServerResponse;
+ onAddServerReply(shardName, shardInfo, reply, sender, response.getPrimaryPath());
+ }
+ }
+ }, new Dispatchers(context().system().dispatchers()).
+ getDispatcher(Dispatchers.DispatcherType.Client));
+ return;
+ }
+
+ private void onAddServerReply (String shardName, ShardInformation shardInfo,
+ AddServerReply replyMsg, ActorRef sender, String leaderPath) {
+ if (replyMsg.getStatus() == ServerChangeStatus.OK) {
+ LOG.debug ("Leader shard successfully added the replica shard {}",
+ shardName);
+ // Make the local shard voting capable
+ shardInfo.setDatastoreContext(datastoreContext, getSelf());
+ ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
+ shardName);
+ mBean.addLocalShard(shardId.toString());
+ sender.tell(new akka.actor.Status.Success(true), getSelf());
+ } else {
+ LOG.warn ("Cannot add shard replica {} status {}",
+ shardName, replyMsg.getStatus());
+ LOG.debug ("removing the local shard replica for shard {}",
+ shardName);
+ //remove the local replica created
+ localShards.remove(shardName);
+ if (shardInfo.getActor() != null) {
+ shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());
+ }
+ switch (replyMsg.getStatus()) {
+ //case ServerChangeStatus.TIMEOUT:
+ case TIMEOUT:
+ sender.tell(new akka.actor.Status.Failure(new RuntimeException(
+ String.format("The shard leader %s timed out trying to replicate the initial data to the new shard %s. Possible causes - there was a problem replicating the data or shard leadership changed while replicating the shard data",
+ leaderPath, shardName))), getSelf());
+ break;
+ //case ServerChangeStatus.NO_LEADER:
+ case NO_LEADER:
+ sender.tell(new akka.actor.Status.Failure(new RuntimeException(String.format(
+ "There is no shard leader available for shard %s", shardName))), getSelf());
+ break;
+ default :
+ sender.tell(new akka.actor.Status.Failure(new RuntimeException(String.format(
+ "AddServer request to leader %s for shard %s failed with status %s",
+ leaderPath, shardName, replyMsg.getStatus()))), getSelf());
+ }
+ }
+ }
+
+ private void onRemoveShardReplica (RemoveShardReplica shardReplicaMsg) {
+ String shardName = shardReplicaMsg.getShardName();
+ boolean deleteStatus = false;
+
+ // verify the local shard replica is available in the controller node
+ if (!localShards.containsKey(shardName)) {
+ LOG.debug ("Local shard replica {} is not available in the controller node", shardName);
+ getSender().tell(new akka.actor.Status.Failure(
+ new IllegalArgumentException(String.format("Local shard %s not available",
+ shardName))), getSelf());
+ return;
+ }
+ // call RemoveShard for the shardName
+ getSender().tell(new akka.actor.Status.Success(true), getSelf());
+ return;
+ }
+
@VisibleForTesting
protected static class ShardInformation {
private final ShardIdentifier shardId;
private final String shardName;
private ActorRef actor;
private ActorPath actorPath;
- private final Map<String, String> peerAddresses;
+ private final Map<String, String> initialPeerAddresses;
private Optional<DataTree> localShardDataTree;
private boolean leaderAvailable = false;
private String leaderId;
private short leaderVersion;
+ private DatastoreContext datastoreContext;
+ private final ShardPropsCreator shardPropsCreator;
+ private final ShardPeerAddressResolver addressResolver;
+
private ShardInformation(String shardName, ShardIdentifier shardId,
- Map<String, String> peerAddresses) {
+ Map<String, String> initialPeerAddresses, DatastoreContext datastoreContext,
+ ShardPropsCreator shardPropsCreator, ShardPeerAddressResolver addressResolver) {
this.shardName = shardName;
this.shardId = shardId;
- this.peerAddresses = peerAddresses;
+ this.initialPeerAddresses = initialPeerAddresses;
+ this.datastoreContext = datastoreContext;
+ this.shardPropsCreator = shardPropsCreator;
+ this.addressResolver = addressResolver;
+ }
+
+ Props newProps(SchemaContext schemaContext) {
+ return shardPropsCreator.newProps(shardId, initialPeerAddresses, datastoreContext, schemaContext);
}
String getShardName() {
return localShardDataTree;
}
- Map<String, String> getPeerAddresses() {
- return peerAddresses;
- }
-
void updatePeerAddress(String peerId, String peerAddress, ActorRef sender){
- LOG.info("updatePeerAddress for peer {} with address {}", peerId,
- peerAddress);
- if(peerAddresses.containsKey(peerId)){
- peerAddresses.put(peerId, peerAddress);
-
- if(actor != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Sending PeerAddressResolved for peer {} with address {} to {}",
- peerId, peerAddress, actor.path());
- }
+ LOG.info("updatePeerAddress for peer {} with address {}", peerId, peerAddress);
- actor.tell(new PeerAddressResolved(peerId.toString(), peerAddress), sender);
+ if(actor != null) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Sending PeerAddressResolved for peer {} with address {} to {}",
+ peerId, peerAddress, actor.path());
}
- notifyOnShardInitializedCallbacks();
+ actor.tell(new PeerAddressResolved(peerId, peerAddress), sender);
+ }
+
+ notifyOnShardInitializedCallbacks();
+ }
+
+ void peerDown(String memberName, String peerId, ActorRef sender) {
+ if(actor != null) {
+ actor.tell(new PeerDown(memberName, peerId), sender);
+ }
+ }
+
+ void peerUp(String memberName, String peerId, ActorRef sender) {
+ if(actor != null) {
+ actor.tell(new PeerUp(memberName, peerId), sender);
}
}
boolean isShardReadyWithLeaderId() {
return leaderAvailable && isShardReady() && !RaftState.IsolatedLeader.name().equals(role) &&
- (isLeader() || peerAddresses.get(leaderId) != null);
+ (isLeader() || addressResolver.resolve(leaderId) != null);
}
boolean isShardInitialized() {
if(isLeader()) {
return Serialization.serializedActorPath(getActor());
} else {
- return peerAddresses.get(leaderId);
+ return addressResolver.resolve(leaderId);
}
}
void setLeaderVersion(short leaderVersion) {
this.leaderVersion = leaderVersion;
}
+
+ void setDatastoreContext(DatastoreContext datastoreContext, ActorRef sender) {
+ this.datastoreContext = datastoreContext;
+ //notify the datastoreContextchange
+ LOG.debug ("Notifying RaftPolicy change via datastoreContextChange for {}",
+ this.shardName);
+ if (actor != null) {
+ actor.tell(this.datastoreContext, sender);
+ }
+ }
}
private static class ShardManagerCreator implements Creator<ShardManager> {