/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
9 package org.opendaylight.controller.cluster.datastore;
11 import static akka.pattern.Patterns.ask;
12 import akka.actor.ActorPath;
13 import akka.actor.ActorRef;
14 import akka.actor.Address;
15 import akka.actor.Cancellable;
16 import akka.actor.OneForOneStrategy;
17 import akka.actor.PoisonPill;
18 import akka.actor.Props;
19 import akka.actor.SupervisorStrategy;
20 import akka.cluster.ClusterEvent;
21 import akka.dispatch.OnComplete;
22 import akka.japi.Creator;
23 import akka.japi.Function;
24 import akka.persistence.RecoveryCompleted;
25 import akka.serialization.Serialization;
26 import akka.util.Timeout;
27 import com.google.common.annotations.VisibleForTesting;
28 import com.google.common.base.Objects;
29 import com.google.common.base.Optional;
30 import com.google.common.base.Preconditions;
31 import com.google.common.base.Strings;
32 import com.google.common.base.Supplier;
33 import com.google.common.collect.Sets;
34 import java.io.Serializable;
35 import java.util.ArrayList;
36 import java.util.Collection;
37 import java.util.HashMap;
38 import java.util.Iterator;
39 import java.util.List;
42 import java.util.concurrent.CountDownLatch;
43 import java.util.concurrent.TimeUnit;
44 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
45 import org.opendaylight.controller.cluster.datastore.config.Configuration;
46 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
47 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
48 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
49 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
50 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
51 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
52 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfoMBean;
53 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
54 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
55 import org.opendaylight.controller.cluster.datastore.messages.CreateShardReply;
56 import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
57 import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
58 import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
59 import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
60 import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
61 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
62 import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
63 import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
64 import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
65 import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
66 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
67 import org.opendaylight.controller.cluster.datastore.messages.SwitchShardBehavior;
68 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
69 import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
70 import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
71 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
72 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
73 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
74 import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
75 import org.opendaylight.controller.cluster.raft.RaftState;
76 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
77 import org.opendaylight.controller.cluster.raft.base.messages.SwitchBehavior;
78 import org.opendaylight.controller.cluster.raft.messages.AddServer;
79 import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
80 import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
81 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
82 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
83 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
84 import org.slf4j.Logger;
85 import org.slf4j.LoggerFactory;
86 import scala.concurrent.Future;
87 import scala.concurrent.duration.Duration;
88 import scala.concurrent.duration.FiniteDuration;
/**
 * The ShardManager has the following jobs:
 * <ul>
 * <li> Create all the local shard replicas that belong on this cluster member
 * <li> Find the address of the local shard
 * <li> Find the primary replica for any given shard
 * <li> Monitor the cluster members and store their addresses
 * </ul>
 */
99 public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
    private static final Logger LOG = LoggerFactory.getLogger(ShardManager.class);

    // Stores a mapping between a shard name and it's corresponding information
    // Shard names look like inventory, topology etc and are as specified in
    // the module-shard configuration.
    private final Map<String, ShardInformation> localShards = new HashMap<>();

    // The type of a ShardManager reflects the type of the datastore itself
    // A data store could be of type config/operational
    private final String type;

    // Cluster membership facade; supplies the current member name and member events.
    private final ClusterWrapper cluster;

    // Static shard/module configuration for this datastore.
    private final Configuration configuration;

    // Akka dispatcher path applied to every Shard actor created by this manager.
    private final String shardDispatcherPath;

    // JMX bean; registered in createLocalShards(), so it may be null before recovery completes.
    private ShardManagerInfo mBean;

    // Current datastore context; rebuilt in the constructor with the peer address resolver installed.
    private DatastoreContext datastoreContext;

    // Counted down once all local shards are ready with a known leader (see checkReady()).
    private final CountDownLatch waitTillReadyCountdownLatch;

    // Cache of primary shard lookups; entries are invalidated when shard leadership changes.
    private final PrimaryShardInfoFutureCache primaryShardInfoCache;

    // Resolves peer shard actor addresses from cluster member names.
    private final ShardPeerAddressResolver peerAddressResolver;

    // Set via UpdateSchemaContext messages; null until the first schema update arrives.
    private SchemaContext schemaContext;
132 protected ShardManager(ClusterWrapper cluster, Configuration configuration,
133 DatastoreContext datastoreContext, CountDownLatch waitTillReadyCountdownLatch,
134 PrimaryShardInfoFutureCache primaryShardInfoCache) {
136 this.cluster = Preconditions.checkNotNull(cluster, "cluster should not be null");
137 this.configuration = Preconditions.checkNotNull(configuration, "configuration should not be null");
138 this.datastoreContext = datastoreContext;
139 this.type = datastoreContext.getDataStoreType();
140 this.shardDispatcherPath =
141 new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
142 this.waitTillReadyCountdownLatch = waitTillReadyCountdownLatch;
143 this.primaryShardInfoCache = primaryShardInfoCache;
145 peerAddressResolver = new ShardPeerAddressResolver(type, cluster.getCurrentMemberName());
146 this.datastoreContext = DatastoreContext.newBuilderFrom(datastoreContext).shardPeerAddressResolver(
147 peerAddressResolver).build();
149 // Subscribe this actor to cluster member events
150 cluster.subscribeToMemberEvents(getSelf());
155 public static Props props(
156 final ClusterWrapper cluster,
157 final Configuration configuration,
158 final DatastoreContext datastoreContext,
159 final CountDownLatch waitTillReadyCountdownLatch,
160 final PrimaryShardInfoFutureCache primaryShardInfoCache) {
162 Preconditions.checkNotNull(cluster, "cluster should not be null");
163 Preconditions.checkNotNull(configuration, "configuration should not be null");
164 Preconditions.checkNotNull(waitTillReadyCountdownLatch, "waitTillReadyCountdownLatch should not be null");
165 Preconditions.checkNotNull(primaryShardInfoCache, "primaryShardInfoCache should not be null");
167 return Props.create(new ShardManagerCreator(cluster, configuration, datastoreContext,
168 waitTillReadyCountdownLatch, primaryShardInfoCache));
172 public void postStop() {
173 LOG.info("Stopping ShardManager");
175 mBean.unregisterMBean();
179 public void handleCommand(Object message) throws Exception {
180 if (message instanceof FindPrimary) {
181 findPrimary((FindPrimary)message);
182 } else if(message instanceof FindLocalShard){
183 findLocalShard((FindLocalShard) message);
184 } else if (message instanceof UpdateSchemaContext) {
185 updateSchemaContext(message);
186 } else if(message instanceof ActorInitialized) {
187 onActorInitialized(message);
188 } else if (message instanceof ClusterEvent.MemberUp){
189 memberUp((ClusterEvent.MemberUp) message);
190 } else if (message instanceof ClusterEvent.MemberExited){
191 memberExited((ClusterEvent.MemberExited) message);
192 } else if(message instanceof ClusterEvent.MemberRemoved) {
193 memberRemoved((ClusterEvent.MemberRemoved) message);
194 } else if(message instanceof ClusterEvent.UnreachableMember) {
195 memberUnreachable((ClusterEvent.UnreachableMember)message);
196 } else if(message instanceof ClusterEvent.ReachableMember) {
197 memberReachable((ClusterEvent.ReachableMember) message);
198 } else if(message instanceof DatastoreContext) {
199 onDatastoreContext((DatastoreContext)message);
200 } else if(message instanceof RoleChangeNotification) {
201 onRoleChangeNotification((RoleChangeNotification) message);
202 } else if(message instanceof FollowerInitialSyncUpStatus){
203 onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
204 } else if(message instanceof ShardNotInitializedTimeout) {
205 onShardNotInitializedTimeout((ShardNotInitializedTimeout)message);
206 } else if(message instanceof ShardLeaderStateChanged) {
207 onLeaderStateChanged((ShardLeaderStateChanged) message);
208 } else if(message instanceof SwitchShardBehavior){
209 onSwitchShardBehavior((SwitchShardBehavior) message);
210 } else if(message instanceof CreateShard) {
211 onCreateShard((CreateShard)message);
212 } else if(message instanceof AddShardReplica){
213 onAddShardReplica((AddShardReplica)message);
214 } else if(message instanceof RemoveShardReplica){
215 onRemoveShardReplica((RemoveShardReplica)message);
217 unknownMessage(message);
222 private void onCreateShard(CreateShard createShard) {
225 ModuleShardConfiguration moduleShardConfig = createShard.getModuleShardConfig();
226 if(localShards.containsKey(moduleShardConfig.getShardName())) {
227 throw new IllegalStateException(String.format("Shard with name %s already exists",
228 moduleShardConfig.getShardName()));
231 configuration.addModuleShardConfiguration(moduleShardConfig);
233 ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), moduleShardConfig.getShardName());
234 Map<String, String> peerAddresses = getPeerAddresses(moduleShardConfig.getShardName()/*,
235 moduleShardConfig.getShardMemberNames()*/);
237 LOG.debug("onCreateShard: shardId: {}, memberNames: {}. peerAddresses: {}", shardId,
238 moduleShardConfig.getShardMemberNames(), peerAddresses);
240 DatastoreContext shardDatastoreContext = createShard.getDatastoreContext();
241 if(shardDatastoreContext == null) {
242 shardDatastoreContext = datastoreContext;
244 shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext).shardPeerAddressResolver(
245 peerAddressResolver).build();
248 ShardInformation info = new ShardInformation(moduleShardConfig.getShardName(), shardId, peerAddresses,
249 shardDatastoreContext, createShard.getShardPropsCreator(), peerAddressResolver);
250 localShards.put(info.getShardName(), info);
252 mBean.addLocalShard(shardId.toString());
254 if(schemaContext != null) {
255 info.setActor(newShardActor(schemaContext, info));
258 reply = new CreateShardReply();
259 } catch (Exception e) {
260 LOG.error("onCreateShard failed", e);
261 reply = new akka.actor.Status.Failure(e);
264 if(getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
265 getSender().tell(reply, getSelf());
269 private void checkReady(){
270 if (isReadyWithLeaderId()) {
271 LOG.info("{}: All Shards are ready - data store {} is ready, available count is {}",
272 persistenceId(), type, waitTillReadyCountdownLatch.getCount());
274 waitTillReadyCountdownLatch.countDown();
    // Handles a ShardLeaderStateChanged notification from a local shard: records the leader's
    // local data tree and payload version, and invalidates the cached primary-shard info when
    // the leader id actually changed.
    private void onLeaderStateChanged(ShardLeaderStateChanged leaderStateChanged) {
        LOG.info("{}: Received LeaderStateChanged message: {}", persistenceId(), leaderStateChanged);

        ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
        if(shardInformation != null) {
            shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
            shardInformation.setLeaderVersion(leaderStateChanged.getLeaderPayloadVersion());
            // setLeaderId reports whether the leader id changed; a changed leader makes the
            // cached primary info stale.
            if(shardInformation.setLeaderId(leaderStateChanged.getLeaderId())) {
                primaryShardInfoCache.remove(shardInformation.getShardName());
            // NOTE(review): lines between the cache removal and the trace below are missing
            // from this view (likely closing braces and possibly a checkReady() call) —
            // confirm against VCS.
            LOG.debug("No shard found with member Id {}", leaderStateChanged.getMemberId());
295 private void onShardNotInitializedTimeout(ShardNotInitializedTimeout message) {
296 ShardInformation shardInfo = message.getShardInfo();
298 LOG.debug("{}: Received ShardNotInitializedTimeout message for shard {}", persistenceId(),
299 shardInfo.getShardName());
301 shardInfo.removeOnShardInitialized(message.getOnShardInitialized());
303 if(!shardInfo.isShardInitialized()) {
304 LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(), shardInfo.getShardName());
305 message.getSender().tell(createNotInitializedException(shardInfo.shardId), getSelf());
307 LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(), shardInfo.getShardName());
308 message.getSender().tell(createNoShardLeaderException(shardInfo.shardId), getSelf());
312 private void onFollowerInitialSyncStatus(FollowerInitialSyncUpStatus status) {
313 LOG.info("{} Received follower initial sync status for {} status sync done {}", persistenceId(),
314 status.getName(), status.isInitialSyncDone());
316 ShardInformation shardInformation = findShardInformation(status.getName());
318 if(shardInformation != null) {
319 shardInformation.setFollowerSyncStatus(status.isInitialSyncDone());
321 mBean.setSyncStatus(isInSync());
    // Records a shard's new RAFT role and refreshes the MBean sync status.
    private void onRoleChangeNotification(RoleChangeNotification roleChanged) {
        LOG.info("{}: Received role changed for {} from {} to {}", persistenceId(), roleChanged.getMemberId(),
                roleChanged.getOldRole(), roleChanged.getNewRole());

        ShardInformation shardInformation = findShardInformation(roleChanged.getMemberId());
        if(shardInformation != null) {
            shardInformation.setRole(roleChanged.getNewRole());
            // NOTE(review): a line between setRole and the MBean update is missing from this
            // view (possibly a checkReady() call) — confirm against VCS.
            mBean.setSyncStatus(isInSync());
339 private ShardInformation findShardInformation(String memberId) {
340 for(ShardInformation info : localShards.values()){
341 if(info.getShardId().toString().equals(memberId)){
349 private boolean isReadyWithLeaderId() {
350 boolean isReady = true;
351 for (ShardInformation info : localShards.values()) {
352 if(!info.isShardReadyWithLeaderId()){
360 private boolean isInSync(){
361 for (ShardInformation info : localShards.values()) {
362 if(!info.isInSync()){
369 private void onActorInitialized(Object message) {
370 final ActorRef sender = getSender();
372 if (sender == null) {
373 return; //why is a non-actor sending this message? Just ignore.
376 String actorName = sender.path().name();
377 //find shard name from actor name; actor name is stringified shardId
378 ShardIdentifier shardId = ShardIdentifier.builder().fromShardIdString(actorName).build();
380 if (shardId.getShardName() == null) {
384 markShardAsInitialized(shardId.getShardName());
387 private void markShardAsInitialized(String shardName) {
388 LOG.debug("{}: Initializing shard [{}]", persistenceId(), shardName);
390 ShardInformation shardInformation = localShards.get(shardName);
391 if (shardInformation != null) {
392 shardInformation.setActorInitialized();
394 shardInformation.getActor().tell(new RegisterRoleChangeListener(), self());
    // Persistence recovery hook: once recovery completes, purges all previously persisted
    // messages from the akka journal (SchemaContext modules are no longer persisted).
    protected void handleRecover(Object message) throws Exception {
        if (message instanceof RecoveryCompleted) {
            LOG.info("Recovery complete : {}", persistenceId());

            // We no longer persist SchemaContext modules so delete all the prior messages from the akka
            // journal on upgrade from Helium.
            deleteMessages(lastSequenceNr());
            // NOTE(review): the trailing lines of this method are missing from this view
            // (possibly a createLocalShards() call) — confirm against VCS.
409 private void findLocalShard(FindLocalShard message) {
410 final ShardInformation shardInformation = localShards.get(message.getShardName());
412 if(shardInformation == null){
413 getSender().tell(new LocalShardNotFound(message.getShardName()), getSelf());
417 sendResponse(shardInformation, message.isWaitUntilInitialized(), false, new Supplier<Object>() {
419 public Object get() {
420 return new LocalShardFound(shardInformation.getActor());
    // Replies to getSender() with the value from messageSupplier. If the shard is not yet
    // initialized (or, when wantShardReady, has no elected leader), the reply is either
    // deferred via an OnShardInitialized/OnShardReady callback plus a timeout, or an
    // immediate NotInitializedException/NoShardLeaderException failure is sent.
    // NOTE(review): several structural lines (the doWait guard, anonymous-class boilerplate
    // and closing braces) are missing from this view — confirm against VCS.
    private void sendResponse(ShardInformation shardInformation, boolean doWait,
            boolean wantShardReady, final Supplier<Object> messageSupplier) {
        if (!shardInformation.isShardInitialized() || (wantShardReady && !shardInformation.isShardReadyWithLeaderId())) {
            final ActorRef sender = getSender();
            final ActorRef self = self();

            // Deferred reply executed once the shard reaches the requested state.
            Runnable replyRunnable = new Runnable() {
                    sender.tell(messageSupplier.get(), self);

            // OnShardReady additionally waits for an elected leader, not just initialization.
            OnShardInitialized onShardInitialized = wantShardReady ? new OnShardReady(replyRunnable) :
                new OnShardInitialized(replyRunnable);

            shardInformation.addOnShardInitialized(onShardInitialized);

            LOG.debug("{}: Scheduling timer to wait for shard {}", persistenceId(), shardInformation.getShardName());

            FiniteDuration timeout = datastoreContext.getShardInitializationTimeout().duration();
            if(shardInformation.isShardInitialized()) {
                // If the shard is already initialized then we'll wait enough time for the shard to
                // elect a leader, ie 2 times the election timeout.
                timeout = FiniteDuration.create(datastoreContext.getShardRaftConfig()
                    .getElectionTimeOutInterval().toMillis() * 2, TimeUnit.MILLISECONDS);

            // Fails the caller via ShardNotInitializedTimeout if the shard never gets there.
            Cancellable timeoutSchedule = getContext().system().scheduler().scheduleOnce(
                new ShardNotInitializedTimeout(shardInformation, onShardInitialized, sender),
                getContext().dispatcher(), getSelf());

            onShardInitialized.setTimeoutSchedule(timeoutSchedule);

        } else if (!shardInformation.isShardInitialized()) {
            LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(),
                shardInformation.getShardName());
            getSender().tell(createNotInitializedException(shardInformation.shardId), getSelf());
            LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(),
                shardInformation.getShardName());
            getSender().tell(createNoShardLeaderException(shardInformation.shardId), getSelf());

        // Shard is in the requested state — reply immediately.
        getSender().tell(messageSupplier.get(), getSelf());
    // Builds the failure returned when a shard exists but currently has no elected leader.
    private static NoShardLeaderException createNoShardLeaderException(ShardIdentifier shardId) {
        // null message — presumably the exception composes its own text from the shard id;
        // confirm against NoShardLeaderException's constructor.
        return new NoShardLeaderException(null, shardId.toString());
    // Builds the failure returned when a shard replica exists but has not finished initializing.
    private static NotInitializedException createNotInitializedException(ShardIdentifier shardId) {
        return new NotInitializedException(String.format(
            "Found primary shard %s but it's not initialized yet. Please try again later", shardId));
486 private void memberRemoved(ClusterEvent.MemberRemoved message) {
487 String memberName = message.member().roles().head();
489 LOG.debug("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
490 message.member().address());
492 peerAddressResolver.removePeerAddress(memberName);
494 for(ShardInformation info : localShards.values()){
495 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
499 private void memberExited(ClusterEvent.MemberExited message) {
500 String memberName = message.member().roles().head();
502 LOG.debug("{}: Received MemberExited: memberName: {}, address: {}", persistenceId(), memberName,
503 message.member().address());
505 peerAddressResolver.removePeerAddress(memberName);
507 for(ShardInformation info : localShards.values()){
508 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
    // Handles cluster MemberUp: records the member's address so local shards can resolve
    // and notify their peers on that member.
    private void memberUp(ClusterEvent.MemberUp message) {
        String memberName = message.member().roles().head();

        LOG.debug("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
            message.member().address());

        addPeerAddress(memberName, message.member().address());
        // NOTE(review): the trailing lines of this method are missing from this view
        // (possibly a checkReady() call) — confirm against VCS.
523 private void addPeerAddress(String memberName, Address address) {
524 peerAddressResolver.addPeerAddress(memberName, address);
526 for(ShardInformation info : localShards.values()){
527 String shardName = info.getShardName();
528 String peerId = getShardIdentifier(memberName, shardName).toString();
529 info.updatePeerAddress(peerId, peerAddressResolver.getShardActorAddress(shardName, memberName), getSelf());
531 info.peerUp(memberName, peerId, getSelf());
535 private void memberReachable(ClusterEvent.ReachableMember message) {
536 String memberName = message.member().roles().head();
537 LOG.debug("Received ReachableMember: memberName {}, address: {}", memberName, message.member().address());
539 addPeerAddress(memberName, message.member().address());
541 markMemberAvailable(memberName);
544 private void memberUnreachable(ClusterEvent.UnreachableMember message) {
545 String memberName = message.member().roles().head();
546 LOG.debug("Received UnreachableMember: memberName {}, address: {}", memberName, message.member().address());
548 markMemberUnavailable(memberName);
551 private void markMemberUnavailable(final String memberName) {
552 for(ShardInformation info : localShards.values()){
553 String leaderId = info.getLeaderId();
554 if(leaderId != null && leaderId.contains(memberName)) {
555 LOG.debug("Marking Leader {} as unavailable.", leaderId);
556 info.setLeaderAvailable(false);
558 primaryShardInfoCache.remove(info.getShardName());
561 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
565 private void markMemberAvailable(final String memberName) {
566 for(ShardInformation info : localShards.values()){
567 String leaderId = info.getLeaderId();
568 if(leaderId != null && leaderId.contains(memberName)) {
569 LOG.debug("Marking Leader {} as available.", leaderId);
570 info.setLeaderAvailable(true);
573 info.peerUp(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
577 private void onDatastoreContext(DatastoreContext context) {
578 datastoreContext = DatastoreContext.newBuilderFrom(context).shardPeerAddressResolver(
579 peerAddressResolver).build();
580 for (ShardInformation info : localShards.values()) {
581 if (info.getActor() != null) {
582 info.getActor().tell(datastoreContext, getSelf());
587 private void onSwitchShardBehavior(SwitchShardBehavior message) {
588 ShardIdentifier identifier = ShardIdentifier.builder().fromShardIdString(message.getShardName()).build();
590 ShardInformation shardInformation = localShards.get(identifier.getShardName());
592 if(shardInformation != null && shardInformation.getActor() != null) {
593 shardInformation.getActor().tell(
594 new SwitchBehavior(RaftState.valueOf(message.getNewState()), message.getTerm()), getSelf());
596 LOG.warn("Could not switch the behavior of shard {} to {} - shard is not yet available",
597 message.getShardName(), message.getNewState());
    /**
     * Notifies all the local shards of a change in the schema context.
     */
606 private void updateSchemaContext(final Object message) {
607 schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
609 LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getAllModuleIdentifiers().size());
611 for (ShardInformation info : localShards.values()) {
612 if (info.getActor() == null) {
613 LOG.debug("Creating Shard {}", info.getShardId());
614 info.setActor(newShardActor(schemaContext, info));
616 info.getActor().tell(message, getSelf());
622 protected ClusterWrapper getCluster() {
627 protected ActorRef newShardActor(final SchemaContext schemaContext, ShardInformation info) {
628 return getContext().actorOf(info.newProps(schemaContext)
629 .withDispatcher(shardDispatcherPath), info.getShardId().toString());
    // Resolves the primary (leader) replica for the shard named in the message. A local
    // replica answers directly (deferred until ready via sendResponse); otherwise the request
    // is forwarded to a peer ShardManager as RemoteFindPrimary; with no peers at all a
    // PrimaryNotFoundException failure is returned.
    // NOTE(review): guard/closing lines (e.g. a null check on the local ShardInformation and
    // the Supplier's return/early-return statements) are missing from this view — confirm
    // against VCS.
    private void findPrimary(FindPrimary message) {
        LOG.debug("{}: In findPrimary: {}", persistenceId(), message);

        final String shardName = message.getShardName();

        // A RemoteFindPrimary was already forwarded by another ShardManager, so local shard
        // state is not returned again for it.
        final boolean canReturnLocalShardState = !(message instanceof RemoteFindPrimary);

        // First see if the there is a local replica for the shard
        final ShardInformation info = localShards.get(shardName);

        sendResponse(info, message.isWaitUntilReady(), true, new Supplier<Object>() {
            public Object get() {
                String primaryPath = info.getSerializedLeaderActor();
                // A local leader hands back its DataTree directly; a remote leader is
                // reported by actor path and payload version only.
                Object found = canReturnLocalShardState && info.isLeader() ?
                    new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
                    new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());

                if(LOG.isDebugEnabled()) {
                    LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);

        // No local replica — delegate the lookup to a peer ShardManager.
        for(String address: peerAddressResolver.getShardManagerPeerActorAddresses()) {
            LOG.debug("{}: findPrimary for {} forwarding to remote ShardManager {}", persistenceId(),

            getContext().actorSelection(address).forward(new RemoteFindPrimary(shardName,
                message.isWaitUntilReady()), getContext());

        LOG.debug("{}: No shard found for {}", persistenceId(), shardName);

        getSender().tell(new PrimaryNotFoundException(
            String.format("No primary shard found for %s.", shardName)), getSelf());
    /**
     * Constructs the ShardIdentifier for a shard given the name of the member on which
     * the shard resides and the name of the shard.
     */
    private ShardIdentifier getShardIdentifier(String memberName, String shardName){
        return peerAddressResolver.getShardIdentifier(memberName, shardName);
    /**
     * Create the shard replicas that are local to the member on which this ShardManager runs.
     */
692 private void createLocalShards() {
693 String memberName = this.cluster.getCurrentMemberName();
694 Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);
696 ShardPropsCreator shardPropsCreator = new DefaultShardPropsCreator();
697 List<String> localShardActorNames = new ArrayList<>();
698 for(String shardName : memberShardNames){
699 ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
700 Map<String, String> peerAddresses = getPeerAddresses(shardName);
701 localShardActorNames.add(shardId.toString());
702 localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses, datastoreContext,
703 shardPropsCreator, peerAddressResolver));
706 mBean = ShardManagerInfo.createShardManagerMBean(memberName, "shard-manager-" + this.type,
707 datastoreContext.getDataStoreMXBeanType(), localShardActorNames);
709 mBean.setShardManager(this);
    /**
     * Given the name of a shard, find the addresses of all its peers.
     */
717 private Map<String, String> getPeerAddresses(String shardName) {
718 Collection<String> members = configuration.getMembersFromShardName(shardName);
719 Map<String, String> peerAddresses = new HashMap<>();
721 String currentMemberName = this.cluster.getCurrentMemberName();
723 for(String memberName : members) {
724 if(!currentMemberName.equals(memberName)) {
725 ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
726 String address = peerAddressResolver.getShardActorAddress(shardName, memberName);
727 peerAddresses.put(shardId.toString(), address);
730 return peerAddresses;
734 public SupervisorStrategy supervisorStrategy() {
736 return new OneForOneStrategy(10, Duration.create("1 minute"),
737 new Function<Throwable, SupervisorStrategy.Directive>() {
739 public SupervisorStrategy.Directive apply(Throwable t) {
740 LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
741 return SupervisorStrategy.resume();
    // Persistence id, e.g. "shard-manager-config"; also used as the MBean name prefix
    // in createLocalShards().
    public String persistenceId() {
        return "shard-manager-" + type;
754 ShardManagerInfoMBean getMBean(){
758 private DatastoreContext getInitShardDataStoreContext() {
759 return (DatastoreContext.newBuilderFrom(datastoreContext)
760 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
    // Handles AddShardReplica: validates that no local replica already exists, that the shard
    // name is configured, that a SchemaContext is available and that peers exist; then asks
    // self (RemoteFindPrimary) for the shard's current leader and hands off to addShard().
    // NOTE(review): the early-return statements after each failure reply and some argument
    // continuation lines are missing from this view — confirm against VCS.
    private void onAddShardReplica (AddShardReplica shardReplicaMsg) {
        final String shardName = shardReplicaMsg.getShardName();

        // verify the local shard replica is already available in the controller node
        LOG.debug ("received AddShardReplica for shard {}", shardName);
        if (localShards.containsKey(shardName)) {
            LOG.debug ("Local shard {} already available in the controller node", shardName);
            getSender().tell(new akka.actor.Status.Failure(
                new IllegalArgumentException(String.format("Local shard %s already exists",
                    shardName))), getSelf());

        // verify the shard with the specified name is present in the cluster configuration
        if (!(this.configuration.isShardConfigured(shardName))) {
            LOG.debug ("No module configuration exists for shard {}", shardName);
            getSender().tell(new akka.actor.Status.Failure(new IllegalArgumentException(
                String.format("No module configuration exists for shard %s",
                    shardName))), getSelf());

        // Create the localShard
        // A SchemaContext (delivered via UpdateSchemaContext) is required to create the actor.
        if (schemaContext == null) {
            LOG.debug ("schemaContext is not updated to create localShardActor");
            getSender().tell(new akka.actor.Status.Failure(
                new IllegalStateException(String.format(
                    "schemaContext not available to create localShardActor for %s",
                    shardName))), getSelf());

        // Without peers there is no leader to replicate the shard data from.
        Map<String, String> peerAddresses = getPeerAddresses(shardName);
        if (peerAddresses.isEmpty()) {
            LOG.debug ("Shard peers not available for replicating shard data from leader");
            getSender().tell(new akka.actor.Status.Failure(
                new IllegalStateException(String.format(
                    "Cannot add replica for shard %s because no peer is available",
                    shardName))), getSelf());

        // Allow 2x the shard initialization timeout for the primary lookup.
        Timeout findPrimaryTimeout = new Timeout(datastoreContext
            .getShardInitializationTimeout().duration().$times(2));

        final ActorRef sender = getSender();
        Future<Object> futureObj = ask(getSelf(), new RemoteFindPrimary(shardName, true),
        futureObj.onComplete(new OnComplete<Object>() {
            public void onComplete(Throwable failure, Object response) {
                if (failure != null) {
                    LOG.debug ("Failed to receive response for FindPrimary of shard {}",
                    sender.tell(new akka.actor.Status.Failure(new RuntimeException(
                        String.format("Failed to find leader for shard %s", shardName), failure)),
                // Only a remote leader can be replicated from.
                if (!(response instanceof RemotePrimaryShardFound)) {
                    LOG.debug ("Shard leader not available for creating local shard replica {}",
                    sender.tell(new akka.actor.Status.Failure(
                        new IllegalStateException(String.format(
                            "Invalid response type, %s, received from FindPrimary for shard %s",
                            response.getClass().getName(), shardName))), getSelf());

                RemotePrimaryShardFound message = (RemotePrimaryShardFound)response;
                addShard (shardName, message, sender);
        }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
    // Creates the local ShardInformation/actor for the new replica (with elections disabled
    // via getInitShardDataStoreContext()) and sends AddServer to the shard's current leader
    // so the replica joins the shard's RAFT configuration. On failure the local state is
    // rolled back. NOTE(review): some continuation and branch-marker lines are missing from
    // this view — confirm against VCS.
    private void addShard(final String shardName, final RemotePrimaryShardFound response,
            final ActorRef sender) {
        ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
        String localShardAddress = peerAddressResolver.getShardActorAddress(shardName,
            cluster.getCurrentMemberName());

        final ShardInformation shardInfo = new ShardInformation(shardName, shardId,
            getPeerAddresses(shardName), getInitShardDataStoreContext(),
            new DefaultShardPropsCreator(), peerAddressResolver);
        localShards.put(shardName, shardInfo);
        shardInfo.setActor(newShardActor(schemaContext, shardInfo));

        //inform ShardLeader to add this shard as a replica by sending an AddServer message
        LOG.debug ("sending AddServer message to peer {} for shard {}",
            response.getPrimaryPath(), shardId);

        // Allow 4x the leader election timeout for the AddServer round trip.
        Timeout addServerTimeout = new Timeout(datastoreContext
            .getShardLeaderElectionTimeout().duration().$times(4));
        Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
            new AddServer(shardId.toString(), localShardAddress, true), addServerTimeout);

        futureObj.onComplete(new OnComplete<Object>() {
            public void onComplete(Throwable failure, Object addServerResponse) {
                if (failure != null) {
                    LOG.debug ("AddServer request to {} for {} failed",
                        response.getPrimaryPath(), shardName, failure);

                    // Roll back the half-created local replica.
                    localShards.remove(shardName);
                    if (shardInfo.getActor() != null) {
                        shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());

                    sender.tell(new akka.actor.Status.Failure(new RuntimeException(
                        String.format("AddServer request to leader %s for shard %s failed",
                            response.getPrimaryPath(), shardName), failure)), getSelf());

                    AddServerReply reply = (AddServerReply)addServerResponse;
                    onAddServerReply(shardName, shardInfo, reply, sender, response.getPrimaryPath());
        }, new Dispatchers(context().system().dispatchers()).
            getDispatcher(Dispatchers.DispatcherType.Client));
// Handles the leader's AddServerReply for a freshly-created local replica.
// Status OK: push the current (full) datastore context to the local shard,
// register the shard with the JMX mBean, and reply Status.Success to the
// caller. Any other status: undo the local registration (remove from
// localShards, poison the actor) and map the status to a descriptive
// Status.Failure (TIMEOUT and NO_LEADER get specific messages; anything else a
// generic one).
// NOTE(review): extract has gaps — the switch's actual case labels, breaks and
// closing braces are not visible here; the commented-out "//case ..." lines
// are from the original source.
882 private void onAddServerReply (String shardName, ShardInformation shardInfo,
883 AddServerReply replyMsg, ActorRef sender, String leaderPath) {
884 if (replyMsg.getStatus() == ServerChangeStatus.OK) {
885 LOG.debug ("Leader shard successfully added the replica shard {}",
887 // Make the local shard voting capable
888 shardInfo.setDatastoreContext(datastoreContext, getSelf());
889 ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
891 mBean.addLocalShard(shardId.toString());
892 sender.tell(new akka.actor.Status.Success(true), getSelf());
894 LOG.warn ("Cannot add shard replica {} status {}",
895 shardName, replyMsg.getStatus());
896 LOG.debug ("removing the local shard replica for shard {}",
898 //remove the local replica created
899 localShards.remove(shardName);
900 if (shardInfo.getActor() != null) {
901 shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());
// Translate the failure status into a caller-facing exception message.
903 switch (replyMsg.getStatus()) {
904 //case ServerChangeStatus.TIMEOUT:
906 sender.tell(new akka.actor.Status.Failure(new RuntimeException(
907 String.format("The shard leader %s timed out trying to replicate the initial data to the new shard %s. Possible causes - there was a problem replicating the data or shard leadership changed while replicating the shard data",
908 leaderPath, shardName))), getSelf());
910 //case ServerChangeStatus.NO_LEADER:
912 sender.tell(new akka.actor.Status.Failure(new RuntimeException(String.format(
913 "There is no shard leader available for shard %s", shardName))), getSelf());
// Default case: generic failure carrying the raw status.
916 sender.tell(new akka.actor.Status.Failure(new RuntimeException(String.format(
917 "AddServer request to leader %s for shard %s failed with status %s",
918 leaderPath, shardName, replyMsg.getStatus()))), getSelf());
// Handles a RemoveShardReplica request: replies with Status.Failure
// (IllegalArgumentException) if this node hosts no replica of the named shard,
// otherwise acknowledges with Status.Success.
// NOTE(review): extract has gaps — "deleteStatus" is assigned but its use is
// not visible here, and the actual remove-shard interaction (see the
// "call RemoveShard" comment below) appears unimplemented or not shown.
923 private void onRemoveShardReplica (RemoveShardReplica shardReplicaMsg) {
924 String shardName = shardReplicaMsg.getShardName();
925 boolean deleteStatus = false;
927 // verify the local shard replica is available in the controller node
928 if (!localShards.containsKey(shardName)) {
929 LOG.debug ("Local shard replica {} is not available in the controller node", shardName);
930 getSender().tell(new akka.actor.Status.Failure(
931 new IllegalArgumentException(String.format("Local shard %s not available",
932 shardName))), getSelf());
935 // call RemoveShard for the shardName
936 getSender().tell(new akka.actor.Status.Success(true), getSelf());
// Per-shard bookkeeping held by the ShardManager: the shard's actor and path,
// initial peer addresses, raft role / leader tracking, follower sync status,
// and the set of OnShardInitialized callbacks to fire when the shard becomes
// initialized (and, for OnShardReady, when a usable leader is known).
// NOTE(review): this extract has gaps (original line numbering jumps), so some
// statements, else-branches and closing braces are not visible in this view.
941 protected static class ShardInformation {
942 private final ShardIdentifier shardId;
943 private final String shardName;
944 private ActorRef actor;
945 private ActorPath actorPath;
946 private final Map<String, String> initialPeerAddresses;
947 private Optional<DataTree> localShardDataTree;
948 private boolean leaderAvailable = false;
950 // flag that determines if the actor is ready for business
951 private boolean actorInitialized = false;
// Last follower sync status reported; only meaningful while role == Follower.
953 private boolean followerSyncStatus = false;
955 private final Set<OnShardInitialized> onShardInitializedSet = Sets.newHashSet();
// Current raft role name (e.g. "Follower", "Leader", "Candidate"); null until
// the shard actor reports it.
956 private String role ;
957 private String leaderId;
958 private short leaderVersion;
960 private DatastoreContext datastoreContext;
961 private final ShardPropsCreator shardPropsCreator;
962 private final ShardPeerAddressResolver addressResolver;
964 private ShardInformation(String shardName, ShardIdentifier shardId,
965 Map<String, String> initialPeerAddresses, DatastoreContext datastoreContext,
966 ShardPropsCreator shardPropsCreator, ShardPeerAddressResolver addressResolver) {
967 this.shardName = shardName;
968 this.shardId = shardId;
969 this.initialPeerAddresses = initialPeerAddresses;
970 this.datastoreContext = datastoreContext;
971 this.shardPropsCreator = shardPropsCreator;
972 this.addressResolver = addressResolver;
// Builds the Props used to (re)create this shard's actor.
975 Props newProps(SchemaContext schemaContext) {
976 return shardPropsCreator.newProps(shardId, initialPeerAddresses, datastoreContext, schemaContext);
979 String getShardName() {
987 ActorPath getActorPath() {
// Caches the actor's path alongside the reference.
991 void setActor(ActorRef actor) {
993 this.actorPath = actor.path();
996 ShardIdentifier getShardId() {
1000 void setLocalDataTree(Optional<DataTree> localShardDataTree) {
1001 this.localShardDataTree = localShardDataTree;
1004 Optional<DataTree> getLocalShardDataTree() {
1005 return localShardDataTree;
// Forwards a resolved peer address to the shard actor and re-evaluates any
// pending initialization callbacks (the leader may now be resolvable).
1008 void updatePeerAddress(String peerId, String peerAddress, ActorRef sender){
1009 LOG.info("updatePeerAddress for peer {} with address {}", peerId, peerAddress);
1012 if(LOG.isDebugEnabled()) {
1013 LOG.debug("Sending PeerAddressResolved for peer {} with address {} to {}",
1014 peerId, peerAddress, actor.path());
1017 actor.tell(new PeerAddressResolved(peerId, peerAddress), sender);
1020 notifyOnShardInitializedCallbacks();
1023 void peerDown(String memberName, String peerId, ActorRef sender) {
1025 actor.tell(new PeerDown(memberName, peerId), sender);
1029 void peerUp(String memberName, String peerId, ActorRef sender) {
1031 actor.tell(new PeerUp(memberName, peerId), sender);
// "Ready" = a role has been reported and it is not Candidate.
1035 boolean isShardReady() {
1036 return !RaftState.Candidate.name().equals(role) && !Strings.isNullOrEmpty(role);
// Ready AND a leader is known: either this shard is the leader itself, or the
// leader's address can be resolved; IsolatedLeader is excluded.
1039 boolean isShardReadyWithLeaderId() {
1040 return leaderAvailable && isShardReady() && !RaftState.IsolatedLeader.name().equals(role) &&
1041 (isLeader() || addressResolver.resolve(leaderId) != null);
1044 boolean isShardInitialized() {
1045 return getActor() != null && actorInitialized;
1048 boolean isLeader() {
1049 return Objects.equal(leaderId, shardId.toString());
// Returns the leader's serialized actor path: the local actor's serialized
// path when this shard leads, otherwise the resolved address of leaderId.
1052 String getSerializedLeaderActor() {
1054 return Serialization.serializedActorPath(getActor());
1056 return addressResolver.resolve(leaderId);
1060 void setActorInitialized() {
1061 LOG.debug("Shard {} is initialized", shardId);
1063 this.actorInitialized = true;
1065 notifyOnShardInitializedCallbacks();
// Fires queued callbacks: plain OnShardInitialized callbacks run once the
// shard is initialized; OnShardReady callbacks only run when
// isShardReadyWithLeaderId() also holds. Fired callbacks have their timeout
// cancelled (and, per the iterator usage, are presumably removed from the set
// in lines not visible here).
1068 private void notifyOnShardInitializedCallbacks() {
1069 if(onShardInitializedSet.isEmpty()) {
1073 boolean ready = isShardReadyWithLeaderId();
1075 if(LOG.isDebugEnabled()) {
1076 LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId,
1077 ready ? "ready" : "initialized", onShardInitializedSet.size());
1080 Iterator<OnShardInitialized> iter = onShardInitializedSet.iterator();
1081 while(iter.hasNext()) {
1082 OnShardInitialized onShardInitialized = iter.next();
1083 if(!(onShardInitialized instanceof OnShardReady) || ready) {
1085 onShardInitialized.getTimeoutSchedule().cancel();
1086 onShardInitialized.getReplyRunnable().run();
1091 void addOnShardInitialized(OnShardInitialized onShardInitialized) {
1092 onShardInitializedSet.add(onShardInitialized);
1095 void removeOnShardInitialized(OnShardInitialized onShardInitialized) {
1096 onShardInitializedSet.remove(onShardInitialized);
// Role changes may make the shard ready, so re-check pending callbacks.
1099 void setRole(String newRole) {
1100 this.role = newRole;
1102 notifyOnShardInitializedCallbacks();
1105 void setFollowerSyncStatus(boolean syncStatus){
1106 this.followerSyncStatus = syncStatus;
// Sync-status query (method signature in a gap): followers report their
// recorded sync status; leaders take the other visible branch.
1110 if(RaftState.Follower.name().equals(this.role)){
1111 return followerSyncStatus;
1112 } else if(RaftState.Leader.name().equals(this.role)){
// Records the (possibly null) leader id; returns whether it changed. A
// non-null leader also marks the leader available and re-checks callbacks.
1119 boolean setLeaderId(String leaderId) {
1120 boolean changed = !Objects.equal(this.leaderId, leaderId);
1121 this.leaderId = leaderId;
1122 if(leaderId != null) {
1123 this.leaderAvailable = true;
1125 notifyOnShardInitializedCallbacks();
1130 String getLeaderId() {
1134 void setLeaderAvailable(boolean leaderAvailable) {
1135 this.leaderAvailable = leaderAvailable;
1138 short getLeaderVersion() {
1139 return leaderVersion;
1142 void setLeaderVersion(short leaderVersion) {
1143 this.leaderVersion = leaderVersion;
// Swaps in a new DatastoreContext and pushes it to the shard actor so the
// raft policy / settings change takes effect.
1146 void setDatastoreContext(DatastoreContext datastoreContext, ActorRef sender) {
1147 this.datastoreContext = datastoreContext;
1148 //notify the datastoreContextchange
1149 LOG.debug ("Notifying RaftPolicy change via datastoreContextChange for {}",
1151 if (actor != null) {
1152 actor.tell(this.datastoreContext, sender);
// Akka Creator used to build the ShardManager actor's Props; captures the
// constructor dependencies so actor creation can be deferred/serialized
// (hence the serialVersionUID).
// NOTE(review): closing braces fall in gaps of this extract.
1157 private static class ShardManagerCreator implements Creator<ShardManager> {
1158 private static final long serialVersionUID = 1L;
1160 final ClusterWrapper cluster;
1161 final Configuration configuration;
1162 final DatastoreContext datastoreContext;
1163 private final CountDownLatch waitTillReadyCountdownLatch;
1164 private final PrimaryShardInfoFutureCache primaryShardInfoCache;
1166 ShardManagerCreator(ClusterWrapper cluster, Configuration configuration, DatastoreContext datastoreContext,
1167 CountDownLatch waitTillReadyCountdownLatch, PrimaryShardInfoFutureCache primaryShardInfoCache) {
1168 this.cluster = cluster;
1169 this.configuration = configuration;
1170 this.datastoreContext = datastoreContext;
1171 this.waitTillReadyCountdownLatch = waitTillReadyCountdownLatch;
1172 this.primaryShardInfoCache = primaryShardInfoCache;
// Instantiates the actor from the captured dependencies.
1176 public ShardManager create() throws Exception {
1177 return new ShardManager(cluster, configuration, datastoreContext, waitTillReadyCountdownLatch,
1178 primaryShardInfoCache);
// Callback holder registered while waiting for a shard to become initialized:
// pairs the reply Runnable to run on success with the Cancellable timeout task
// that fires ShardNotInitializedTimeout if the shard never initializes.
1182 private static class OnShardInitialized {
1183 private final Runnable replyRunnable;
1184 private Cancellable timeoutSchedule;
1186 OnShardInitialized(Runnable replyRunnable) {
1187 this.replyRunnable = replyRunnable;
1190 Runnable getReplyRunnable() {
1191 return replyRunnable;
1194 Cancellable getTimeoutSchedule() {
1195 return timeoutSchedule;
1198 void setTimeoutSchedule(Cancellable timeoutSchedule) {
1199 this.timeoutSchedule = timeoutSchedule;
// Marker subclass: ShardInformation.notifyOnShardInitializedCallbacks only
// runs OnShardReady callbacks when the shard is ready with a known leader,
// whereas plain OnShardInitialized callbacks run on initialization alone.
1203 private static class OnShardReady extends OnShardInitialized {
1204 OnShardReady(Runnable replyRunnable) {
1205 super(replyRunnable);
// Immutable self-message carrying the context needed to fail a waiting caller
// when a shard does not initialize in time: the original sender, the shard's
// info, and the pending OnShardInitialized callback to deregister.
1209 private static class ShardNotInitializedTimeout {
1210 private final ActorRef sender;
1211 private final ShardInformation shardInfo;
1212 private final OnShardInitialized onShardInitialized;
1214 ShardNotInitializedTimeout(ShardInformation shardInfo, OnShardInitialized onShardInitialized, ActorRef sender) {
1215 this.sender = sender;
1216 this.shardInfo = shardInfo;
1217 this.onShardInitialized = onShardInitialized;
1220 ActorRef getSender() {
1224 ShardInformation getShardInfo() {
1228 OnShardInitialized getOnShardInitialized() {
1229 return onShardInitialized;
1234 * We no longer persist SchemaContextModules but keep this class around for now for backwards
1235 * compatibility so we don't get de-serialization failures on upgrade from Helium.
1238 static class SchemaContextModules implements Serializable {
1239 private static final long serialVersionUID = -8884620101025936590L;
1241 private final Set<String> modules;
1243 SchemaContextModules(Set<String> modules){
1244 this.modules = modules;
1247 public Set<String> getModules() {