2 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
4 * This program and the accompanying materials are made available under the
5 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6 * and is available at http://www.eclipse.org/legal/epl-v10.html
9 package org.opendaylight.controller.cluster.datastore;
11 import static akka.pattern.Patterns.ask;
12 import akka.actor.ActorPath;
13 import akka.actor.ActorRef;
14 import akka.actor.Address;
15 import akka.actor.Cancellable;
16 import akka.actor.OneForOneStrategy;
17 import akka.actor.PoisonPill;
18 import akka.actor.Props;
19 import akka.actor.SupervisorStrategy;
20 import akka.cluster.ClusterEvent;
21 import akka.dispatch.OnComplete;
22 import akka.japi.Creator;
23 import akka.japi.Function;
24 import akka.persistence.RecoveryCompleted;
25 import akka.serialization.Serialization;
26 import akka.util.Timeout;
27 import com.google.common.annotations.VisibleForTesting;
28 import com.google.common.base.Objects;
29 import com.google.common.base.Optional;
30 import com.google.common.base.Preconditions;
31 import com.google.common.base.Strings;
32 import com.google.common.base.Supplier;
33 import com.google.common.collect.Sets;
34 import java.io.Serializable;
35 import java.util.ArrayList;
36 import java.util.Collection;
37 import java.util.HashMap;
38 import java.util.Iterator;
39 import java.util.List;
42 import java.util.concurrent.CountDownLatch;
43 import java.util.concurrent.TimeUnit;
44 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
45 import org.opendaylight.controller.cluster.datastore.config.Configuration;
46 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
47 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
48 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
49 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
50 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
51 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
52 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfoMBean;
53 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
54 import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
55 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
56 import org.opendaylight.controller.cluster.datastore.messages.CreateShardReply;
57 import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
58 import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
59 import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
60 import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
61 import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
62 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
63 import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
64 import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
65 import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
66 import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
67 import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
68 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
69 import org.opendaylight.controller.cluster.datastore.messages.SwitchShardBehavior;
70 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
71 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
72 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
73 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
74 import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
75 import org.opendaylight.controller.cluster.raft.RaftState;
76 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
77 import org.opendaylight.controller.cluster.raft.base.messages.SwitchBehavior;
78 import org.opendaylight.controller.cluster.raft.messages.AddServer;
79 import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
80 import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
81 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
82 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
83 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
84 import org.slf4j.Logger;
85 import org.slf4j.LoggerFactory;
86 import scala.concurrent.Future;
87 import scala.concurrent.duration.Duration;
88 import scala.concurrent.duration.FiniteDuration;
91 * The ShardManager has the following jobs,
93 * <li> Create all the local shard replicas that belong on this cluster member
94 * <li> Find the address of the local shard
95 * <li> Find the primary replica for any given shard
96 * <li> Monitor the cluster members and store their addresses
99 public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
// Persistent actor that owns the local shard replicas for one datastore type
// (config or operational): it creates shard actors, tracks their leadership /
// sync state, resolves primaries for clients and reacts to cluster membership.
101 private static final Logger LOG = LoggerFactory.getLogger(ShardManager.class);
103 // Stores a mapping between a shard name and it's corresponding information
104 // Shard names look like inventory, topology etc and are as specified in
106 private final Map<String, ShardInformation> localShards = new HashMap<>();
108 // The type of a ShardManager reflects the type of the datastore itself
109 // A data store could be of type config/operational
110 private final String type;
// Provides cluster membership events and the current member's name.
112 private final ClusterWrapper cluster;
// Static module/shard configuration: which shards live on which members.
114 private final Configuration configuration;
// Akka dispatcher path used when spawning shard actors (see newShardActor).
116 private final String shardDispatcherPath;
// JMX bean exposing shard-manager state; created in createLocalShards(),
// unregistered in postStop().
118 private ShardManagerInfo mBean;
// Mutable: replaced whenever a new DatastoreContext message is received.
120 private DatastoreContext datastoreContext;
// Counted down once every local shard is ready with a known leader.
122 private final CountDownLatch waitTillReadyCountdownLatch;
// Cache of resolved primary-shard futures; entries are removed when a
// shard's leader changes or a leader's member becomes unreachable.
124 private final PrimaryShardInfoFutureCache primaryShardInfoCache;
// Maps member names to shard-actor addresses for peer resolution.
126 private final ShardPeerAddressResolver peerAddressResolver;
// Set via UpdateSchemaContext; shard actors are only created once non-null.
128 private SchemaContext schemaContext;
// Constructor: validates the required collaborators, derives the datastore
// type and shard dispatcher path, and rebuilds the DatastoreContext so it
// carries this manager's peer-address resolver. Also subscribes this actor
// to cluster member events so memberUp/memberRemoved etc. are delivered.
132 protected ShardManager(ClusterWrapper cluster, Configuration configuration,
133 DatastoreContext datastoreContext, CountDownLatch waitTillReadyCountdownLatch,
134 PrimaryShardInfoFutureCache primaryShardInfoCache) {
136 this.cluster = Preconditions.checkNotNull(cluster, "cluster should not be null");
137 this.configuration = Preconditions.checkNotNull(configuration, "configuration should not be null");
138 this.datastoreContext = datastoreContext;
139 this.type = datastoreContext.getDataStoreType();
140 this.shardDispatcherPath =
141 new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
142 this.waitTillReadyCountdownLatch = waitTillReadyCountdownLatch;
143 this.primaryShardInfoCache = primaryShardInfoCache;
145 peerAddressResolver = new ShardPeerAddressResolver(type, cluster.getCurrentMemberName());
// datastoreContext is assigned twice on purpose: the second assignment bakes
// the freshly created peerAddressResolver into the context.
146 this.datastoreContext = DatastoreContext.newBuilderFrom(datastoreContext).shardPeerAddressResolver(
147 peerAddressResolver).build();
149 // Subscribe this actor to cluster member events
150 cluster.subscribeToMemberEvents(getSelf());
// Akka factory method: validates arguments eagerly (so failures surface at
// the call site rather than inside actor creation) and returns Props backed
// by a ShardManagerCreator.
155 public static Props props(
156 final ClusterWrapper cluster,
157 final Configuration configuration,
158 final DatastoreContext datastoreContext,
159 final CountDownLatch waitTillReadyCountdownLatch,
160 final PrimaryShardInfoFutureCache primaryShardInfoCache) {
162 Preconditions.checkNotNull(cluster, "cluster should not be null");
163 Preconditions.checkNotNull(configuration, "configuration should not be null");
164 Preconditions.checkNotNull(waitTillReadyCountdownLatch, "waitTillReadyCountdownLatch should not be null");
165 Preconditions.checkNotNull(primaryShardInfoCache, "primaryShardInfoCache should not be null");
167 return Props.create(new ShardManagerCreator(cluster, configuration, datastoreContext,
168 waitTillReadyCountdownLatch, primaryShardInfoCache));
// Actor shutdown hook: unregisters the JMX bean.
// NOTE(review): mBean is only assigned in createLocalShards(); if the actor
// stops before that runs this would NPE — TODO confirm lifecycle ordering.
172 public void postStop() {
173 LOG.info("Stopping ShardManager");
175 mBean.unregisterMBean();
// Central message dispatch: routes each incoming actor message to its
// dedicated handler based on runtime type; anything unrecognized falls
// through to unknownMessage(). Order matters only in that each branch is
// mutually exclusive by type.
179 public void handleCommand(Object message) throws Exception {
180 if (message instanceof FindPrimary) {
181 findPrimary((FindPrimary)message);
182 } else if(message instanceof FindLocalShard){
183 findLocalShard((FindLocalShard) message);
184 } else if (message instanceof UpdateSchemaContext) {
185 updateSchemaContext(message);
186 } else if(message instanceof ActorInitialized) {
187 onActorInitialized(message);
188 } else if (message instanceof ClusterEvent.MemberUp){
189 memberUp((ClusterEvent.MemberUp) message);
190 } else if (message instanceof ClusterEvent.MemberExited){
191 memberExited((ClusterEvent.MemberExited) message);
192 } else if(message instanceof ClusterEvent.MemberRemoved) {
193 memberRemoved((ClusterEvent.MemberRemoved) message);
194 } else if(message instanceof ClusterEvent.UnreachableMember) {
195 memberUnreachable((ClusterEvent.UnreachableMember)message);
196 } else if(message instanceof ClusterEvent.ReachableMember) {
197 memberReachable((ClusterEvent.ReachableMember) message);
198 } else if(message instanceof DatastoreContext) {
199 onDatastoreContext((DatastoreContext)message);
200 } else if(message instanceof RoleChangeNotification) {
201 onRoleChangeNotification((RoleChangeNotification) message);
202 } else if(message instanceof FollowerInitialSyncUpStatus){
203 onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
204 } else if(message instanceof ShardNotInitializedTimeout) {
205 onShardNotInitializedTimeout((ShardNotInitializedTimeout)message);
206 } else if(message instanceof ShardLeaderStateChanged) {
207 onLeaderStateChanged((ShardLeaderStateChanged) message);
208 } else if(message instanceof SwitchShardBehavior){
209 onSwitchShardBehavior((SwitchShardBehavior) message);
210 } else if(message instanceof CreateShard) {
211 onCreateShard((CreateShard)message);
212 } else if(message instanceof AddShardReplica){
213 onAddShardReplica((AddShardReplica)message);
214 } else if(message instanceof RemoveShardReplica){
215 onRemoveShardReplica((RemoveShardReplica)message);
// Fallback for unrecognized message types (delegates to base-class handling).
217 unknownMessage(message);
// Handles a dynamic CreateShard request: registers the module/shard config,
// builds ShardInformation (with peer addresses resolved for this shard),
// starts the shard actor if a SchemaContext is already available, and
// replies with CreateShardReply or a Status.Failure wrapping any exception.
222 private void onCreateShard(CreateShard createShard) {
225 ModuleShardConfiguration moduleShardConfig = createShard.getModuleShardConfig();
// Reject duplicates; the thrown exception is caught below and returned to
// the sender as a Status.Failure.
226 if(localShards.containsKey(moduleShardConfig.getShardName())) {
227 throw new IllegalStateException(String.format("Shard with name %s already exists",
228 moduleShardConfig.getShardName()));
231 configuration.addModuleShardConfiguration(moduleShardConfig);
233 ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), moduleShardConfig.getShardName());
234 Map<String, String> peerAddresses = getPeerAddresses(moduleShardConfig.getShardName()/*,
235 moduleShardConfig.getShardMemberNames()*/);
237 LOG.debug("onCreateShard: shardId: {}, memberNames: {}. peerAddresses: {}", shardId,
238 moduleShardConfig.getShardMemberNames(), peerAddresses);
// Use the per-shard context if the request supplied one, otherwise the
// manager-wide default; either way stamp in the peer-address resolver.
240 DatastoreContext shardDatastoreContext = createShard.getDatastoreContext();
241 if(shardDatastoreContext == null) {
242 shardDatastoreContext = datastoreContext;
244 shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext).shardPeerAddressResolver(
245 peerAddressResolver).build();
248 ShardInformation info = new ShardInformation(moduleShardConfig.getShardName(), shardId, peerAddresses,
249 shardDatastoreContext, createShard.getShardPropsCreator(), peerAddressResolver);
250 localShards.put(info.getShardName(), info);
252 mBean.addLocalShard(shardId.toString());
// Without a SchemaContext the actor is deferred until updateSchemaContext().
254 if(schemaContext != null) {
255 info.setActor(newShardActor(schemaContext, info));
258 reply = new CreateShardReply();
259 } catch (Exception e) {
260 LOG.error("onCreateShard failed", e);
261 reply = new akka.actor.Status.Failure(e);
// Only reply when there is a real sender (not deadLetters).
264 if(getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
265 getSender().tell(reply, getSelf());
// Counts down the readiness latch once every local shard is ready with a
// known leader, signalling waiters that this data store is usable.
269 private void checkReady(){
270 if (isReadyWithLeaderId()) {
271 LOG.info("{}: All Shards are ready - data store {} is ready, available count is {}",
272 persistenceId(), type, waitTillReadyCountdownLatch.getCount());
274 waitTillReadyCountdownLatch.countDown();
// Updates the matching ShardInformation with the new leader's data tree and
// payload version. If the leader id actually changed, the cached primary
// info for that shard is invalidated so clients re-resolve the primary.
278 private void onLeaderStateChanged(ShardLeaderStateChanged leaderStateChanged) {
279 LOG.info("{}: Received LeaderStateChanged message: {}", persistenceId(), leaderStateChanged);
281 ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
282 if(shardInformation != null) {
283 shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
284 shardInformation.setLeaderVersion(leaderStateChanged.getLeaderPayloadVersion());
285 if(shardInformation.setLeaderId(leaderStateChanged.getLeaderId())) {
286 primaryShardInfoCache.remove(shardInformation.getShardName());
// No local shard matches the reported member id — nothing to update.
291 LOG.debug("No shard found with member Id {}", leaderStateChanged.getMemberId());
// Fired when a caller waiting on shard initialization (see sendResponse)
// times out: removes the pending callback and replies to the original
// sender with NotInitializedException (shard never initialized) or
// NoShardLeaderException (initialized but leaderless).
295 private void onShardNotInitializedTimeout(ShardNotInitializedTimeout message) {
296 ShardInformation shardInfo = message.getShardInfo();
298 LOG.debug("{}: Received ShardNotInitializedTimeout message for shard {}", persistenceId(),
299 shardInfo.getShardName());
301 shardInfo.removeOnShardInitialized(message.getOnShardInitialized());
303 if(!shardInfo.isShardInitialized()) {
304 LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(), shardInfo.getShardName());
305 message.getSender().tell(createNotInitializedException(shardInfo.shardId), getSelf());
307 LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(), shardInfo.getShardName());
308 message.getSender().tell(createNoShardLeaderException(shardInfo.shardId), getSelf());
// Records a follower shard's initial-sync completion state and refreshes
// the aggregate sync status exposed through the JMX bean.
312 private void onFollowerInitialSyncStatus(FollowerInitialSyncUpStatus status) {
313 LOG.info("{} Received follower initial sync status for {} status sync done {}", persistenceId(),
314 status.getName(), status.isInitialSyncDone());
316 ShardInformation shardInformation = findShardInformation(status.getName());
318 if(shardInformation != null) {
319 shardInformation.setFollowerSyncStatus(status.isInitialSyncDone());
321 mBean.setSyncStatus(isInSync());
// Records a shard's RAFT role change (e.g. Follower -> Leader) and refreshes
// the aggregate sync status exposed through the JMX bean.
326 private void onRoleChangeNotification(RoleChangeNotification roleChanged) {
327 LOG.info("{}: Received role changed for {} from {} to {}", persistenceId(), roleChanged.getMemberId(),
328 roleChanged.getOldRole(), roleChanged.getNewRole());
330 ShardInformation shardInformation = findShardInformation(roleChanged.getMemberId());
331 if(shardInformation != null) {
332 shardInformation.setRole(roleChanged.getNewRole());
334 mBean.setSyncStatus(isInSync());
// Linear scan of localShards for the entry whose stringified shard id equals
// the given member id; presumably returns null when no match — the return
// statements are outside this visible listing, TODO confirm.
339 private ShardInformation findShardInformation(String memberId) {
340 for(ShardInformation info : localShards.values()){
341 if(info.getShardId().toString().equals(memberId)){
// True only when every local shard reports ready-with-leader; any shard that
// is not ready flips the result (the false branch is outside this listing).
349 private boolean isReadyWithLeaderId() {
350 boolean isReady = true;
351 for (ShardInformation info : localShards.values()) {
352 if(!info.isShardReadyWithLeaderId()){
// Aggregate sync check: fails as soon as any local shard is out of sync
// (return statements fall outside this visible listing).
360 private boolean isInSync(){
361 for (ShardInformation info : localShards.values()) {
362 if(!info.isInSync()){
// Handles ActorInitialized sent by a shard actor once it has recovered:
// derives the shard name from the sender's actor name (which is the
// stringified ShardIdentifier) and marks that shard initialized.
369 private void onActorInitialized(Object message) {
370 final ActorRef sender = getSender();
372 if (sender == null) {
373 return; //why is a non-actor sending this message? Just ignore.
376 String actorName = sender.path().name();
377 //find shard name from actor name; actor name is stringified shardId
378 ShardIdentifier shardId = ShardIdentifier.builder().fromShardIdString(actorName).build();
// A null shard name means the actor name did not parse as a shard id.
380 if (shardId.getShardName() == null) {
384 markShardAsInitialized(shardId.getShardName());
// Flags the named local shard as initialized and asks its actor to notify
// this manager of future role changes (RegisterRoleChangeListener).
387 private void markShardAsInitialized(String shardName) {
388 LOG.debug("{}: Initializing shard [{}]", persistenceId(), shardName);
390 ShardInformation shardInformation = localShards.get(shardName);
391 if (shardInformation != null) {
392 shardInformation.setActorInitialized();
394 shardInformation.getActor().tell(new RegisterRoleChangeListener(), self());
// Persistence recovery hook: on RecoveryCompleted, purge all journal
// entries up to the last sequence number (legacy SchemaContext messages
// from pre-Lithium/Helium installs are no longer needed).
399 protected void handleRecover(Object message) throws Exception {
400 if (message instanceof RecoveryCompleted) {
401 LOG.info("Recovery complete : {}", persistenceId());
403 // We no longer persist SchemaContext modules so delete all the prior messages from the akka
404 // journal on upgrade from Helium.
405 deleteMessages(lastSequenceNr());
// Resolves a local shard by name: replies LocalShardNotFound immediately if
// unknown, otherwise defers through sendResponse (which may wait for shard
// initialization) before replying LocalShardFound with the shard's ActorRef.
409 private void findLocalShard(FindLocalShard message) {
410 final ShardInformation shardInformation = localShards.get(message.getShardName());
412 if(shardInformation == null){
413 getSender().tell(new LocalShardNotFound(message.getShardName()), getSelf());
// wantShardReady=false: initialization is enough; a leader is not required.
417 sendResponse(shardInformation, message.isWaitUntilInitialized(), false, new Supplier<Object>() {
419 public Object get() {
420 return new LocalShardFound(shardInformation.getActor());
// Shared reply mechanism for findLocalShard/findPrimary. If the shard is
// not yet in the required state (initialized, and optionally ready with a
// leader) and the caller asked to wait, registers a callback plus a timeout
// timer; otherwise replies immediately — either with the supplied message
// or with the appropriate not-initialized / no-leader exception.
425 private void sendResponse(ShardInformation shardInformation, boolean doWait,
426 boolean wantShardReady, final Supplier<Object> messageSupplier) {
427 if (!shardInformation.isShardInitialized() || (wantShardReady && !shardInformation.isShardReadyWithLeaderId())) {
// Capture sender/self now: the Runnable executes later from a callback.
429 final ActorRef sender = getSender();
430 final ActorRef self = self();
432 Runnable replyRunnable = new Runnable() {
435 sender.tell(messageSupplier.get(), self);
// OnShardReady additionally waits for a leader; OnShardInitialized does not.
439 OnShardInitialized onShardInitialized = wantShardReady ? new OnShardReady(replyRunnable) :
440 new OnShardInitialized(replyRunnable);
442 shardInformation.addOnShardInitialized(onShardInitialized);
444 LOG.debug("{}: Scheduling timer to wait for shard {}", persistenceId(), shardInformation.getShardName());
446 FiniteDuration timeout = datastoreContext.getShardInitializationTimeout().duration();
447 if(shardInformation.isShardInitialized()) {
448 // If the shard is already initialized then we'll wait enough time for the shard to
449 // elect a leader, ie 2 times the election timeout.
450 timeout = FiniteDuration.create(datastoreContext.getShardRaftConfig()
451 .getElectionTimeOutInterval().toMillis() * 2, TimeUnit.MILLISECONDS);
// The timer delivers ShardNotInitializedTimeout back to this actor, which
// then removes the callback and sends the failure reply (see handler above).
454 Cancellable timeoutSchedule = getContext().system().scheduler().scheduleOnce(
456 new ShardNotInitializedTimeout(shardInformation, onShardInitialized, sender),
457 getContext().dispatcher(), getSelf());
459 onShardInitialized.setTimeoutSchedule(timeoutSchedule);
461 } else if (!shardInformation.isShardInitialized()) {
462 LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(),
463 shardInformation.getShardName());
464 getSender().tell(createNotInitializedException(shardInformation.shardId), getSelf());
466 LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(),
467 shardInformation.getShardName());
468 getSender().tell(createNoShardLeaderException(shardInformation.shardId), getSelf());
// Happy path: shard is in the required state, reply immediately.
474 getSender().tell(messageSupplier.get(), getSelf());
// Builds the standard "no leader" failure for a shard; the null first
// argument lets NoShardLeaderException supply its default message text.
477 private static NoShardLeaderException createNoShardLeaderException(ShardIdentifier shardId) {
478 return new NoShardLeaderException(null, shardId.toString());
// Builds the standard "shard exists but not yet initialized" failure.
481 private static NotInitializedException createNotInitializedException(ShardIdentifier shardId) {
482 return new NotInitializedException(String.format(
483 "Found primary shard %s but it's not initialized yet. Please try again later", shardId));
// Cluster event: a member was removed. Drops its peer address and tells
// every local shard that this peer is down.
// NOTE(review): the member name is taken from the first cluster role —
// assumes one role per member, same convention as the other member handlers.
486 private void memberRemoved(ClusterEvent.MemberRemoved message) {
487 String memberName = message.member().roles().head();
489 LOG.debug("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
490 message.member().address());
492 peerAddressResolver.removePeerAddress(memberName);
494 for(ShardInformation info : localShards.values()){
495 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
// Cluster event: a member exited gracefully. Identical handling to
// memberRemoved: forget its peer address and notify local shards peerDown.
499 private void memberExited(ClusterEvent.MemberExited message) {
500 String memberName = message.member().roles().head();
502 LOG.debug("{}: Received MemberExited: memberName: {}, address: {}", persistenceId(), memberName,
503 message.member().address());
505 peerAddressResolver.removePeerAddress(memberName);
507 for(ShardInformation info : localShards.values()){
508 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
// Cluster event: a member joined. Records its address and propagates the
// resolved per-shard peer addresses to local shards (see addPeerAddress).
512 private void memberUp(ClusterEvent.MemberUp message) {
513 String memberName = message.member().roles().head();
515 LOG.debug("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
516 message.member().address());
518 addPeerAddress(memberName, message.member().address());
// Registers the member's base address with the resolver, then for every
// local shard pushes the updated peer shard-actor address and a peerUp
// notification to the shard actor.
523 private void addPeerAddress(String memberName, Address address) {
524 peerAddressResolver.addPeerAddress(memberName, address);
526 for(ShardInformation info : localShards.values()){
527 String shardName = info.getShardName();
528 String peerId = getShardIdentifier(memberName, shardName).toString();
529 info.updatePeerAddress(peerId, peerAddressResolver.getShardActorAddress(shardName, memberName), getSelf());
531 info.peerUp(memberName, peerId, getSelf());
// Cluster event: a previously unreachable member is reachable again —
// re-register its address and mark any leaders on it as available.
535 private void memberReachable(ClusterEvent.ReachableMember message) {
536 String memberName = message.member().roles().head();
537 LOG.debug("Received ReachableMember: memberName {}, address: {}", memberName, message.member().address());
539 addPeerAddress(memberName, message.member().address());
541 markMemberAvailable(memberName);
// Cluster event: a member became unreachable — mark any shard leaders on it
// as unavailable (see markMemberUnavailable).
544 private void memberUnreachable(ClusterEvent.UnreachableMember message) {
545 String memberName = message.member().roles().head();
546 LOG.debug("Received UnreachableMember: memberName {}, address: {}", memberName, message.member().address());
548 markMemberUnavailable(memberName);
// For each local shard whose current leader lives on the given member,
// flags the leader unavailable and evicts the cached primary info so
// clients must re-resolve; also sends peerDown for that member to the shard.
// NOTE(review): leaderId.contains(memberName) is a substring match — a
// member name that is a substring of another member's name would also
// match; presumably acceptable for the naming scheme in use, TODO confirm.
551 private void markMemberUnavailable(final String memberName) {
552 for(ShardInformation info : localShards.values()){
553 String leaderId = info.getLeaderId();
554 if(leaderId != null && leaderId.contains(memberName)) {
555 LOG.debug("Marking Leader {} as unavailable.", leaderId);
556 info.setLeaderAvailable(false);
558 primaryShardInfoCache.remove(info.getShardName());
561 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
// Mirror of markMemberUnavailable: re-flags leaders hosted on the member as
// available and sends peerUp for that member to each local shard.
565 private void markMemberAvailable(final String memberName) {
566 for(ShardInformation info : localShards.values()){
567 String leaderId = info.getLeaderId();
568 if(leaderId != null && leaderId.contains(memberName)) {
569 LOG.debug("Marking Leader {} as available.", leaderId);
570 info.setLeaderAvailable(true);
573 info.peerUp(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
// Accepts a runtime configuration update: rebuilds the context with this
// manager's peer-address resolver and forwards it to every running shard.
577 private void onDatastoreContext(DatastoreContext context) {
578 datastoreContext = DatastoreContext.newBuilderFrom(context).shardPeerAddressResolver(
579 peerAddressResolver).build();
580 for (ShardInformation info : localShards.values()) {
581 if (info.getActor() != null) {
582 info.getActor().tell(datastoreContext, getSelf());
// Admin request to force a shard's RAFT behavior (e.g. to Leader/Follower):
// parses the shard id from the message's shard-name string and forwards a
// SwitchBehavior to the shard actor if it exists, logging a warning if not.
587 private void onSwitchShardBehavior(SwitchShardBehavior message) {
588 ShardIdentifier identifier = ShardIdentifier.builder().fromShardIdString(message.getShardName()).build();
590 ShardInformation shardInformation = localShards.get(identifier.getShardName());
592 if(shardInformation != null && shardInformation.getActor() != null) {
593 shardInformation.getActor().tell(
594 new SwitchBehavior(RaftState.valueOf(message.getNewState()), message.getTerm()), getSelf());
596 LOG.warn("Could not switch the behavior of shard {} to {} - shard is not yet available",
597 message.getShardName(), message.getNewState());
602 * Notifies all the local shards of a change in the schema context
// Stores the new SchemaContext; shards whose actors were deferred (created
// before any SchemaContext arrived) are started here, and the update is
// forwarded to every shard actor.
606 private void updateSchemaContext(final Object message) {
607 schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
609 LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getAllModuleIdentifiers().size());
611 for (ShardInformation info : localShards.values()) {
612 if (info.getActor() == null) {
613 LOG.debug("Creating Shard {}", info.getShardId());
614 info.setActor(newShardActor(schemaContext, info));
616 info.getActor().tell(message, getSelf());
// Accessor for the cluster wrapper (protected for test overrides); the
// body's return statement falls outside this visible listing.
622 protected ClusterWrapper getCluster() {
// Spawns the shard actor as a child of this manager, on the dedicated shard
// dispatcher, named with the stringified shard id (overridable for tests).
627 protected ActorRef newShardActor(final SchemaContext schemaContext, ShardInformation info) {
628 return getContext().actorOf(info.newProps(schemaContext)
629 .withDispatcher(shardDispatcherPath), info.getShardId().toString());
// Resolves the primary (leader) replica for a shard. If a local replica
// exists, replies via sendResponse with either LocalPrimaryShardFound (this
// node is leader and local state may be returned) or RemotePrimaryShardFound.
// Otherwise the request is forwarded to every peer ShardManager as a
// RemoteFindPrimary; with no peers, PrimaryNotFoundException is returned.
632 private void findPrimary(FindPrimary message) {
633 LOG.debug("{}: In findPrimary: {}", persistenceId(), message);
635 final String shardName = message.getShardName();
// A RemoteFindPrimary came from another ShardManager; never answer it with
// local state, only with a serialized remote-primary reference.
636 final boolean canReturnLocalShardState = !(message instanceof RemoteFindPrimary);
638 // First see if the there is a local replica for the shard
639 final ShardInformation info = localShards.get(shardName);
641 sendResponse(info, message.isWaitUntilReady(), true, new Supplier<Object>() {
643 public Object get() {
644 String primaryPath = info.getSerializedLeaderActor();
645 Object found = canReturnLocalShardState && info.isLeader() ?
646 new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
647 new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());
649 if(LOG.isDebugEnabled()) {
650 LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
// No local replica: fan out to peer ShardManagers.
660 for(String address: peerAddressResolver.getShardManagerPeerActorAddresses()) {
661 LOG.debug("{}: findPrimary for {} forwarding to remote ShardManager {}", persistenceId(),
664 getContext().actorSelection(address).forward(new RemoteFindPrimary(shardName,
665 message.isWaitUntilReady()), getContext());
669 LOG.debug("{}: No shard found for {}", persistenceId(), shardName);
671 getSender().tell(new PrimaryNotFoundException(
672 String.format("No primary shard found for %s.", shardName)), getSelf());
676 * Construct the name of the shard actor given the name of the member on
677 * which the shard resides and the name of the shard
// Delegates identifier construction to the peer-address resolver so naming
// stays consistent with address resolution.
683 private ShardIdentifier getShardIdentifier(String memberName, String shardName){
684 return peerAddressResolver.getShardIdentifier(memberName, shardName);
688 * Create shards that are local to the member on which the ShardManager
// Builds ShardInformation for every shard configured on this member (actors
// themselves are created later, once a SchemaContext arrives) and registers
// the JMX bean listing the local shard actor names.
692 private void createLocalShards() {
693 String memberName = this.cluster.getCurrentMemberName();
694 Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);
696 ShardPropsCreator shardPropsCreator = new DefaultShardPropsCreator();
697 List<String> localShardActorNames = new ArrayList<>();
698 for(String shardName : memberShardNames){
699 ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
700 Map<String, String> peerAddresses = getPeerAddresses(shardName);
701 localShardActorNames.add(shardId.toString());
702 localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses, datastoreContext,
703 shardPropsCreator, peerAddressResolver));
706 mBean = ShardManagerInfo.createShardManagerMBean(memberName, "shard-manager-" + this.type,
707 datastoreContext.getDataStoreMXBeanType(), localShardActorNames);
709 mBean.setShardManager(this);
713 * Given the name of the shard find the addresses of all it's peers
// Returns a map of peer shard-id string -> resolved actor address for every
// member hosting this shard, excluding the current member. Addresses may be
// null if the resolver has not yet learned a member's address.
717 private Map<String, String> getPeerAddresses(String shardName) {
718 Collection<String> members = configuration.getMembersFromShardName(shardName);
719 Map<String, String> peerAddresses = new HashMap<>();
721 String currentMemberName = this.cluster.getCurrentMemberName();
723 for(String memberName : members) {
724 if(!currentMemberName.equals(memberName)) {
725 ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
726 String address = peerAddressResolver.getShardActorAddress(shardName, memberName);
727 peerAddresses.put(shardId.toString(), address);
730 return peerAddresses;
// Supervision: any child (shard actor) failure is logged and the child is
// resumed in place — up to 10 failures per minute per the strategy limits —
// rather than restarted, so shard state is preserved.
734 public SupervisorStrategy supervisorStrategy() {
736 return new OneForOneStrategy(10, Duration.create("1 minute"),
737 new Function<Throwable, SupervisorStrategy.Directive>() {
739 public SupervisorStrategy.Directive apply(Throwable t) {
740 LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
741 return SupervisorStrategy.resume();
// Akka persistence id; must stay stable across restarts so the journal for
// this datastore type is found again.
749 public String persistenceId() {
750 return "shard-manager-" + type;
// Package-private accessor for the JMX bean (presumably for tests); the
// return statement falls outside this visible listing.
754 ShardManagerInfoMBean getMBean(){
// Context used when bootstrapping a new local replica: elections are
// disabled (DisableElectionsRaftPolicy) so the fresh shard cannot become
// leader before it has been added to the existing leader's configuration.
758 private DatastoreContext getInitShardDataStoreContext() {
759 return (DatastoreContext.newBuilderFrom(datastoreContext)
760 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
// Guard used by the add-replica flow: if a local shard with this name
// already exists, reply to the sender with an IllegalArgumentException
// wrapped in Status.Failure.
764 private void checkLocalShardExists(final String shardName, final ActorRef sender) {
765 if (localShards.containsKey(shardName)) {
766 String msg = String.format("Local shard %s already exists", shardName);
767 LOG.debug ("{}: {}", persistenceId(), msg);
768 sender.tell(new akka.actor.Status.Failure(new IllegalArgumentException(msg)), getSelf());
// Handles AddShardReplica: validates (no duplicate local shard, shard is in
// the configuration, SchemaContext available, at least one peer exists),
// then asynchronously locates the shard leader via a RemoteFindPrimary ask
// on self and, on success, proceeds to addShard(). All failures are sent
// back to the original sender as akka Status.Failure.
772 private void onAddShardReplica (AddShardReplica shardReplicaMsg) {
773 final String shardName = shardReplicaMsg.getShardName();
775 // verify the local shard replica is already available in the controller node
776 LOG.debug ("onAddShardReplica: {}", shardReplicaMsg);
778 checkLocalShardExists(shardName, getSender());
780 // verify the shard with the specified name is present in the cluster configuration
781 if (!(this.configuration.isShardConfigured(shardName))) {
782 String msg = String.format("No module configuration exists for shard %s", shardName);
783 LOG.debug ("{}: {}", persistenceId(), msg);
784 getSender().tell(new akka.actor.Status.Failure(new IllegalArgumentException(msg)), getSelf());
788 // Create the localShard
789 if (schemaContext == null) {
790 String msg = String.format(
791 "No SchemaContext is available in order to create a local shard instance for %s", shardName);
792 LOG.debug ("{}: {}", persistenceId(), msg);
793 getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(msg)), getSelf());
797 Map<String, String> peerAddresses = getPeerAddresses(shardName);
798 if (peerAddresses.isEmpty()) {
799 String msg = String.format("Cannot add replica for shard %s because no peer is available", shardName);
800 LOG.debug ("{}: {}", persistenceId(), msg);
801 getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(msg)), getSelf());
// Allow twice the shard-initialization timeout for leader discovery.
805 Timeout findPrimaryTimeout = new Timeout(datastoreContext.getShardInitializationTimeout().duration().$times(2));
// Capture the sender now — getSender() is invalid inside the async callback.
807 final ActorRef sender = getSender();
808 Future<Object> futureObj = ask(getSelf(), new RemoteFindPrimary(shardName, true), findPrimaryTimeout);
809 futureObj.onComplete(new OnComplete<Object>() {
811 public void onComplete(Throwable failure, Object response) {
812 if (failure != null) {
813 LOG.debug ("{}: Received failure from FindPrimary for shard {}", persistenceId(), shardName, failure);
814 sender.tell(new akka.actor.Status.Failure(new RuntimeException(
815 String.format("Failed to find leader for shard %s", shardName), failure)),
// Only a RemotePrimaryShardFound response means a usable remote leader.
818 if (!(response instanceof RemotePrimaryShardFound)) {
819 String msg = String.format("Failed to find leader for shard %s: received response: %s",
820 shardName, response);
821 LOG.debug ("{}: {}", persistenceId(), msg);
822 sender.tell(new akka.actor.Status.Failure(new RuntimeException(msg)), getSelf());
826 RemotePrimaryShardFound message = (RemotePrimaryShardFound)response;
827 addShard (shardName, message, sender);
// Run the callback on the client dispatcher, off the actor's own thread.
830 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
// Second phase of add-replica: creates the local shard actor with an
// elections-disabled context, then asks the discovered leader to add this
// replica via AddServer. On failure the half-created local shard is rolled
// back (removed from localShards and poisoned); on success onAddServerReply
// finishes the handshake.
833 private void addShard(final String shardName, final RemotePrimaryShardFound response, final ActorRef sender) {
834 checkLocalShardExists(shardName, sender);
836 ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
837 String localShardAddress = peerAddressResolver.getShardActorAddress(shardName, cluster.getCurrentMemberName());
838 final ShardInformation shardInfo = new ShardInformation(shardName, shardId,
839 getPeerAddresses(shardName), getInitShardDataStoreContext(),
840 new DefaultShardPropsCreator(), peerAddressResolver);
841 localShards.put(shardName, shardInfo);
842 shardInfo.setActor(newShardActor(schemaContext, shardInfo));
844 //inform ShardLeader to add this shard as a replica by sending an AddServer message
845 LOG.debug ("{}: Sending AddServer message to peer {} for shard {}", persistenceId(),
846 response.getPrimaryPath(), shardId);
// Allow 4x the leader-election timeout for the AddServer round trip.
848 Timeout addServerTimeout = new Timeout(datastoreContext
849 .getShardLeaderElectionTimeout().duration().$times(4));
850 Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
851 new AddServer(shardId.toString(), localShardAddress, true), addServerTimeout);
853 futureObj.onComplete(new OnComplete<Object>() {
855 public void onComplete(Throwable failure, Object addServerResponse) {
856 if (failure != null) {
857 LOG.debug ("{}: AddServer request to {} for {} failed", persistenceId(),
858 response.getPrimaryPath(), shardName, failure);
// Roll back the speculative local shard before reporting failure.
861 localShards.remove(shardName);
862 if (shardInfo.getActor() != null) {
863 shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());
866 sender.tell(new akka.actor.Status.Failure(new RuntimeException(
867 String.format("AddServer request to leader %s for shard %s failed",
868 response.getPrimaryPath(), shardName), failure)), getSelf());
870 AddServerReply reply = (AddServerReply)addServerResponse;
871 onAddServerReply(shardName, shardInfo, reply, sender, response.getPrimaryPath());
// Callback runs on the client dispatcher, off the actor's own thread.
874 }, new Dispatchers(context().system().dispatchers()).
875 getDispatcher(Dispatchers.DispatcherType.Client));
    /**
     * Handles the AddServerReply received from the remote shard leader. On OK status
     * the local replica is made fully operational and registered with the MBean;
     * otherwise the local replica is removed and a descriptive failure is sent back.
     *
     * NOTE(review): the switch case labels are missing from this chunk of the file —
     * each tell below corresponds to a distinct ServerChangeStatus branch.
     *
     * @param shardName the shard being replicated
     * @param shardInfo the local ShardInformation created for the new replica
     * @param replyMsg the AddServerReply from the leader
     * @param sender the original requestor to notify
     * @param leaderPath the leader's actor path, used in log and error messages
     */
    private void onAddServerReply (String shardName, ShardInformation shardInfo,
            AddServerReply replyMsg, ActorRef sender, String leaderPath) {
        LOG.debug ("{}: Received {} for shard {} from leader {}", persistenceId(), replyMsg, shardName, leaderPath);

        if (replyMsg.getStatus() == ServerChangeStatus.OK) {
            LOG.debug ("{}: Leader shard successfully added the replica shard {}", persistenceId(), shardName);

            // Make the local shard voting capable
            shardInfo.setDatastoreContext(datastoreContext, getSelf());

            mBean.addLocalShard(shardInfo.getShardId().toString());
            sender.tell(new akka.actor.Status.Success(true), getSelf());

            LOG.warn ("{}: Leader failed to add shard replica {} with status {} - removing the local shard",
                    persistenceId(), shardName, replyMsg.getStatus());

            //remove the local replica created
            localShards.remove(shardName);
            if (shardInfo.getActor() != null) {
                shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());

            // Map the failure status to a descriptive exception for the requestor.
            switch (replyMsg.getStatus()) {
                    sender.tell(new akka.actor.Status.Failure(new RuntimeException(
                            String.format("The shard leader %s timed out trying to replicate the initial data to the new shard %s. Possible causes - there was a problem replicating the data or shard leadership changed while replicating the shard data",
                                    leaderPath, shardName))), getSelf());

                    sender.tell(new akka.actor.Status.Failure(new RuntimeException(String.format(
                            "There is no shard leader available for shard %s", shardName))), getSelf());

                    sender.tell(new akka.actor.Status.Failure(new RuntimeException(String.format(
                            "AddServer request to leader %s for shard %s failed with status %s",
                            leaderPath, shardName, replyMsg.getStatus()))), getSelf());
918 private void onRemoveShardReplica (RemoveShardReplica shardReplicaMsg) {
919 String shardName = shardReplicaMsg.getShardName();
921 // verify the local shard replica is available in the controller node
922 if (!localShards.containsKey(shardName)) {
923 String msg = String.format("Local shard %s does not", shardName);
924 LOG.debug ("{}: {}", persistenceId(), msg);
925 getSender().tell(new akka.actor.Status.Failure(new IllegalArgumentException(msg)), getSelf());
928 // call RemoveShard for the shardName
929 getSender().tell(new akka.actor.Status.Success(true), getSelf());
    /**
     * Per-shard bookkeeping held by the ShardManager: the shard's identity, its local
     * actor, initial peer addresses, raft role/leader tracking, and callbacks waiting
     * for the shard to become initialized or ready.
     *
     * NOTE(review): several method bodies/closing braces are missing from this chunk
     * of the file; the visible lines are preserved as-is.
     */
    protected static class ShardInformation {
        private final ShardIdentifier shardId;
        private final String shardName;
        // The local Shard actor, set via setActor once created.
        private ActorRef actor;
        private ActorPath actorPath;
        // Peer id -> address map used when the Shard actor is first created.
        private final Map<String, String> initialPeerAddresses;
        // The shard's data tree, if it has been reported to the ShardManager.
        private Optional<DataTree> localShardDataTree;
        private boolean leaderAvailable = false;

        // flag that determines if the actor is ready for business
        private boolean actorInitialized = false;

        private boolean followerSyncStatus = false;

        // Callbacks to fire (and timeouts to cancel) once the shard is initialized/ready.
        private final Set<OnShardInitialized> onShardInitializedSet = Sets.newHashSet();
        // The shard's current raft role name (e.g. Leader, Follower, Candidate).
        private String role ;
        private String leaderId;
        private short leaderVersion;

        private DatastoreContext datastoreContext;
        private final ShardPropsCreator shardPropsCreator;
        private final ShardPeerAddressResolver addressResolver;

        private ShardInformation(String shardName, ShardIdentifier shardId,
                Map<String, String> initialPeerAddresses, DatastoreContext datastoreContext,
                ShardPropsCreator shardPropsCreator, ShardPeerAddressResolver addressResolver) {
            this.shardName = shardName;
            this.shardId = shardId;
            this.initialPeerAddresses = initialPeerAddresses;
            this.datastoreContext = datastoreContext;
            this.shardPropsCreator = shardPropsCreator;
            this.addressResolver = addressResolver;

        // Builds the Props used to create this shard's actor.
        Props newProps(SchemaContext schemaContext) {
            return shardPropsCreator.newProps(shardId, initialPeerAddresses, datastoreContext, schemaContext);

        String getShardName() {

        ActorPath getActorPath() {

        void setActor(ActorRef actor) {
            this.actorPath = actor.path();

        ShardIdentifier getShardId() {

        void setLocalDataTree(Optional<DataTree> localShardDataTree) {
            this.localShardDataTree = localShardDataTree;

        Optional<DataTree> getLocalShardDataTree() {
            return localShardDataTree;

        // Forwards a resolved peer address to the shard actor and re-checks waiting callbacks.
        void updatePeerAddress(String peerId, String peerAddress, ActorRef sender){
            LOG.info("updatePeerAddress for peer {} with address {}", peerId, peerAddress);

                if(LOG.isDebugEnabled()) {
                    LOG.debug("Sending PeerAddressResolved for peer {} with address {} to {}",
                            peerId, peerAddress, actor.path());

                actor.tell(new PeerAddressResolved(peerId, peerAddress), sender);

            notifyOnShardInitializedCallbacks();

        // Notifies the shard actor that a peer member went down.
        void peerDown(String memberName, String peerId, ActorRef sender) {
                actor.tell(new PeerDown(memberName, peerId), sender);

        // Notifies the shard actor that a peer member came up.
        void peerUp(String memberName, String peerId, ActorRef sender) {
                actor.tell(new PeerUp(memberName, peerId), sender);

        // Ready means the shard has a known raft role other than Candidate.
        boolean isShardReady() {
            return !RaftState.Candidate.name().equals(role) && !Strings.isNullOrEmpty(role);

        // Ready with a usable leader: leader known, shard not an isolated leader, and the
        // leader's address resolvable (or this shard is itself the leader).
        boolean isShardReadyWithLeaderId() {
            return leaderAvailable && isShardReady() && !RaftState.IsolatedLeader.name().equals(role) &&
                    (isLeader() || addressResolver.resolve(leaderId) != null);

        boolean isShardInitialized() {
            return getActor() != null && actorInitialized;

        boolean isLeader() {
            return Objects.equal(leaderId, shardId.toString());

        // Serialized actor path of the leader: the local actor if we lead, otherwise
        // resolved from the leaderId via the address resolver.
        String getSerializedLeaderActor() {
                return Serialization.serializedActorPath(getActor());

                return addressResolver.resolve(leaderId);

        void setActorInitialized() {
            LOG.debug("Shard {} is initialized", shardId);

            this.actorInitialized = true;

            notifyOnShardInitializedCallbacks();

        // Fires (and cancels the timeout of) waiting callbacks; OnShardReady callbacks
        // additionally require the shard to be ready with a leader.
        private void notifyOnShardInitializedCallbacks() {
            if(onShardInitializedSet.isEmpty()) {

            boolean ready = isShardReadyWithLeaderId();

            if(LOG.isDebugEnabled()) {
                LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId,
                        ready ? "ready" : "initialized", onShardInitializedSet.size());

            Iterator<OnShardInitialized> iter = onShardInitializedSet.iterator();
            while(iter.hasNext()) {
                OnShardInitialized onShardInitialized = iter.next();
                if(!(onShardInitialized instanceof OnShardReady) || ready) {
                    // Cancel the pending timeout before running the reply.
                    onShardInitialized.getTimeoutSchedule().cancel();
                    onShardInitialized.getReplyRunnable().run();

        void addOnShardInitialized(OnShardInitialized onShardInitialized) {
            onShardInitializedSet.add(onShardInitialized);

        void removeOnShardInitialized(OnShardInitialized onShardInitialized) {
            onShardInitializedSet.remove(onShardInitialized);

        // Records the shard's raft role and re-checks waiting callbacks.
        void setRole(String newRole) {
            this.role = newRole;

            notifyOnShardInitializedCallbacks();

        void setFollowerSyncStatus(boolean syncStatus){
            this.followerSyncStatus = syncStatus;

            // Sync status: followers report their tracked status; a leader branch follows.
            if(RaftState.Follower.name().equals(this.role)){
                return followerSyncStatus;
            } else if(RaftState.Leader.name().equals(this.role)){

        // Records the current leader id; returns whether it changed, and marks the
        // leader available when a non-null id is supplied.
        boolean setLeaderId(String leaderId) {
            boolean changed = !Objects.equal(this.leaderId, leaderId);
            this.leaderId = leaderId;
            if(leaderId != null) {
                this.leaderAvailable = true;

            notifyOnShardInitializedCallbacks();

        String getLeaderId() {

        void setLeaderAvailable(boolean leaderAvailable) {
            this.leaderAvailable = leaderAvailable;

        short getLeaderVersion() {
            return leaderVersion;

        void setLeaderVersion(short leaderVersion) {
            this.leaderVersion = leaderVersion;

        // Pushes an updated DatastoreContext to the shard actor (e.g. a RaftPolicy change).
        void setDatastoreContext(DatastoreContext datastoreContext, ActorRef sender) {
            this.datastoreContext = datastoreContext;
            // Notify the shard actor of the DatastoreContext change.
            LOG.debug ("Notifying RaftPolicy change via datastoreContextChange for {}",
            if (actor != null) {
                actor.tell(this.datastoreContext, sender);
    /**
     * Akka Creator used to build the ShardManager actor's Props with all of its
     * collaborators captured at construction time.
     */
    private static class ShardManagerCreator implements Creator<ShardManager> {
        private static final long serialVersionUID = 1L;

        final ClusterWrapper cluster;
        final Configuration configuration;
        final DatastoreContext datastoreContext;
        // Counted down once the ShardManager (and its shards) are ready for use.
        private final CountDownLatch waitTillReadyCountdownLatch;
        private final PrimaryShardInfoFutureCache primaryShardInfoCache;

        ShardManagerCreator(ClusterWrapper cluster, Configuration configuration, DatastoreContext datastoreContext,
                CountDownLatch waitTillReadyCountdownLatch, PrimaryShardInfoFutureCache primaryShardInfoCache) {
            this.cluster = cluster;
            this.configuration = configuration;
            this.datastoreContext = datastoreContext;
            this.waitTillReadyCountdownLatch = waitTillReadyCountdownLatch;
            this.primaryShardInfoCache = primaryShardInfoCache;

        public ShardManager create() throws Exception {
            return new ShardManager(cluster, configuration, datastoreContext, waitTillReadyCountdownLatch,
                    primaryShardInfoCache);
    /**
     * Callback holder for a caller waiting for a shard to become initialized,
     * paired with a cancellable timeout that is cancelled when the callback fires.
     */
    private static class OnShardInitialized {
        // Runnable executed when the shard reaches the awaited state.
        private final Runnable replyRunnable;
        // Scheduled timeout for the wait; set after scheduling, cancelled on success.
        private Cancellable timeoutSchedule;

        OnShardInitialized(Runnable replyRunnable) {
            this.replyRunnable = replyRunnable;

        Runnable getReplyRunnable() {
            return replyRunnable;

        Cancellable getTimeoutSchedule() {
            return timeoutSchedule;

        void setTimeoutSchedule(Cancellable timeoutSchedule) {
            this.timeoutSchedule = timeoutSchedule;
    /**
     * Marker subclass of OnShardInitialized: this callback fires only when the shard
     * is ready with a leader, not merely initialized.
     */
    private static class OnShardReady extends OnShardInitialized {
        OnShardReady(Runnable replyRunnable) {
            super(replyRunnable);
    /**
     * Internal message carrying the context needed to reply with a timeout failure
     * when a shard does not become initialized/ready within the allotted time.
     */
    private static class ShardNotInitializedTimeout {
        // The original requestor to reply to on timeout.
        private final ActorRef sender;
        private final ShardInformation shardInfo;
        // The pending callback to remove from the shard's waiting set.
        private final OnShardInitialized onShardInitialized;

        ShardNotInitializedTimeout(ShardInformation shardInfo, OnShardInitialized onShardInitialized, ActorRef sender) {
            this.sender = sender;
            this.shardInfo = shardInfo;
            this.onShardInitialized = onShardInitialized;

        ActorRef getSender() {

        ShardInformation getShardInfo() {

        OnShardInitialized getOnShardInitialized() {
            return onShardInitialized;
1227 * We no longer persist SchemaContextModules but keep this class around for now for backwards
1228 * compatibility so we don't get de-serialization failures on upgrade from Helium.
1231 static class SchemaContextModules implements Serializable {
1232 private static final long serialVersionUID = -8884620101025936590L;
1234 private final Set<String> modules;
1236 SchemaContextModules(Set<String> modules){
1237 this.modules = modules;
1240 public Set<String> getModules() {