2 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
4 * This program and the accompanying materials are made available under the
5 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6 * and is available at http://www.eclipse.org/legal/epl-v10.html
9 package org.opendaylight.controller.cluster.datastore.shardmanager;
11 import static akka.pattern.Patterns.ask;
13 import akka.actor.ActorRef;
14 import akka.actor.Address;
15 import akka.actor.Cancellable;
16 import akka.actor.OneForOneStrategy;
17 import akka.actor.PoisonPill;
18 import akka.actor.Status;
19 import akka.actor.SupervisorStrategy;
20 import akka.actor.SupervisorStrategy.Directive;
21 import akka.cluster.ClusterEvent;
22 import akka.cluster.ClusterEvent.MemberWeaklyUp;
23 import akka.cluster.Member;
24 import akka.dispatch.Futures;
25 import akka.dispatch.OnComplete;
26 import akka.japi.Function;
27 import akka.pattern.Patterns;
28 import akka.persistence.RecoveryCompleted;
29 import akka.persistence.SaveSnapshotFailure;
30 import akka.persistence.SaveSnapshotSuccess;
31 import akka.persistence.SnapshotOffer;
32 import akka.persistence.SnapshotSelectionCriteria;
33 import akka.util.Timeout;
34 import com.google.common.annotations.VisibleForTesting;
35 import com.google.common.base.Preconditions;
36 import java.util.ArrayList;
37 import java.util.Collection;
38 import java.util.Collections;
39 import java.util.HashMap;
40 import java.util.HashSet;
41 import java.util.List;
43 import java.util.Map.Entry;
45 import java.util.concurrent.CountDownLatch;
46 import java.util.concurrent.TimeUnit;
47 import java.util.concurrent.TimeoutException;
48 import java.util.function.Consumer;
49 import java.util.function.Supplier;
50 import org.opendaylight.controller.cluster.access.concepts.MemberName;
51 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
52 import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
53 import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
54 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
55 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
56 import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
57 import org.opendaylight.controller.cluster.datastore.Shard;
58 import org.opendaylight.controller.cluster.datastore.config.Configuration;
59 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
60 import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
61 import org.opendaylight.controller.cluster.datastore.exceptions.AlreadyExistsException;
62 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
63 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
64 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
65 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
66 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
67 import org.opendaylight.controller.cluster.datastore.messages.AddPrefixShardReplica;
68 import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
69 import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
70 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
71 import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
72 import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
73 import org.opendaylight.controller.cluster.datastore.messages.FlipShardMembersVotingStatus;
74 import org.opendaylight.controller.cluster.datastore.messages.GetShardRole;
75 import org.opendaylight.controller.cluster.datastore.messages.GetShardRoleReply;
76 import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
77 import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
78 import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
79 import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
80 import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
81 import org.opendaylight.controller.cluster.datastore.messages.RemovePrefixShardReplica;
82 import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
83 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
84 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
85 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
86 import org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot;
87 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
88 import org.opendaylight.controller.cluster.datastore.utils.CompositeOnComplete;
89 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
90 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
91 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
92 import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
93 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
94 import org.opendaylight.controller.cluster.raft.base.messages.SwitchBehavior;
95 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
96 import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
97 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
98 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
99 import org.opendaylight.controller.cluster.raft.messages.AddServer;
100 import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
101 import org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus;
102 import org.opendaylight.controller.cluster.raft.messages.RemoveServer;
103 import org.opendaylight.controller.cluster.raft.messages.RemoveServerReply;
104 import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply;
105 import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
106 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
107 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
108 import org.opendaylight.controller.cluster.sharding.PrefixedShardConfigUpdateHandler;
109 import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
110 import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
111 import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
112 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
113 import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
114 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
115 import org.opendaylight.yangtools.concepts.ListenerRegistration;
116 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
117 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
118 import org.slf4j.Logger;
119 import org.slf4j.LoggerFactory;
120 import scala.concurrent.ExecutionContext;
121 import scala.concurrent.Future;
122 import scala.concurrent.duration.Duration;
123 import scala.concurrent.duration.FiniteDuration;
126 * Manages the shards for a data store. The ShardManager has the following jobs:
128 * <li> Create all the local shard replicas that belong on this cluster member
129 * <li> Find the address of the local shard
130 * <li> Find the primary replica for any given shard
131 * <li> Monitor the cluster members and store their addresses
134 class ShardManager extends AbstractUntypedPersistentActorWithMetering {
// SLF4J logger; log statements throughout prefix persistenceId() for correlation.
135 private static final Logger LOG = LoggerFactory.getLogger(ShardManager.class);
137 // Stores a mapping between a shard name and its corresponding information
138 // Shard names look like inventory, topology etc and are as specified in
140 private final Map<String, ShardInformation> localShards = new HashMap<>();
142 // The type of a ShardManager reflects the type of the datastore itself
143 // A data store could be of type config/operational
144 private final String type;
// Abstraction over the akka cluster; used to subscribe to member events.
146 private final ClusterWrapper cluster;
// Static module/prefix shard configuration for this datastore.
148 private final Configuration configuration;
// Akka dispatcher path used when creating Shard actors.
150 private final String shardDispatcherPath;
152 private final ShardManagerInfo shardManagerMBean;
154 private DatastoreContextFactory datastoreContextFactory;
// Counted down once all local shards report a leader id (see checkReady()).
156 private final CountDownLatch waitTillReadyCountdownLatch;
// Cache of primary-shard lookups; invalidated when a shard's leader changes.
158 private final PrimaryShardInfoFutureCache primaryShardInfoCache;
160 private final ShardPeerAddressResolver peerAddressResolver;
162 private SchemaContext schemaContext;
// Optional snapshot to restore from on startup, supplied by the builder.
164 private DatastoreSnapshot restoreFromSnapshot;
166 private ShardManagerSnapshot currentSnapshot;
// Shard names with an add/remove replica operation in flight; guards concurrent ops.
168 private final Set<String> shardReplicaOperationsInProgress = new HashSet<>();
// Shard actors currently being gracefully stopped, keyed by shard name; used to
// defer messages for a shard whose previous actor has not finished stopping.
170 private final Map<String, CompositeOnComplete<Boolean>> shardActorsStopping = new HashMap<>();
172 private final String persistenceId;
173 private final AbstractDataStore dataStore;
// Prefix-shard config listener registration; null until InitConfigListener arrives.
175 private ListenerRegistration<DOMDataTreeChangeListener> configListenerReg = null;
176 private PrefixedShardConfigUpdateHandler configUpdateHandler;
// Initializes this manager from the builder: wires cluster/configuration/context
// collaborators, derives the persistence id, subscribes to cluster member events
// and registers the ShardManagerInfo MBean.
178 ShardManager(final AbstractShardManagerCreator<?> builder) {
179 this.cluster = builder.getCluster();
180 this.configuration = builder.getConfiguration();
181 this.datastoreContextFactory = builder.getDatastoreContextFactory();
182 this.type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
183 this.shardDispatcherPath =
184 new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
185 this.waitTillReadyCountdownLatch = builder.getWaitTillReadyCountDownLatch();
186 this.primaryShardInfoCache = builder.getPrimaryShardInfoCache();
187 this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
// Use the explicitly configured persistence id when present; otherwise derive one
// from the datastore type ("shard-manager-config" / "shard-manager-operational").
189 String possiblePersistenceId = datastoreContextFactory.getBaseDatastoreContext().getShardManagerPersistenceId();
190 persistenceId = possiblePersistenceId != null ? possiblePersistenceId : "shard-manager-" + type;
192 peerAddressResolver = new ShardPeerAddressResolver(type, cluster.getCurrentMemberName());
194 // Subscribe this actor to cluster member events
195 cluster.subscribeToMemberEvents(getSelf());
197 shardManagerMBean = new ShardManagerInfo(getSelf(), cluster.getCurrentMemberName(),
198 "shard-manager-" + this.type,
199 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
200 shardManagerMBean.registerMBean();
202 dataStore = builder.getDistributedDataStore();
// Actor lifecycle hook: logs startup for diagnostics.
206 public void preStart() {
207 LOG.info("Starting ShardManager {}", persistenceId)
// Actor lifecycle hook: unregisters the MBean and releases the prefix-shard config
// listener registration (idempotent via the null check/reset).
211 public void postStop() {
212 LOG.info("Stopping ShardManager {}", persistenceId());
214 shardManagerMBean.unregisterMBean();
216 if (configListenerReg != null) {
217 configListenerReg.close();
218 configListenerReg = null;
// Central command dispatch for this persistent actor: routes each supported message
// type to its dedicated handler; anything unrecognized falls through to
// unknownMessage(). Branch order is significant only in that each message matches
// exactly one instanceof test.
// NOTE(review): this listing appears lossy — e.g. the GetSnapshot and Shutdown
// branches show no handler invocation here; verify against the complete source.
223 public void handleCommand(final Object message) throws Exception {
224 if (message instanceof FindPrimary) {
225 findPrimary((FindPrimary)message);
226 } else if (message instanceof FindLocalShard) {
227 findLocalShard((FindLocalShard) message);
228 } else if (message instanceof UpdateSchemaContext) {
229 updateSchemaContext(message);
230 } else if (message instanceof ActorInitialized) {
231 onActorInitialized(message);
232 } else if (message instanceof ClusterEvent.MemberUp) {
233 memberUp((ClusterEvent.MemberUp) message);
234 } else if (message instanceof ClusterEvent.MemberWeaklyUp) {
235 memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
236 } else if (message instanceof ClusterEvent.MemberExited) {
237 memberExited((ClusterEvent.MemberExited) message);
238 } else if (message instanceof ClusterEvent.MemberRemoved) {
239 memberRemoved((ClusterEvent.MemberRemoved) message);
240 } else if (message instanceof ClusterEvent.UnreachableMember) {
241 memberUnreachable((ClusterEvent.UnreachableMember) message);
242 } else if (message instanceof ClusterEvent.ReachableMember) {
243 memberReachable((ClusterEvent.ReachableMember) message);
244 } else if (message instanceof DatastoreContextFactory) {
245 onDatastoreContextFactory((DatastoreContextFactory) message);
246 } else if (message instanceof RoleChangeNotification) {
247 onRoleChangeNotification((RoleChangeNotification) message);
248 } else if (message instanceof FollowerInitialSyncUpStatus) {
249 onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
250 } else if (message instanceof ShardNotInitializedTimeout) {
251 onShardNotInitializedTimeout((ShardNotInitializedTimeout) message);
252 } else if (message instanceof ShardLeaderStateChanged) {
253 onLeaderStateChanged((ShardLeaderStateChanged) message);
254 } else if (message instanceof SwitchShardBehavior) {
255 onSwitchShardBehavior((SwitchShardBehavior) message);
256 } else if (message instanceof CreateShard) {
257 onCreateShard((CreateShard)message);
258 } else if (message instanceof AddShardReplica) {
259 onAddShardReplica((AddShardReplica) message);
260 } else if (message instanceof AddPrefixShardReplica) {
261 onAddPrefixShardReplica((AddPrefixShardReplica) message);
262 } else if (message instanceof PrefixShardCreated) {
263 onPrefixShardCreated((PrefixShardCreated) message);
264 } else if (message instanceof PrefixShardRemoved) {
265 onPrefixShardRemoved((PrefixShardRemoved) message);
266 } else if (message instanceof InitConfigListener) {
267 onInitConfigListener();
268 } else if (message instanceof ForwardedAddServerReply) {
269 ForwardedAddServerReply msg = (ForwardedAddServerReply)message;
270 onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath,
271 msg.removeShardOnFailure);
272 } else if (message instanceof ForwardedAddServerFailure) {
273 ForwardedAddServerFailure msg = (ForwardedAddServerFailure)message;
274 onAddServerFailure(msg.shardName, msg.failureMessage, msg.failure, getSender(), msg.removeShardOnFailure);
275 } else if (message instanceof RemoveShardReplica) {
276 onRemoveShardReplica((RemoveShardReplica) message);
277 } else if (message instanceof RemovePrefixShardReplica) {
278 onRemovePrefixShardReplica((RemovePrefixShardReplica) message);
279 } else if (message instanceof WrappedShardResponse) {
280 onWrappedShardResponse((WrappedShardResponse) message);
281 } else if (message instanceof GetSnapshot) {
283 } else if (message instanceof ServerRemoved) {
284 onShardReplicaRemoved((ServerRemoved) message);
285 } else if (message instanceof ChangeShardMembersVotingStatus) {
286 onChangeShardServersVotingStatus((ChangeShardMembersVotingStatus) message);
287 } else if (message instanceof FlipShardMembersVotingStatus) {
288 onFlipShardMembersVotingStatus((FlipShardMembersVotingStatus) message);
289 } else if (message instanceof SaveSnapshotSuccess) {
290 onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
291 } else if (message instanceof SaveSnapshotFailure) {
292 LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(),
293 ((SaveSnapshotFailure) message).cause());
294 } else if (message instanceof Shutdown) {
296 } else if (message instanceof GetLocalShardIds) {
297 onGetLocalShardIds();
298 } else if (message instanceof GetShardRole) {
299 onGetShardRole((GetShardRole) message);
300 } else if (message instanceof RunnableMessage) {
301 ((RunnableMessage)message).run();
303 unknownMessage(message);
// Replies with the named local shard's current RAFT role (GetShardRoleReply), or a
// Status.Failure wrapping IllegalArgumentException when the shard is unknown here.
307 private void onGetShardRole(final GetShardRole message) {
308 LOG.debug("{}: onGetShardRole for shard: {}", persistenceId(), message.getName());
310 final String name = message.getName();
312 final ShardInformation shardInformation = localShards.get(name);
314 if (shardInformation == null) {
315 LOG.info("{}: no shard information for {} found", persistenceId(), name);
316 getSender().tell(new Status.Failure(
317 new IllegalArgumentException("Shard with name " + name + " not present.")), ActorRef.noSender());
321 getSender().tell(new GetShardRoleReply(shardInformation.getRole()), ActorRef.noSender());
// Installs the prefix-shard configuration listener on this datastore. The controller
// LogicalDatastoreType is translated to its mdsal equivalent by enum name; any
// previously-installed handler is closed and replaced.
324 private void onInitConfigListener() {
325 LOG.debug("{}: Initializing config listener on {}", persistenceId(), cluster.getCurrentMemberName());
327 final org.opendaylight.mdsal.common.api.LogicalDatastoreType type =
328 org.opendaylight.mdsal.common.api.LogicalDatastoreType
329 .valueOf(datastoreContextFactory.getBaseDatastoreContext().getLogicalStoreType().name());
331 if (configUpdateHandler != null) {
332 configUpdateHandler.close();
335 configUpdateHandler = new PrefixedShardConfigUpdateHandler(self(), cluster.getCurrentMemberName());
336 configUpdateHandler.initListener(dataStore, type);
// Gracefully stops every local shard actor (Shutdown message, deadline of twice the
// shard's election timeout each), then sends PoisonPill to self once all stop
// futures complete — whether or not any of them failed.
339 private void onShutDown() {
340 List<Future<Boolean>> stopFutures = new ArrayList<>(localShards.size());
341 for (ShardInformation info : localShards.values()) {
342 if (info.getActor() != null) {
343 LOG.debug("{}: Issuing gracefulStop to shard {}", persistenceId(), info.getShardId());
345 FiniteDuration duration = info.getDatastoreContext().getShardRaftConfig()
346 .getElectionTimeOutInterval().$times(2);
347 stopFutures.add(Patterns.gracefulStop(info.getActor(), duration, Shutdown.INSTANCE));
351 LOG.info("Shutting down ShardManager {} - waiting on {} shards", persistenceId(), stopFutures.size());
353 ExecutionContext dispatcher = new Dispatchers(context().system().dispatchers())
354 .getDispatcher(Dispatchers.DispatcherType.Client);
355 Future<Iterable<Boolean>> combinedFutures = Futures.sequence(stopFutures, dispatcher);
357 combinedFutures.onComplete(new OnComplete<Iterable<Boolean>>() {
359 public void onComplete(final Throwable failure, final Iterable<Boolean> results) {
360 LOG.debug("{}: All shards shutdown - sending PoisonPill to self", persistenceId());
// Self-terminate unconditionally; failures below are logged, not retried.
362 self().tell(PoisonPill.getInstance(), self());
364 if (failure != null) {
365 LOG.warn("{}: An error occurred attempting to shut down the shards", persistenceId(), failure);
// NOTE(review): the loop body tallying failed results (nfailed) is not visible in
// this listing — confirm against the complete source.
368 for (Boolean result : results) {
375 LOG.warn("{}: {} shards did not shut down gracefully", persistenceId(), nfailed);
// Unwraps a response forwarded back to self from a shard leader; only
// RemoveServerReply is handled here.
382 private void onWrappedShardResponse(final WrappedShardResponse message) {
383 if (message.getResponse() instanceof RemoveServerReply) {
384 onRemoveServerReply(getSender(), message.getShardId(), (RemoveServerReply) message.getResponse(),
385 message.getLeaderPath());
// Completes a remove-replica operation: clears the in-progress marker for the shard
// and relays either Status.Success or a descriptive Status.Failure (built by
// getServerChangeException) to the original requester.
389 private void onRemoveServerReply(final ActorRef originalSender, final ShardIdentifier shardId,
390 final RemoveServerReply replyMsg, final String leaderPath) {
391 shardReplicaOperationsInProgress.remove(shardId.getShardName());
393 LOG.debug("{}: Received {} for shard {}", persistenceId(), replyMsg, shardId.getShardName());
395 if (replyMsg.getStatus() == ServerChangeStatus.OK) {
396 LOG.debug("{}: Leader shard successfully removed the replica shard {}", persistenceId(),
397 shardId.getShardName());
398 originalSender.tell(new Status.Success(null), getSelf());
400 LOG.warn("{}: Leader failed to remove shard replica {} with status {}",
401 persistenceId(), shardId, replyMsg.getStatus());
403 Exception failure = getServerChangeException(RemoveServer.class, replyMsg.getStatus(), leaderPath, shardId);
404 originalSender.tell(new Status.Failure(failure), getSelf());
// Asks the shard leader at primaryPath to remove this member's replica of a
// prefix-based shard by sending it a RemoveServer message. On ask failure the
// in-progress marker is cleared and the sender gets a Status.Failure; on success the
// raw reply is re-dispatched to self as a WrappedShardResponse on the sender's behalf.
// NOTE(review): near-duplicate of removeShardReplica below — candidate for
// consolidation in the full source.
408 private void removePrefixShardReplica(final RemovePrefixShardReplica contextMessage, final String shardName,
409 final String primaryPath, final ActorRef sender) {
// Bail out if another replica operation for this shard is already in flight.
410 if (isShardReplicaOperationInProgress(shardName, sender)) {
414 shardReplicaOperationsInProgress.add(shardName);
416 final ShardIdentifier shardId = getShardIdentifier(contextMessage.getMemberName(), shardName);
418 final DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
420 //inform ShardLeader to remove this shard as a replica by sending an RemoveServer message
421 LOG.debug("{}: Sending RemoveServer message to peer {} for shard {}", persistenceId(),
422 primaryPath, shardId);
423 Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
425 Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
426 new RemoveServer(shardId.toString()), removeServerTimeout);
428 futureObj.onComplete(new OnComplete<Object>() {
430 public void onComplete(final Throwable failure, final Object response) {
431 if (failure != null) {
432 shardReplicaOperationsInProgress.remove(shardName);
433 String msg = String.format("RemoveServer request to leader %s for shard %s failed",
434 primaryPath, shardName);
436 LOG.debug("{}: {}", persistenceId(), msg, failure);
439 sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
442 self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
445 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
// Asks the shard leader at primaryPath to remove this member's replica of a
// module-based shard via RemoveServer. Same protocol as removePrefixShardReplica
// above: failure clears the in-progress marker and fails the sender; success routes
// the reply back through self as a WrappedShardResponse on the sender's behalf.
448 private void removeShardReplica(final RemoveShardReplica contextMessage, final String shardName,
449 final String primaryPath, final ActorRef sender) {
// Bail out if another replica operation for this shard is already in flight.
450 if (isShardReplicaOperationInProgress(shardName, sender)) {
454 shardReplicaOperationsInProgress.add(shardName);
456 final ShardIdentifier shardId = getShardIdentifier(contextMessage.getMemberName(), shardName);
458 final DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
460 //inform ShardLeader to remove this shard as a replica by sending an RemoveServer message
461 LOG.debug("{}: Sending RemoveServer message to peer {} for shard {}", persistenceId(),
462 primaryPath, shardId);
464 Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
465 Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
466 new RemoveServer(shardId.toString()), removeServerTimeout);
468 futureObj.onComplete(new OnComplete<Object>() {
470 public void onComplete(final Throwable failure, final Object response) {
471 if (failure != null) {
472 shardReplicaOperationsInProgress.remove(shardName);
473 String msg = String.format("RemoveServer request to leader %s for shard %s failed",
474 primaryPath, shardName);
476 LOG.debug("{}: {}", persistenceId(), msg, failure);
479 sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
482 self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
485 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
// Triggered by the RAFT ServerRemoved notification: tears down the local shard
// identified by the server id embedded in the message.
488 private void onShardReplicaRemoved(final ServerRemoved message) {
489 removeShard(new ShardIdentifier.Builder().fromShardIdString(message.getServerId()).build());
// Removes the shard's local bookkeeping and, if a shard actor exists, gracefully
// stops it with a Shutdown message (timeout = max of 3x election timeout and 10s).
// The stop callback — registered in shardActorsStopping so other messages for the
// same shard can be deferred — invalidates the primaryShardInfo cache, clears the
// stopping entry and notifies any deferred tasks, all on the actor's own thread via
// a RunnableMessage.
492 @SuppressWarnings("checkstyle:IllegalCatch")
493 private void removeShard(final ShardIdentifier shardId) {
494 final String shardName = shardId.getShardName();
495 final ShardInformation shardInformation = localShards.remove(shardName);
496 if (shardInformation == null) {
497 LOG.debug("{} : Shard replica {} is not present in list", persistenceId(), shardId.toString());
501 final ActorRef shardActor = shardInformation.getActor();
502 if (shardActor != null) {
503 long timeoutInMS = Math.max(shardInformation.getDatastoreContext().getShardRaftConfig()
504 .getElectionTimeOutInterval().$times(3).toMillis(), 10000);
506 LOG.debug("{} : Sending Shutdown to Shard actor {} with {} ms timeout", persistenceId(), shardActor,
509 final Future<Boolean> stopFuture = Patterns.gracefulStop(shardActor,
510 FiniteDuration.apply(timeoutInMS, TimeUnit.MILLISECONDS), Shutdown.INSTANCE);
512 final CompositeOnComplete<Boolean> onComplete = new CompositeOnComplete<Boolean>() {
514 public void onComplete(final Throwable failure, final Boolean result) {
515 if (failure == null) {
516 LOG.debug("{} : Successfully shut down Shard actor {}", persistenceId(), shardActor);
518 LOG.warn("{}: Failed to shut down Shard actor {}", persistenceId(), shardActor, failure);
// Run the cleanup on this actor's thread to avoid touching actor state from
// the future's dispatcher thread.
521 self().tell((RunnableMessage) () -> {
522 // At any rate, invalidate primaryShardInfo cache
523 primaryShardInfoCache.remove(shardName);
525 shardActorsStopping.remove(shardName);
526 notifyOnCompleteTasks(failure, result);
527 }, ActorRef.noSender());
531 shardActorsStopping.put(shardName, onComplete);
532 stopFuture.onComplete(onComplete, new Dispatchers(context().system().dispatchers())
533 .getDispatcher(Dispatchers.DispatcherType.Client));
536 LOG.debug("{} : Local Shard replica for shard {} has been removed", persistenceId(), shardName);
// Handles GetSnapshot: fails fast with IllegalStateException if any local shard is
// not yet initialized; otherwise spawns a ShardManagerGetSnapshotReplyActor to
// aggregate replies and asks every local shard actor for its snapshot.
540 private void onGetSnapshot() {
541 LOG.debug("{}: onGetSnapshot", persistenceId());
// Collect names of uninitialized shards lazily — list is only allocated on demand.
543 List<String> notInitialized = null;
544 for (ShardInformation shardInfo : localShards.values()) {
545 if (!shardInfo.isShardInitialized()) {
546 if (notInitialized == null) {
547 notInitialized = new ArrayList<>();
550 notInitialized.add(shardInfo.getShardName());
554 if (notInitialized != null) {
555 getSender().tell(new Status.Failure(new IllegalStateException(String.format(
556 "%d shard(s) %s are not initialized", notInitialized.size(), notInitialized))), getSelf());
// The reply actor answers the original sender once all shards (or the timeout) hit.
560 ActorRef replyActor = getContext().actorOf(ShardManagerGetSnapshotReplyActor.props(
561 new ArrayList<>(localShards.keySet()), type, currentSnapshot , getSender(), persistenceId(),
562 datastoreContextFactory.getBaseDatastoreContext().getShardInitializationTimeout().duration()));
564 for (ShardInformation shardInfo: localShards.values()) {
565 shardInfo.getActor().tell(GetSnapshot.INSTANCE, replyActor);
// Handles CreateShard for a module-based shard. Idempotent: an already-existing
// shard yields Status.Success with an explanatory string; any exception from
// doCreateShard is logged and wrapped in Status.Failure. No reply is sent when the
// sender is deadLetters (e.g. a fire-and-forget tell).
569 @SuppressWarnings("checkstyle:IllegalCatch")
570 private void onCreateShard(final CreateShard createShard) {
571 LOG.debug("{}: onCreateShard: {}", persistenceId(), createShard);
575 String shardName = createShard.getModuleShardConfig().getShardName();
576 if (localShards.containsKey(shardName)) {
577 LOG.debug("{}: Shard {} already exists", persistenceId(), shardName);
578 reply = new Status.Success(String.format("Shard with name %s already exists", shardName));
580 doCreateShard(createShard);
581 reply = new Status.Success(null);
583 } catch (Exception e) {
584 LOG.error("{}: onCreateShard failed", persistenceId(), e);
585 reply = new Status.Failure(e);
588 if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
589 getSender().tell(reply, getSelf());
// Handles PrefixShardCreated: creates the local replica for a prefix-based shard.
// The message is deferred (re-queued after stop completes) if a previous actor for
// the same shard name is still stopping; a create for an existing shard with an
// identical configuration is treated as a no-op.
593 private void onPrefixShardCreated(final PrefixShardCreated message) {
594 LOG.debug("{}: onPrefixShardCreated: {}", persistenceId(), message);
596 final PrefixShardConfiguration config = message.getConfiguration();
597 final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
598 ClusterUtils.getCleanShardName(config.getPrefix().getRootIdentifier()));
599 final String shardName = shardId.getShardName();
601 if (isPreviousShardActorStopInProgress(shardName, message)) {
605 if (localShards.containsKey(shardName)) {
606 LOG.debug("{}: Received create for an already existing shard {}", persistenceId(), shardName);
607 final PrefixShardConfiguration existing =
608 configuration.getAllPrefixShardConfigurations().get(config.getPrefix());
610 if (existing != null && existing.equals(config)) {
// Same prefix, same configuration — nothing to do.
611 // we don't have to do nothing here
616 doCreatePrefixShard(config, shardId, shardName);
// Checks whether a previous actor for shardName is still being stopped. If so,
// registers an OnComplete callback that re-sends messageToDefer to self — preserving
// the original sender — once the stop finishes, so the message is retried.
// NOTE(review): the method's return statements are not visible in this listing;
// presumably false when no stop is in progress, true otherwise — confirm.
619 private boolean isPreviousShardActorStopInProgress(final String shardName, final Object messageToDefer) {
620 final CompositeOnComplete<Boolean> stopOnComplete = shardActorsStopping.get(shardName);
621 if (stopOnComplete == null) {
625 LOG.debug("{} : Stop is in progress for shard {} - adding OnComplete callback to defer {}", persistenceId(),
626 shardName, messageToDefer);
// Capture the sender now — getSender() is only valid while processing this message.
627 final ActorRef sender = getSender();
628 stopOnComplete.addOnComplete(new OnComplete<Boolean>() {
630 public void onComplete(final Throwable failure, final Boolean result) {
631 LOG.debug("{} : Stop complete for shard {} - re-queing {}", persistenceId(), shardName, messageToDefer);
632 self().tell(messageToDefer, sender);
// Records the prefix shard configuration and builds the local ShardInformation,
// with a context carrying the logical store type and store root for the prefix.
// The shard actor is only started once a SchemaContext is available; otherwise
// actor creation is deferred until the schema context arrives.
639 private void doCreatePrefixShard(final PrefixShardConfiguration config, final ShardIdentifier shardId,
640 final String shardName) {
641 configuration.addPrefixShardConfiguration(config);
643 final Builder builder = newShardDatastoreContextBuilder(shardName);
// Translate the mdsal datastore type to the controller enum by name.
644 builder.logicalStoreType(LogicalDatastoreType.valueOf(config.getPrefix().getDatastoreType().name()))
645 .storeRoot(config.getPrefix().getRootIdentifier());
646 DatastoreContext shardDatastoreContext = builder.build();
648 final Map<String, String> peerAddresses = getPeerAddresses(shardName);
649 final boolean isActiveMember = true;
651 LOG.debug("{} doCreatePrefixShard: shardId: {}, memberNames: {}, peerAddresses: {}, isActiveMember: {}",
652 persistenceId(), shardId, config.getShardMemberNames(), peerAddresses, isActiveMember);
654 final ShardInformation info = new ShardInformation(shardName, shardId, peerAddresses,
655 shardDatastoreContext, Shard.builder(), peerAddressResolver);
656 info.setActiveMember(isActiveMember);
657 localShards.put(info.getShardName(), info);
659 if (schemaContext != null) {
660 info.setSchemaContext(schemaContext);
661 info.setActor(newShardActor(info));
// Handles PrefixShardRemoved: drops the prefix shard configuration and tears down
// the corresponding local shard (via removeShard, which stops its actor).
665 private void onPrefixShardRemoved(final PrefixShardRemoved message) {
666 LOG.debug("{}: onPrefixShardRemoved : {}", persistenceId(), message);
668 final DOMDataTreeIdentifier prefix = message.getPrefix();
669 final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
670 ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
672 configuration.removePrefixShardConfiguration(prefix);
673 removeShard(shardId);
// Builds the ShardInformation for a module-based shard and records its config.
// A member that is neither in the shard's static member list nor present in a
// recovered snapshot is created as an inactive follower with elections disabled
// (DisableElectionsRaftPolicy) and no peers; a later AddServer makes it active.
// As with prefix shards, the actor is only started once a SchemaContext exists.
676 private void doCreateShard(final CreateShard createShard) {
677 final ModuleShardConfiguration moduleShardConfig = createShard.getModuleShardConfig();
678 final String shardName = moduleShardConfig.getShardName();
680 configuration.addModuleShardConfiguration(moduleShardConfig);
682 DatastoreContext shardDatastoreContext = createShard.getDatastoreContext();
683 if (shardDatastoreContext == null) {
684 shardDatastoreContext = newShardDatastoreContext(shardName);
// An explicitly supplied context still needs this manager's peer address resolver.
686 shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext).shardPeerAddressResolver(
687 peerAddressResolver).build();
690 ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
692 boolean shardWasInRecoveredSnapshot = currentSnapshot != null
693 && currentSnapshot.getShardList().contains(shardName);
695 Map<String, String> peerAddresses;
696 boolean isActiveMember;
697 if (shardWasInRecoveredSnapshot || configuration.getMembersFromShardName(shardName)
698 .contains(cluster.getCurrentMemberName())) {
699 peerAddresses = getPeerAddresses(shardName);
700 isActiveMember = true;
702 // The local member is not in the static shard member configuration and the shard did not
703 // previously exist (ie !shardWasInRecoveredSnapshot). In this case we'll create
704 // the shard with no peers and with elections disabled so it stays as follower. A
705 // subsequent AddServer request will be needed to make it an active member.
706 isActiveMember = false;
707 peerAddresses = Collections.emptyMap();
708 shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext)
709 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();
712 LOG.debug("{} doCreateShard: shardId: {}, memberNames: {}, peerAddresses: {}, isActiveMember: {}",
713 persistenceId(), shardId, moduleShardConfig.getShardMemberNames(), peerAddresses,
716 ShardInformation info = new ShardInformation(shardName, shardId, peerAddresses,
717 shardDatastoreContext, createShard.getShardBuilder(), peerAddressResolver);
718 info.setActiveMember(isActiveMember);
719 localShards.put(info.getShardName(), info);
721 if (schemaContext != null) {
722 info.setSchemaContext(schemaContext);
723 info.setActor(newShardActor(info));
// Returns a context builder seeded from the shard-specific datastore context, with
// this manager's peer address resolver already applied.
727 private DatastoreContext.Builder newShardDatastoreContextBuilder(final String shardName) {
728 return DatastoreContext.newBuilderFrom(datastoreContextFactory.getShardDatastoreContext(shardName))
729 .shardPeerAddressResolver(peerAddressResolver);
    /** Returns a fully-built {@link DatastoreContext} for the named shard. */
    private DatastoreContext newShardDatastoreContext(final String shardName) {
        return newShardDatastoreContextBuilder(shardName).build();
    /**
     * Counts down the ready latch once every local shard is ready with an elected leader,
     * signalling that this data store instance is usable.
     */
    private void checkReady() {
        if (isReadyWithLeaderId()) {
            LOG.info("{}: All Shards are ready - data store {} is ready, available count is {}",
                    persistenceId(), type, waitTillReadyCountdownLatch.getCount());
            waitTillReadyCountdownLatch.countDown();
    /**
     * Handles a ShardLeaderStateChanged notification from a local shard: records the leader's
     * local data tree and payload version, and invalidates the cached primary-shard info when
     * the leader id actually changed.
     */
    private void onLeaderStateChanged(final ShardLeaderStateChanged leaderStateChanged) {
        LOG.info("{}: Received LeaderStateChanged message: {}", persistenceId(), leaderStateChanged);
        ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
        if (shardInformation != null) {
            shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
            shardInformation.setLeaderVersion(leaderStateChanged.getLeaderPayloadVersion());
            if (shardInformation.setLeaderId(leaderStateChanged.getLeaderId())) {
                // Leader changed - drop the now-stale cached primary info for this shard.
                primaryShardInfoCache.remove(shardInformation.getShardName());
        // No matching local shard for the reported member id.
        LOG.debug("No shard found with member Id {}", leaderStateChanged.getMemberId());
    /**
     * Handles expiry of the timer scheduled by sendResponse while waiting for a shard to become
     * ready: deregisters the pending callback and replies to the original sender with
     * NotInitializedException (shard never initialized) or NoShardLeaderException (initialized
     * but still leaderless).
     */
    private void onShardNotInitializedTimeout(final ShardNotInitializedTimeout message) {
        ShardInformation shardInfo = message.getShardInfo();
        LOG.debug("{}: Received ShardNotInitializedTimeout message for shard {}", persistenceId(),
                shardInfo.getShardName());
        shardInfo.removeOnShardInitialized(message.getOnShardInitialized());
        if (!shardInfo.isShardInitialized()) {
            LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(), shardInfo.getShardName());
            message.getSender().tell(createNotInitializedException(shardInfo.getShardId()), getSelf());
            LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(), shardInfo.getShardName());
            message.getSender().tell(createNoShardLeaderException(shardInfo.getShardId()), getSelf());
    /**
     * Records a follower shard's initial-sync progress and refreshes the aggregated sync
     * status exposed via the shard manager MBean.
     */
    private void onFollowerInitialSyncStatus(final FollowerInitialSyncUpStatus status) {
        LOG.info("{} Received follower initial sync status for {} status sync done {}", persistenceId(),
                status.getName(), status.isInitialSyncDone());
        ShardInformation shardInformation = findShardInformation(status.getName());
        if (shardInformation != null) {
            shardInformation.setFollowerSyncStatus(status.isInitialSyncDone());
            shardManagerMBean.setSyncStatus(isInSync());
    /**
     * Records a shard's RAFT role change (e.g. Follower -> Leader) and refreshes the
     * aggregated sync status exposed via the shard manager MBean.
     */
    private void onRoleChangeNotification(final RoleChangeNotification roleChanged) {
        LOG.info("{}: Received role changed for {} from {} to {}", persistenceId(), roleChanged.getMemberId(),
                roleChanged.getOldRole(), roleChanged.getNewRole());
        ShardInformation shardInformation = findShardInformation(roleChanged.getMemberId());
        if (shardInformation != null) {
            shardInformation.setRole(roleChanged.getNewRole());
            shardManagerMBean.setSyncStatus(isInSync());
    /**
     * Looks up the local ShardInformation whose stringified shard id equals the given
     * member id, scanning all local shards.
     */
    private ShardInformation findShardInformation(final String memberId) {
        for (ShardInformation info : localShards.values()) {
            if (info.getShardId().toString().equals(memberId)) {
    /** Returns true only if every local shard is initialized and has an elected leader id. */
    private boolean isReadyWithLeaderId() {
        boolean isReady = true;
        for (ShardInformation info : localShards.values()) {
            if (!info.isShardReadyWithLeaderId()) {
    /** Returns true only if every local shard reports itself in sync with its leader. */
    private boolean isInSync() {
        for (ShardInformation info : localShards.values()) {
            if (!info.isInSync()) {
    /**
     * Handles an ActorInitialized message sent by a shard actor once it has started; derives
     * the shard name from the sender's actor name (the stringified shard id) and marks that
     * local shard as initialized.
     */
    private void onActorInitialized(final Object message) {
        final ActorRef sender = getSender();
        if (sender == null) {
            return; //why is a non-actor sending this message? Just ignore.
        String actorName = sender.path().name();
        //find shard name from actor name; actor name is stringified shardId
        final ShardIdentifier shardId;
        shardId = ShardIdentifier.fromShardIdString(actorName);
        } catch (IllegalArgumentException e) {
            // NOTE(review): first format arg is actorName, not persistenceId() - the "{}:" prefix
            // is inconsistent with other log statements in this class; confirm intent.
            LOG.debug("{}: ignoring actor {}", actorName, e);
        markShardAsInitialized(shardId.getShardName());
    /**
     * Marks the named local shard's actor as initialized and registers this shard manager
     * for its role-change notifications.
     */
    private void markShardAsInitialized(final String shardName) {
        LOG.debug("{}: Initializing shard [{}]", persistenceId(), shardName);
        ShardInformation shardInformation = localShards.get(shardName);
        if (shardInformation != null) {
            shardInformation.setActorInitialized();
            // Subscribe to the shard's role changes so onRoleChangeNotification gets invoked.
            shardInformation.getActor().tell(new RegisterRoleChangeListener(), self());
    /**
     * Akka persistence recovery callback: applies a snapshot offered from the journal and,
     * once recovery is complete, runs the post-recovery start-up logic.
     */
    protected void handleRecover(final Object message) throws Exception {
        if (message instanceof RecoveryCompleted) {
            onRecoveryCompleted();
        } else if (message instanceof SnapshotOffer) {
            applyShardManagerSnapshot((ShardManagerSnapshot)((SnapshotOffer) message).snapshot());
    /**
     * Runs after Akka persistence recovery finishes: purges legacy journal entries and, if no
     * snapshot was recovered from the journal, applies an operator-supplied restore snapshot.
     */
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void onRecoveryCompleted() {
        LOG.info("Recovery complete : {}", persistenceId());
        // We no longer persist SchemaContext modules so delete all the prior messages from the akka
        // journal on upgrade from Helium.
        deleteMessages(lastSequenceNr());
        // A journal-recovered snapshot (currentSnapshot != null) takes precedence over the
        // externally supplied restore-from-snapshot.
        if (currentSnapshot == null && restoreFromSnapshot != null
                && restoreFromSnapshot.getShardManagerSnapshot() != null) {
            ShardManagerSnapshot snapshot = restoreFromSnapshot.getShardManagerSnapshot();
            LOG.debug("{}: Restoring from ShardManagerSnapshot: {}", persistenceId(), snapshot);
            applyShardManagerSnapshot(snapshot);
    /**
     * Replies to the current sender with the value produced by {@code messageSupplier}, either
     * immediately (shard already in the wanted state) or after the shard initializes / elects a
     * leader. While waiting, a one-shot timer is scheduled that converts into a
     * NotInitializedException or NoShardLeaderException reply if the wait expires.
     *
     * @param shardInformation the local shard being waited on
     * @param doWait whether to wait for the shard rather than failing fast
     *        (NOTE(review): its use is not visible in this view - confirm)
     * @param wantShardReady true to additionally require an elected leader, not just initialization
     * @param messageSupplier produces the success reply, evaluated when the reply is sent
     */
    private void sendResponse(final ShardInformation shardInformation, final boolean doWait,
            final boolean wantShardReady, final Supplier<Object> messageSupplier) {
        if (!shardInformation.isShardInitialized() || wantShardReady && !shardInformation.isShardReadyWithLeaderId()) {
            final ActorRef sender = getSender();
            final ActorRef self = self();
            // Capture sender/self now; the runnable fires later from a callback.
            Runnable replyRunnable = () -> sender.tell(messageSupplier.get(), self);
            OnShardInitialized onShardInitialized = wantShardReady ? new OnShardReady(replyRunnable) :
                new OnShardInitialized(replyRunnable);
            shardInformation.addOnShardInitialized(onShardInitialized);
            FiniteDuration timeout = shardInformation.getDatastoreContext()
                    .getShardInitializationTimeout().duration();
            if (shardInformation.isShardInitialized()) {
                // If the shard is already initialized then we'll wait enough time for the shard to
                // elect a leader, ie 2 times the election timeout.
                timeout = FiniteDuration.create(shardInformation.getDatastoreContext().getShardRaftConfig()
                        .getElectionTimeOutInterval().toMillis() * 2, TimeUnit.MILLISECONDS);
            LOG.debug("{}: Scheduling {} ms timer to wait for shard {}", persistenceId(), timeout.toMillis(),
                    shardInformation.getShardName());
            // Schedule the give-up timer; it is cancelled if the shard becomes ready first.
            Cancellable timeoutSchedule = getContext().system().scheduler().scheduleOnce(
                    new ShardNotInitializedTimeout(shardInformation, onShardInitialized, sender),
                    getContext().dispatcher(), getSelf());
            onShardInitialized.setTimeoutSchedule(timeoutSchedule);
        } else if (!shardInformation.isShardInitialized()) {
            LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(),
                    shardInformation.getShardName());
            getSender().tell(createNotInitializedException(shardInformation.getShardId()), getSelf());
            LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(),
                    shardInformation.getShardName());
            getSender().tell(createNoShardLeaderException(shardInformation.getShardId()), getSelf());
        // Shard is ready - reply immediately.
        getSender().tell(messageSupplier.get(), getSelf());
    /** Builds the exception returned when a shard exists but currently has no elected leader. */
    private static NoShardLeaderException createNoShardLeaderException(final ShardIdentifier shardId) {
        return new NoShardLeaderException(null, shardId.toString());
    /** Builds the exception returned when a primary shard was found but is not yet initialized. */
    private static NotInitializedException createNotInitializedException(final ShardIdentifier shardId) {
        return new NotInitializedException(String.format(
            "Found primary shard %s but it's not initialized yet. Please try again later", shardId));
    /**
     * Derives the ODL MemberName from an Akka cluster member, taking the first advertised
     * Akka role as the member name. Throws NoSuchElementException if the member has no roles.
     */
    static MemberName memberToName(final Member member) {
        return MemberName.forName(member.roles().iterator().next());
    /**
     * Handles a cluster MemberRemoved event: forgets the member's resolved address and tells
     * every local shard that its corresponding peer is down.
     */
    private void memberRemoved(final ClusterEvent.MemberRemoved message) {
        MemberName memberName = memberToName(message.member());
        LOG.info("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
                message.member().address());
        peerAddressResolver.removePeerAddress(memberName);
        for (ShardInformation info : localShards.values()) {
            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
    /**
     * Handles a cluster MemberExited event - treated the same as removal: forgets the member's
     * address and marks the peer down on every local shard.
     */
    private void memberExited(final ClusterEvent.MemberExited message) {
        MemberName memberName = memberToName(message.member());
        LOG.info("{}: Received MemberExited: memberName: {}, address: {}", persistenceId(), memberName,
                message.member().address());
        peerAddressResolver.removePeerAddress(memberName);
        for (ShardInformation info : localShards.values()) {
            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
    /** Handles a cluster MemberUp event by recording the member's address. */
    private void memberUp(final ClusterEvent.MemberUp message) {
        MemberName memberName = memberToName(message.member());
        LOG.info("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
                message.member().address());
        memberUp(memberName, message.member().address());
    /** Records the given member's address and propagates it to all local shards. */
    private void memberUp(final MemberName memberName, final Address address) {
        addPeerAddress(memberName, address);
    /** Handles a cluster MemberWeaklyUp event; a weakly-up member is treated the same as up. */
    private void memberWeaklyUp(final MemberWeaklyUp message) {
        MemberName memberName = memberToName(message.member());
        LOG.info("{}: Received MemberWeaklyUp: memberName: {}, address: {}", persistenceId(), memberName,
                message.member().address());
        memberUp(memberName, message.member().address());
    /**
     * Records the member's address in the resolver, then updates each local shard with the
     * member's per-shard peer address and marks that peer as up.
     */
    private void addPeerAddress(final MemberName memberName, final Address address) {
        peerAddressResolver.addPeerAddress(memberName, address);
        for (ShardInformation info : localShards.values()) {
            String shardName = info.getShardName();
            String peerId = getShardIdentifier(memberName, shardName).toString();
            info.updatePeerAddress(peerId, peerAddressResolver.getShardActorAddress(shardName, memberName), getSelf());
            info.peerUp(memberName, peerId, getSelf());
    /**
     * Handles a cluster ReachableMember event: re-registers the member's address and marks
     * any shard leaders hosted on it as available again.
     */
    private void memberReachable(final ClusterEvent.ReachableMember message) {
        MemberName memberName = memberToName(message.member());
        LOG.info("Received ReachableMember: memberName {}, address: {}", memberName, message.member().address());
        addPeerAddress(memberName, message.member().address());
        markMemberAvailable(memberName);
    /**
     * Handles a cluster UnreachableMember event by marking any shard leaders hosted on that
     * member as unavailable.
     */
    private void memberUnreachable(final ClusterEvent.UnreachableMember message) {
        MemberName memberName = memberToName(message.member());
        LOG.info("Received UnreachableMember: memberName {}, address: {}", memberName, message.member().address());
        markMemberUnavailable(memberName);
    /**
     * For every local shard whose current leader lives on the given member, marks the leader
     * unavailable, drops the cached primary info, and marks the peer down.
     */
    private void markMemberUnavailable(final MemberName memberName) {
        final String memberStr = memberName.getName();
        for (ShardInformation info : localShards.values()) {
            String leaderId = info.getLeaderId();
            // XXX: why are we using String#contains() here?
            if (leaderId != null && leaderId.contains(memberStr)) {
                LOG.debug("Marking Leader {} as unavailable.", leaderId);
                info.setLeaderAvailable(false);
                // The cached primary points at an unreachable leader - invalidate it.
                primaryShardInfoCache.remove(info.getShardName());
            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
    /**
     * For every local shard whose current leader lives on the given member, marks the leader
     * available again and marks the peer up.
     */
    private void markMemberAvailable(final MemberName memberName) {
        final String memberStr = memberName.getName();
        for (ShardInformation info : localShards.values()) {
            String leaderId = info.getLeaderId();
            // XXX: why are we using String#contains() here?
            if (leaderId != null && leaderId.contains(memberStr)) {
                LOG.debug("Marking Leader {} as available.", leaderId);
                info.setLeaderAvailable(true);
            info.peerUp(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
    /**
     * Installs a new DatastoreContextFactory and pushes a freshly-built context to every
     * local shard so runtime config changes take effect.
     */
    private void onDatastoreContextFactory(final DatastoreContextFactory factory) {
        datastoreContextFactory = factory;
        for (ShardInformation info : localShards.values()) {
            info.setDatastoreContext(newShardDatastoreContext(info.getShardName()), getSelf());
    /** Replies to the sender with the stringified ids of all local shards, wrapped in Status.Success. */
    private void onGetLocalShardIds() {
        final List<String> response = new ArrayList<>(localShards.size());
        for (ShardInformation info : localShards.values()) {
            response.add(info.getShardId().toString());
        getSender().tell(new Status.Success(response), getSelf());
    /**
     * Switches the RAFT behavior of either one named local shard (failing the sender if the
     * shard is not local) or, when no shard id is given, every local shard.
     */
    private void onSwitchShardBehavior(final SwitchShardBehavior message) {
        final ShardIdentifier identifier = message.getShardId();
        if (identifier != null) {
            final ShardInformation info = localShards.get(identifier.getShardName());
            getSender().tell(new Status.Failure(
                    new IllegalArgumentException("Shard " + identifier + " is not local")), getSelf());
            switchShardBehavior(info, new SwitchBehavior(message.getNewState(), message.getTerm()));
            // No shard id - apply the behavior switch to all local shards.
            for (ShardInformation info : localShards.values()) {
                switchShardBehavior(info, new SwitchBehavior(message.getNewState(), message.getTerm()));
        getSender().tell(new Status.Success(null), getSelf());
    /**
     * Forwards a SwitchBehavior message to the shard's actor, or logs a warning if the actor
     * has not been created yet.
     */
    private void switchShardBehavior(final ShardInformation info, final SwitchBehavior switchBehavior) {
        final ActorRef actor = info.getActor();
        if (actor != null) {
            actor.tell(switchBehavior, getSelf());
            LOG.warn("Could not switch the behavior of shard {} to {} - shard is not yet available",
                info.getShardName(), switchBehavior.getNewState());
1116 * Notifies all the local shards of a change in the schema context.
1118 * @param message the message to send
    private void updateSchemaContext(final Object message) {
        schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
        LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getAllModuleIdentifiers().size());
        for (ShardInformation info : localShards.values()) {
            info.setSchemaContext(schemaContext);
            if (info.getActor() == null) {
                // Shard actors are created lazily, only once a SchemaContext is available.
                LOG.debug("Creating Shard {}", info.getShardId());
                info.setActor(newShardActor(info));
                // Existing actor - just forward the updated SchemaContext to it.
                info.getActor().tell(message, getSelf());
    /** Returns the {@link ClusterWrapper} used by this shard manager. */
    protected ClusterWrapper getCluster() {
    /**
     * Creates the actor for a shard, named by its stringified shard id and run on the
     * dedicated shard dispatcher.
     */
    protected ActorRef newShardActor(final ShardInformation info) {
        return getContext().actorOf(info.newProps().withDispatcher(shardDispatcherPath),
                info.getShardId().toString());
    /**
     * Locates the primary (leader) replica for a shard. If an active local replica exists, the
     * reply is derived from its state: LocalPrimaryShardFound when this node is the leader and
     * local state may be returned, otherwise RemotePrimaryShardFound. Failing that, the request
     * is forwarded as a RemoteFindPrimary to peer shard managers not yet visited; if none
     * remain, the sender receives a PrimaryNotFoundException.
     */
    private void findPrimary(final FindPrimary message) {
        LOG.debug("{}: In findPrimary: {}", persistenceId(), message);
        final String shardName = message.getShardName();
        // Local state may only be handed back for requests that originated locally.
        final boolean canReturnLocalShardState = !(message instanceof RemoteFindPrimary);
        // First see if the there is a local replica for the shard
        final ShardInformation info = localShards.get(shardName);
        if (info != null && info.isActiveMember()) {
            sendResponse(info, message.isWaitUntilReady(), true, () -> {
                String primaryPath = info.getSerializedLeaderActor();
                Object found = canReturnLocalShardState && info.isLeader()
                        ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
                            new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());
                LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
        // Track which shard-manager addresses have been consulted to avoid forwarding loops.
        final Collection<String> visitedAddresses;
        if (message instanceof RemoteFindPrimary) {
            visitedAddresses = ((RemoteFindPrimary)message).getVisitedAddresses();
            visitedAddresses = new ArrayList<>(1);
        visitedAddresses.add(peerAddressResolver.getShardManagerActorPathBuilder(cluster.getSelfAddress()).toString());
        for (String address: peerAddressResolver.getShardManagerPeerActorAddresses()) {
            if (visitedAddresses.contains(address)) {
            LOG.debug("{}: findPrimary for {} forwarding to remote ShardManager {}, visitedAddresses: {}",
                    persistenceId(), shardName, address, visitedAddresses);
            getContext().actorSelection(address).forward(new RemoteFindPrimary(shardName,
                    message.isWaitUntilReady(), visitedAddresses), getContext());
        // Nobody could answer - report failure to the original sender.
        LOG.debug("{}: No shard found for {}", persistenceId(), shardName);
        getSender().tell(new PrimaryNotFoundException(
                String.format("No primary shard found for %s.", shardName)), getSelf());
    /**
     * Asynchronously finds the primary for a shard by asking self with a FindPrimary message
     * (timeout: twice the shard initialization timeout) and dispatches the typed result to the
     * given handler on the client dispatcher.
     */
    private void findPrimary(final String shardName, final FindPrimaryResponseHandler handler) {
        Timeout findPrimaryTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
                .getShardInitializationTimeout().duration().$times(2));
        Future<Object> futureObj = ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
        futureObj.onComplete(new OnComplete<Object>() {
            public void onComplete(final Throwable failure, final Object response) {
                if (failure != null) {
                    handler.onFailure(failure);
                if (response instanceof RemotePrimaryShardFound) {
                    handler.onRemotePrimaryShardFound((RemotePrimaryShardFound) response);
                } else if (response instanceof LocalPrimaryShardFound) {
                    handler.onLocalPrimaryFound((LocalPrimaryShardFound) response);
                    handler.onUnknownResponse(response);
        }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1222 * Construct the name of the shard actor given the name of the member on
1223 * which the shard resides and the name of the shard.
1225 * @param memberName the member name
1226 * @param shardName the shard name
    /** Builds the shard identifier for the given member/shard pair via the peer address resolver. */
    private ShardIdentifier getShardIdentifier(final MemberName memberName, final String shardName) {
        return peerAddressResolver.getShardIdentifier(memberName, shardName);
1234 * Create shards that are local to the member on which the ShardManager runs.
    private void createLocalShards() {
        MemberName memberName = this.cluster.getCurrentMemberName();
        Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);
        // Index any restore-from-snapshot shard snapshots by shard name for lookup below.
        Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots = new HashMap<>();
        if (restoreFromSnapshot != null) {
            for (DatastoreSnapshot.ShardSnapshot snapshot: restoreFromSnapshot.getShardSnapshots()) {
                shardSnapshots.put(snapshot.getName(), snapshot);
        restoreFromSnapshot = null; // null out to GC
        // Register a ShardInformation (but not yet an actor) for each configured local shard.
        for (String shardName : memberShardNames) {
            ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
            LOG.debug("{}: Creating local shard: {}", persistenceId(), shardId);
            Map<String, String> peerAddresses = getPeerAddresses(shardName);
            localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses,
                    newShardDatastoreContext(shardName), Shard.builder().restoreFromSnapshot(
                        shardSnapshots.get(shardName)), peerAddressResolver));
     * Given the name of the shard, find the addresses of all its peers.
1264 * @param shardName the shard name
    private Map<String, String> getPeerAddresses(final String shardName) {
        // Members hosting this shard come from the static module-shard configuration.
        final Collection<MemberName> members = configuration.getMembersFromShardName(shardName);
        return getPeerAddresses(shardName, members);
    /**
     * Maps each given member's shard id (excluding the current member) to its resolved shard
     * actor address; the address may be null if the member has not been resolved yet.
     */
    private Map<String, String> getPeerAddresses(final String shardName, final Collection<MemberName> members) {
        Map<String, String> peerAddresses = new HashMap<>();
        MemberName currentMemberName = this.cluster.getCurrentMemberName();
        for (MemberName memberName : members) {
            if (!currentMemberName.equals(memberName)) {
                ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
                String address = peerAddressResolver.getShardActorAddress(shardName, memberName);
                peerAddresses.put(shardId.toString(), address);
        return peerAddresses;
    /**
     * One-for-one supervisor strategy (max 10 retries per minute) that logs and resumes child
     * shard actors on unexpected exceptions rather than restarting them.
     */
    public SupervisorStrategy supervisorStrategy() {
        return new OneForOneStrategy(10, Duration.create("1 minute"),
                (Function<Throwable, Directive>) t -> {
                    LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
                    return SupervisorStrategy.resume();
    /** Returns the Akka persistence id for this shard manager. */
    public String persistenceId() {
        return persistenceId;
    /** Returns the JMX MBean exposing shard manager state (package-private, used by tests/peers). */
    ShardManagerInfoMBean getMBean() {
        return shardManagerMBean;
    /**
     * Checks whether a replica add/remove operation for the shard is already in flight; if so,
     * fails the sender with IllegalStateException and reports true so the caller bails out.
     */
    private boolean isShardReplicaOperationInProgress(final String shardName, final ActorRef sender) {
        if (shardReplicaOperationsInProgress.contains(shardName)) {
            String msg = String.format("A shard replica operation for %s is already in progress", shardName);
            LOG.debug("{}: {}", persistenceId(), msg);
            sender.tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
    /**
     * Handles a request to add a local replica of a prefix-based shard: derives the shard name
     * from the prefix, requires a SchemaContext to be available, then locates the shard's
     * primary and schedules the actual replica addition via a RunnableMessage to self.
     */
    private void onAddPrefixShardReplica(final AddPrefixShardReplica message) {
        LOG.debug("{}: onAddPrefixShardReplica: {}", persistenceId(), message);
        final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
                ClusterUtils.getCleanShardName(message.getShardPrefix()));
        final String shardName = shardId.getShardName();
        // Create the localShard
        if (schemaContext == null) {
            // Cannot instantiate a shard without a SchemaContext - fail fast.
            String msg = String.format(
                    "No SchemaContext is available in order to create a local shard instance for %s", shardName);
            LOG.debug("{}: {}", persistenceId(), msg);
            getSender().tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
        findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(), shardName, persistenceId(),
            public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
                // Defer the add until any in-progress stop of a previous shard actor completes.
                final RunnableMessage runnable = (RunnableMessage) () -> addPrefixShard(getShardName(),
                        message.getShardPrefix(), response, getSender());
                if (!isPreviousShardActorStopInProgress(getShardName(), runnable)) {
                    getSelf().tell(runnable, getTargetActor());
            public void onLocalPrimaryFound(final LocalPrimaryShardFound message) {
                // We are already the primary - a local replica necessarily exists.
                sendLocalReplicaAlreadyExistsReply(getShardName(), getTargetActor());
    /**
     * Handles a request to add a local replica of a module-based shard: validates the shard is
     * in the static configuration and a SchemaContext exists, then locates the shard's primary
     * and schedules the actual replica addition via a RunnableMessage to self.
     */
    private void onAddShardReplica(final AddShardReplica shardReplicaMsg) {
        final String shardName = shardReplicaMsg.getShardName();
        LOG.debug("{}: onAddShardReplica: {}", persistenceId(), shardReplicaMsg);
        // verify the shard with the specified name is present in the cluster configuration
        if (!this.configuration.isShardConfigured(shardName)) {
            String msg = String.format("No module configuration exists for shard %s", shardName);
            LOG.debug("{}: {}", persistenceId(), msg);
            getSender().tell(new Status.Failure(new IllegalArgumentException(msg)), getSelf());
        // Create the localShard
        if (schemaContext == null) {
            // Cannot instantiate a shard without a SchemaContext - fail fast.
            String msg = String.format(
                    "No SchemaContext is available in order to create a local shard instance for %s", shardName);
            LOG.debug("{}: {}", persistenceId(), msg);
            getSender().tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
        findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(), shardName, persistenceId(),
            public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
                // Defer the add until any in-progress stop of a previous shard actor completes.
                final RunnableMessage runnable = (RunnableMessage) () ->
                        addShard(getShardName(), response, getSender());
                if (!isPreviousShardActorStopInProgress(getShardName(), runnable)) {
                    getSelf().tell(runnable, getTargetActor());
            public void onLocalPrimaryFound(final LocalPrimaryShardFound message) {
                // We are already the primary - a local replica necessarily exists.
                sendLocalReplicaAlreadyExistsReply(getShardName(), getTargetActor());
    /** Fails the sender with AlreadyExistsException because a local replica of the shard exists. */
    private void sendLocalReplicaAlreadyExistsReply(final String shardName, final ActorRef sender) {
        String msg = String.format("Local shard %s already exists", shardName);
        LOG.debug("{}: {}", persistenceId(), msg);
        sender.tell(new Status.Failure(new AlreadyExistsException(msg)), getSelf());
    /**
     * Adds a local replica of a prefix-based shard. If no local ShardInformation exists yet, a
     * new inactive one is created with elections disabled (so it stays a follower until the
     * leader accepts it) and removeShardOnFailure is set so a failed AddServer rolls it back.
     * Finally delegates to execAddShard to send the AddServer request to the leader.
     */
    private void addPrefixShard(final String shardName, final YangInstanceIdentifier shardPrefix,
                                final RemotePrimaryShardFound response, final ActorRef sender) {
        if (isShardReplicaOperationInProgress(shardName, sender)) {
        shardReplicaOperationsInProgress.add(shardName);
        final ShardInformation shardInfo;
        final boolean removeShardOnFailure;
        ShardInformation existingShardInfo = localShards.get(shardName);
        if (existingShardInfo == null) {
            // Freshly created here - roll back on AddServer failure.
            removeShardOnFailure = true;
            ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
            final Builder builder = newShardDatastoreContextBuilder(shardName);
            builder.storeRoot(shardPrefix).customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
            DatastoreContext datastoreContext = builder.build();
            shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
                    Shard.builder(), peerAddressResolver);
            shardInfo.setActiveMember(false);
            shardInfo.setSchemaContext(schemaContext);
            localShards.put(shardName, shardInfo);
            shardInfo.setActor(newShardActor(shardInfo));
            // Shard already known locally - keep it even if the AddServer request fails.
            removeShardOnFailure = false;
            shardInfo = existingShardInfo;
        execAddShard(shardName, shardInfo, response, removeShardOnFailure, sender);
    /**
     * Adds a local replica of a module-based shard. Mirrors addPrefixShard: creates an inactive
     * local shard with elections disabled when none exists (removeShardOnFailure = true so a
     * failed AddServer rolls it back), then delegates to execAddShard.
     */
    private void addShard(final String shardName, final RemotePrimaryShardFound response, final ActorRef sender) {
        if (isShardReplicaOperationInProgress(shardName, sender)) {
        shardReplicaOperationsInProgress.add(shardName);
        final ShardInformation shardInfo;
        final boolean removeShardOnFailure;
        ShardInformation existingShardInfo = localShards.get(shardName);
        if (existingShardInfo == null) {
            // Freshly created here - roll back on AddServer failure.
            removeShardOnFailure = true;
            ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
            DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName)
                    .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();
            shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
                    Shard.builder(), peerAddressResolver);
            shardInfo.setActiveMember(false);
            shardInfo.setSchemaContext(schemaContext);
            localShards.put(shardName, shardInfo);
            shardInfo.setActor(newShardActor(shardInfo));
            // Shard already known locally - keep it even if the AddServer request fails.
            removeShardOnFailure = false;
            shardInfo = existingShardInfo;
        execAddShard(shardName, shardInfo, response, removeShardOnFailure, sender);
    /**
     * Sends an AddServer request to the shard leader asking it to accept the local replica as a
     * voting member, then forwards the outcome back to self as ForwardedAddServerReply or
     * ForwardedAddServerFailure (preserving the original sender and the rollback flag).
     */
    private void execAddShard(final String shardName,
                              final ShardInformation shardInfo,
                              final RemotePrimaryShardFound response,
                              final boolean removeShardOnFailure,
                              final ActorRef sender) {
        final String localShardAddress =
                peerAddressResolver.getShardActorAddress(shardName, cluster.getCurrentMemberName());
        //inform ShardLeader to add this shard as a replica by sending an AddServer message
        LOG.debug("{}: Sending AddServer message to peer {} for shard {}", persistenceId(),
                response.getPrimaryPath(), shardInfo.getShardId());
        final Timeout addServerTimeout = new Timeout(shardInfo.getDatastoreContext()
                .getShardLeaderElectionTimeout().duration());
        final Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
                new AddServer(shardInfo.getShardId().toString(), localShardAddress, true), addServerTimeout);
        futureObj.onComplete(new OnComplete<Object>() {
            public void onComplete(final Throwable failure, final Object addServerResponse) {
                if (failure != null) {
                    LOG.debug("{}: AddServer request to {} for {} failed", persistenceId(),
                            response.getPrimaryPath(), shardName, failure);
                    final String msg = String.format("AddServer request to leader %s for shard %s failed",
                            response.getPrimaryPath(), shardName);
                    // Route back through self so the failure is processed on the actor thread.
                    self().tell(new ForwardedAddServerFailure(shardName, msg, failure, removeShardOnFailure), sender);
                    self().tell(new ForwardedAddServerReply(shardInfo, (AddServerReply)addServerResponse,
                        response.getPrimaryPath(), removeShardOnFailure), sender);
        }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
    /**
     * Handles a failed AddServer request: clears the in-progress marker, optionally removes and
     * poisons the locally-created shard (rollback), and fails the original sender.
     */
    private void onAddServerFailure(final String shardName, final String message, final Throwable failure,
                                    final ActorRef sender, final boolean removeShardOnFailure) {
        shardReplicaOperationsInProgress.remove(shardName);
        if (removeShardOnFailure) {
            ShardInformation shardInfo = localShards.remove(shardName);
            // NOTE(review): assumes the shard is still present - if remove() returned null this
            // would NPE; confirm the shard cannot be removed concurrently.
            if (shardInfo.getActor() != null) {
                shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());
        sender.tell(new Status.Failure(message == null ? failure :
            new RuntimeException(message, failure)), getSelf());
    /**
     * Handles the leader's AddServer reply: on OK the local shard is promoted to an active
     * voting member (elections re-enabled via a fresh DatastoreContext); ALREADY_EXISTS maps to
     * an AlreadyExistsException reply; any other status is converted to an exception and routed
     * through onAddServerFailure (honoring removeShardOnFailure).
     */
    private void onAddServerReply(final ShardInformation shardInfo, final AddServerReply replyMsg,
                                  final ActorRef sender, final String leaderPath, final boolean removeShardOnFailure) {
        String shardName = shardInfo.getShardName();
        shardReplicaOperationsInProgress.remove(shardName);
        LOG.debug("{}: Received {} for shard {} from leader {}", persistenceId(), replyMsg, shardName, leaderPath);
        if (replyMsg.getStatus() == ServerChangeStatus.OK) {
            LOG.debug("{}: Leader shard successfully added the replica shard {}", persistenceId(), shardName);
            // Make the local shard voting capable
            shardInfo.setDatastoreContext(newShardDatastoreContext(shardName), getSelf());
            shardInfo.setActiveMember(true);
            sender.tell(new Status.Success(null), getSelf());
        } else if (replyMsg.getStatus() == ServerChangeStatus.ALREADY_EXISTS) {
            sendLocalReplicaAlreadyExistsReply(shardName, sender);
            LOG.warn("{}: Leader failed to add shard replica {} with status {}",
                    persistenceId(), shardName, replyMsg.getStatus());
            Exception failure = getServerChangeException(AddServer.class, replyMsg.getStatus(), leaderPath,
                    shardInfo.getShardId());
            onAddServerFailure(shardName, null, failure, sender, removeShardOnFailure);
    /**
     * Maps a non-OK ServerChangeStatus from a leader's server-change reply to an appropriate
     * exception for the requester (timeout, no leader, unsupported request, or a generic
     * RuntimeException for everything else).
     */
    private static Exception getServerChangeException(final Class<?> serverChange,
            final ServerChangeStatus serverChangeStatus, final String leaderPath, final ShardIdentifier shardId) {
        switch (serverChangeStatus) {
                return new TimeoutException(String.format(
                        "The shard leader %s timed out trying to replicate the initial data to the new shard %s."
                        + "Possible causes - there was a problem replicating the data or shard leadership changed "
                        + "while replicating the shard data", leaderPath, shardId.getShardName()));
                return createNoShardLeaderException(shardId);
                return new UnsupportedOperationException(String.format("%s request is not supported for shard %s",
                        serverChange.getSimpleName(), shardId.getShardName()));
                return new RuntimeException(String.format("%s request to leader %s for shard %s failed with status %s",
                        serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
    /**
     * Handles a request to remove a local replica of a module-based shard: finds the shard's
     * primary (local or remote) and schedules the removal via a RunnableMessage to self.
     */
    private void onRemoveShardReplica(final RemoveShardReplica shardReplicaMsg) {
        LOG.debug("{}: onRemoveShardReplica: {}", persistenceId(), shardReplicaMsg);
        findPrimary(shardReplicaMsg.getShardName(), new AutoFindPrimaryFailureResponseHandler(getSender(),
                shardReplicaMsg.getShardName(), persistenceId(), getSelf()) {
            public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
                doRemoveShardReplicaAsync(response.getPrimaryPath());
            public void onLocalPrimaryFound(final LocalPrimaryShardFound response) {
                doRemoveShardReplicaAsync(response.getPrimaryPath());
            // Route through self so removal runs on the actor thread, not the ask-callback thread.
            private void doRemoveShardReplicaAsync(final String primaryPath) {
                getSelf().tell((RunnableMessage) () -> removeShardReplica(shardReplicaMsg, getShardName(),
                        primaryPath, getSender()), getTargetActor());
    /**
     * Handles a request to remove a local replica of a prefix-based shard: derives the shard
     * name from the prefix, finds the shard's primary (local or remote), and schedules the
     * removal via a RunnableMessage to self.
     */
    private void onRemovePrefixShardReplica(final RemovePrefixShardReplica message) {
        LOG.debug("{}: onRemovePrefixShardReplica: {}", persistenceId(), message);
        final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
                ClusterUtils.getCleanShardName(message.getShardPrefix()));
        final String shardName = shardId.getShardName();
        findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(),
                shardName, persistenceId(), getSelf()) {
            public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
                doRemoveShardReplicaAsync(response.getPrimaryPath());
            public void onLocalPrimaryFound(final LocalPrimaryShardFound response) {
                doRemoveShardReplicaAsync(response.getPrimaryPath());
            // Route through self so removal runs on the actor thread, not the ask-callback thread.
            private void doRemoveShardReplicaAsync(final String primaryPath) {
                getSelf().tell((RunnableMessage) () -> removePrefixShardReplica(message, getShardName(),
                        primaryPath, getSender()), getTargetActor());
1608 private void persistShardList() {
1609 List<String> shardList = new ArrayList<>(localShards.keySet());
1610 for (ShardInformation shardInfo : localShards.values()) {
1611 if (!shardInfo.isActiveMember()) {
1612 shardList.remove(shardInfo.getShardName());
1615 LOG.debug("{}: persisting the shard list {}", persistenceId(), shardList);
1616 saveSnapshot(updateShardManagerSnapshot(shardList, configuration.getAllPrefixShardConfigurations()));
1619 private ShardManagerSnapshot updateShardManagerSnapshot(
1620 final List<String> shardList,
1621 final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> allPrefixShardConfigurations) {
1622 currentSnapshot = new ShardManagerSnapshot(shardList, allPrefixShardConfigurations);
1623 return currentSnapshot;
1626 private void applyShardManagerSnapshot(final ShardManagerSnapshot snapshot) {
1627 currentSnapshot = snapshot;
1629 LOG.debug("{}: onSnapshotOffer: {}", persistenceId(), currentSnapshot);
1631 final MemberName currentMember = cluster.getCurrentMemberName();
1632 Set<String> configuredShardList =
1633 new HashSet<>(configuration.getMemberShardNames(currentMember));
1634 for (String shard : currentSnapshot.getShardList()) {
1635 if (!configuredShardList.contains(shard)) {
1636 // add the current member as a replica for the shard
1637 LOG.debug("{}: adding shard {}", persistenceId(), shard);
1638 configuration.addMemberReplicaForShard(shard, currentMember);
1640 configuredShardList.remove(shard);
1643 for (String shard : configuredShardList) {
1644 // remove the member as a replica for the shard
1645 LOG.debug("{}: removing shard {}", persistenceId(), shard);
1646 configuration.removeMemberReplicaForShard(shard, currentMember);
1650 private void onSaveSnapshotSuccess(final SaveSnapshotSuccess successMessage) {
1651 LOG.debug("{} saved ShardManager snapshot successfully. Deleting the prev snapshot if available",
1653 deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), successMessage.metadata().timestamp() - 1,
1657 private void onChangeShardServersVotingStatus(final ChangeShardMembersVotingStatus changeMembersVotingStatus) {
1658 LOG.debug("{}: onChangeShardServersVotingStatus: {}", persistenceId(), changeMembersVotingStatus);
1660 String shardName = changeMembersVotingStatus.getShardName();
1661 Map<String, Boolean> serverVotingStatusMap = new HashMap<>();
1662 for (Entry<String, Boolean> e: changeMembersVotingStatus.getMeberVotingStatusMap().entrySet()) {
1663 serverVotingStatusMap.put(getShardIdentifier(MemberName.forName(e.getKey()), shardName).toString(),
1667 ChangeServersVotingStatus changeServersVotingStatus = new ChangeServersVotingStatus(serverVotingStatusMap);
1669 findLocalShard(shardName, getSender(),
1670 localShardFound -> changeShardMembersVotingStatus(changeServersVotingStatus, shardName,
1671 localShardFound.getPath(), getSender()));
1674 private void onFlipShardMembersVotingStatus(final FlipShardMembersVotingStatus flipMembersVotingStatus) {
1675 LOG.debug("{}: onFlipShardMembersVotingStatus: {}", persistenceId(), flipMembersVotingStatus);
1677 ActorRef sender = getSender();
1678 final String shardName = flipMembersVotingStatus.getShardName();
1679 findLocalShard(shardName, sender, localShardFound -> {
1680 Future<Object> future = ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
1681 Timeout.apply(30, TimeUnit.SECONDS));
1683 future.onComplete(new OnComplete<Object>() {
1685 public void onComplete(final Throwable failure, final Object response) {
1686 if (failure != null) {
1687 sender.tell(new Status.Failure(new RuntimeException(
1688 String.format("Failed to access local shard %s", shardName), failure)), self());
1692 OnDemandRaftState raftState = (OnDemandRaftState) response;
1693 Map<String, Boolean> serverVotingStatusMap = new HashMap<>();
1694 for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
1695 serverVotingStatusMap.put(e.getKey(), !e.getValue());
1698 serverVotingStatusMap.put(getShardIdentifier(cluster.getCurrentMemberName(), shardName)
1699 .toString(), !raftState.isVoting());
1701 changeShardMembersVotingStatus(new ChangeServersVotingStatus(serverVotingStatusMap),
1702 shardName, localShardFound.getPath(), sender);
1704 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1709 private void findLocalShard(final FindLocalShard message) {
1710 LOG.debug("{}: findLocalShard : {}", persistenceId(), message.getShardName());
1712 final ShardInformation shardInformation = localShards.get(message.getShardName());
1714 if (shardInformation == null) {
1715 LOG.debug("{}: Local shard {} not found - shards present: {}",
1716 persistenceId(), message.getShardName(), localShards.keySet());
1718 getSender().tell(new LocalShardNotFound(message.getShardName()), getSelf());
1722 sendResponse(shardInformation, message.isWaitUntilInitialized(), false,
1723 () -> new LocalShardFound(shardInformation.getActor()));
1726 private void findLocalShard(final String shardName, final ActorRef sender,
1727 final Consumer<LocalShardFound> onLocalShardFound) {
1728 Timeout findLocalTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
1729 .getShardInitializationTimeout().duration().$times(2));
1731 Future<Object> futureObj = ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
1732 futureObj.onComplete(new OnComplete<Object>() {
1734 public void onComplete(final Throwable failure, final Object response) {
1735 if (failure != null) {
1736 LOG.debug("{}: Received failure from FindLocalShard for shard {}", persistenceId, shardName,
1738 sender.tell(new Status.Failure(new RuntimeException(
1739 String.format("Failed to find local shard %s", shardName), failure)), self());
1741 if (response instanceof LocalShardFound) {
1742 getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept((LocalShardFound) response),
1744 } else if (response instanceof LocalShardNotFound) {
1745 String msg = String.format("Local shard %s does not exist", shardName);
1746 LOG.debug("{}: {}", persistenceId, msg);
1747 sender.tell(new Status.Failure(new IllegalArgumentException(msg)), self());
1749 String msg = String.format("Failed to find local shard %s: received response: %s",
1750 shardName, response);
1751 LOG.debug("{}: {}", persistenceId, msg);
1752 sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response :
1753 new RuntimeException(msg)), self());
1757 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1760 private void changeShardMembersVotingStatus(final ChangeServersVotingStatus changeServersVotingStatus,
1761 final String shardName, final ActorRef shardActorRef, final ActorRef sender) {
1762 if (isShardReplicaOperationInProgress(shardName, sender)) {
1766 shardReplicaOperationsInProgress.add(shardName);
1768 DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
1769 final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
1771 LOG.debug("{}: Sending ChangeServersVotingStatus message {} to local shard {}", persistenceId(),
1772 changeServersVotingStatus, shardActorRef.path());
1774 Timeout timeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration().$times(2));
1775 Future<Object> futureObj = ask(shardActorRef, changeServersVotingStatus, timeout);
1777 futureObj.onComplete(new OnComplete<Object>() {
1779 public void onComplete(final Throwable failure, final Object response) {
1780 shardReplicaOperationsInProgress.remove(shardName);
1781 if (failure != null) {
1782 String msg = String.format("ChangeServersVotingStatus request to local shard %s failed",
1783 shardActorRef.path());
1784 LOG.debug("{}: {}", persistenceId(), msg, failure);
1785 sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
1787 LOG.debug("{}: Received {} from local shard {}", persistenceId(), response, shardActorRef.path());
1789 ServerChangeReply replyMsg = (ServerChangeReply) response;
1790 if (replyMsg.getStatus() == ServerChangeStatus.OK) {
1791 LOG.debug("{}: ChangeServersVotingStatus succeeded for shard {}", persistenceId(), shardName);
1792 sender.tell(new Status.Success(null), getSelf());
1793 } else if (replyMsg.getStatus() == ServerChangeStatus.INVALID_REQUEST) {
1794 sender.tell(new Status.Failure(new IllegalArgumentException(String.format(
1795 "The requested voting state change for shard %s is invalid. At least one member "
1796 + "must be voting", shardId.getShardName()))), getSelf());
1798 LOG.warn("{}: ChangeServersVotingStatus failed for shard {} with status {}",
1799 persistenceId(), shardName, replyMsg.getStatus());
1801 Exception error = getServerChangeException(ChangeServersVotingStatus.class,
1802 replyMsg.getStatus(), shardActorRef.path().toString(), shardId);
1803 sender.tell(new Status.Failure(error), getSelf());
1807 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1810 private static final class ForwardedAddServerReply {
1811 ShardInformation shardInfo;
1812 AddServerReply addServerReply;
1814 boolean removeShardOnFailure;
1816 ForwardedAddServerReply(final ShardInformation shardInfo, final AddServerReply addServerReply,
1817 final String leaderPath, final boolean removeShardOnFailure) {
1818 this.shardInfo = shardInfo;
1819 this.addServerReply = addServerReply;
1820 this.leaderPath = leaderPath;
1821 this.removeShardOnFailure = removeShardOnFailure;
1825 private static final class ForwardedAddServerFailure {
1827 String failureMessage;
1829 boolean removeShardOnFailure;
1831 ForwardedAddServerFailure(final String shardName, final String failureMessage, final Throwable failure,
1832 final boolean removeShardOnFailure) {
1833 this.shardName = shardName;
1834 this.failureMessage = failureMessage;
1835 this.failure = failure;
1836 this.removeShardOnFailure = removeShardOnFailure;
1840 static class OnShardInitialized {
1841 private final Runnable replyRunnable;
1842 private Cancellable timeoutSchedule;
1844 OnShardInitialized(final Runnable replyRunnable) {
1845 this.replyRunnable = replyRunnable;
1848 Runnable getReplyRunnable() {
1849 return replyRunnable;
1852 Cancellable getTimeoutSchedule() {
1853 return timeoutSchedule;
1856 void setTimeoutSchedule(final Cancellable timeoutSchedule) {
1857 this.timeoutSchedule = timeoutSchedule;
1861 static class OnShardReady extends OnShardInitialized {
1862 OnShardReady(final Runnable replyRunnable) {
1863 super(replyRunnable);
1867 private interface RunnableMessage extends Runnable {
1871 * The FindPrimaryResponseHandler provides specific callback methods which are invoked when a response to the
1872 * a remote or local find primary message is processed.
1874 private interface FindPrimaryResponseHandler {
1876 * Invoked when a Failure message is received as a response.
1878 * @param failure the failure exception
1880 void onFailure(Throwable failure);
1883 * Invoked when a RemotePrimaryShardFound response is received.
1885 * @param response the response
1887 void onRemotePrimaryShardFound(RemotePrimaryShardFound response);
1890 * Invoked when a LocalPrimaryShardFound response is received.
1892 * @param response the response
1894 void onLocalPrimaryFound(LocalPrimaryShardFound response);
1897 * Invoked when an unknown response is received. This is another type of failure.
1899 * @param response the response
1901 void onUnknownResponse(Object response);
1905 * The AutoFindPrimaryFailureResponseHandler automatically processes Failure responses when finding a primary
1906 * replica and sends a wrapped Failure response to some targetActor.
1908 private abstract static class AutoFindPrimaryFailureResponseHandler implements FindPrimaryResponseHandler {
1909 private final ActorRef targetActor;
1910 private final String shardName;
1911 private final String persistenceId;
1912 private final ActorRef shardManagerActor;
1915 * Constructs an instance.
1917 * @param targetActor The actor to whom the Failure response should be sent when a FindPrimary failure occurs
1918 * @param shardName The name of the shard for which the primary replica had to be found
1919 * @param persistenceId The persistenceId for the ShardManager
1920 * @param shardManagerActor The ShardManager actor which triggered the call to FindPrimary
1922 protected AutoFindPrimaryFailureResponseHandler(final ActorRef targetActor, final String shardName,
1923 final String persistenceId, final ActorRef shardManagerActor) {
1924 this.targetActor = Preconditions.checkNotNull(targetActor);
1925 this.shardName = Preconditions.checkNotNull(shardName);
1926 this.persistenceId = Preconditions.checkNotNull(persistenceId);
1927 this.shardManagerActor = Preconditions.checkNotNull(shardManagerActor);
1930 public ActorRef getTargetActor() {
1934 public String getShardName() {
1939 public void onFailure(final Throwable failure) {
1940 LOG.debug("{}: Received failure from FindPrimary for shard {}", persistenceId, shardName, failure);
1941 targetActor.tell(new Status.Failure(new RuntimeException(
1942 String.format("Failed to find leader for shard %s", shardName), failure)), shardManagerActor);
1946 public void onUnknownResponse(final Object response) {
1947 String msg = String.format("Failed to find leader for shard %s: received response: %s",
1948 shardName, response);
1949 LOG.debug("{}: {}", persistenceId, msg);
1950 targetActor.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response :
1951 new RuntimeException(msg)), shardManagerActor);
1956 * The WrappedShardResponse class wraps a response from a Shard.
1958 private static final class WrappedShardResponse {
1959 private final ShardIdentifier shardId;
1960 private final Object response;
1961 private final String leaderPath;
1963 WrappedShardResponse(final ShardIdentifier shardId, final Object response, final String leaderPath) {
1964 this.shardId = shardId;
1965 this.response = response;
1966 this.leaderPath = leaderPath;
1969 ShardIdentifier getShardId() {
1973 Object getResponse() {
1977 String getLeaderPath() {
1982 private static final class ShardNotInitializedTimeout {
1983 private final ActorRef sender;
1984 private final ShardInformation shardInfo;
1985 private final OnShardInitialized onShardInitialized;
1987 ShardNotInitializedTimeout(final ShardInformation shardInfo, final OnShardInitialized onShardInitialized,
1988 final ActorRef sender) {
1989 this.sender = sender;
1990 this.shardInfo = shardInfo;
1991 this.onShardInitialized = onShardInitialized;
1994 ActorRef getSender() {
1998 ShardInformation getShardInfo() {
2002 OnShardInitialized getOnShardInitialized() {
2003 return onShardInitialized;