/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
9 package org.opendaylight.controller.cluster.datastore.shardmanager;
import static akka.pattern.Patterns.ask;
import static java.util.Objects.requireNonNull;

import akka.actor.ActorRef;
import akka.actor.Address;
import akka.actor.Cancellable;
import akka.actor.OneForOneStrategy;
import akka.actor.PoisonPill;
import akka.actor.Status;
import akka.actor.SupervisorStrategy;
import akka.actor.SupervisorStrategy.Directive;
import akka.cluster.ClusterEvent;
import akka.cluster.ClusterEvent.MemberWeaklyUp;
import akka.cluster.Member;
import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
import akka.japi.Function;
import akka.pattern.Patterns;
import akka.persistence.DeleteSnapshotsFailure;
import akka.persistence.DeleteSnapshotsSuccess;
import akka.persistence.RecoveryCompleted;
import akka.persistence.SaveSnapshotFailure;
import akka.persistence.SaveSnapshotSuccess;
import akka.persistence.SnapshotOffer;
import akka.persistence.SnapshotSelectionCriteria;
import akka.util.Timeout;
import com.google.common.annotations.VisibleForTesting;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.function.Supplier;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.DatastoreContext;
import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
import org.opendaylight.controller.cluster.datastore.exceptions.AlreadyExistsException;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.AddPrefixShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.FlipShardMembersVotingStatus;
import org.opendaylight.controller.cluster.datastore.messages.GetShardRole;
import org.opendaylight.controller.cluster.datastore.messages.GetShardRoleReply;
import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
import org.opendaylight.controller.cluster.datastore.messages.RemovePrefixShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot;
import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.datastore.utils.CompositeOnComplete;
import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListenerReply;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.base.messages.SwitchBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
import org.opendaylight.controller.cluster.raft.messages.AddServer;
import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
import org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus;
import org.opendaylight.controller.cluster.raft.messages.RemoveServer;
import org.opendaylight.controller.cluster.raft.messages.RemoveServerReply;
import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply;
import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
import org.opendaylight.controller.cluster.sharding.PrefixedShardConfigUpdateHandler;
import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
/**
 * Manages the shards for a data store. The ShardManager has the following jobs:
 *
 * <ul>
 * <li> Create all the local shard replicas that belong on this cluster member
 * <li> Find the address of the local shard
 * <li> Find the primary replica for any given shard
 * <li> Monitor the cluster members and store their addresses
 * </ul>
 */
class ShardManager extends AbstractUntypedPersistentActorWithMetering {
    private static final Logger LOG = LoggerFactory.getLogger(ShardManager.class);

    // Stores a mapping between a shard name and its corresponding information
    // Shard names look like inventory, topology etc and are as specified in
    // configuration
    final Map<String, ShardInformation> localShards = new HashMap<>();

    // The type of a ShardManager reflects the type of the datastore itself
    // A data store could be of type config/operational
    private final String type;

    // Provides membership/address information about the local Akka cluster.
    private final ClusterWrapper cluster;

    // Static shard/module configuration for this datastore.
    private final Configuration configuration;

    // Akka dispatcher path used when creating Shard actors.
    final String shardDispatcherPath;

    // JMX bean exposing shard-manager state; registered in the constructor,
    // unregistered in postStop().
    private final ShardManagerInfo shardManagerMBean;

    // Factory for per-shard DatastoreContext instances; replaced when a
    // DatastoreContextFactory message is received (see handleCommand).
    private DatastoreContextFactory datastoreContextFactory;

    // Counted down once all local shards are ready with a known leader (see checkReady()).
    private final CountDownLatch waitTillReadyCountdownLatch;

    // Cache of primary-shard lookups; invalidated when a shard actor is removed.
    private final PrimaryShardInfoFutureCache primaryShardInfoCache;

    // Resolves peer shard addresses for remote cluster members.
    final ShardPeerAddressResolver peerAddressResolver;

    // Latest known schema context; updated via UpdateSchemaContext messages.
    private SchemaContext schemaContext;

    // Optional snapshot to restore the datastore from at startup; may be null.
    private DatastoreSnapshot restoreFromSnapshot;

    // Most recent ShardManagerSnapshot, either recovered or saved.
    private ShardManagerSnapshot currentSnapshot;

    // Names of shards with a replica add/remove operation currently in flight.
    private final Set<String> shardReplicaOperationsInProgress = new HashSet<>();

    // Per-shard composite callbacks tracking graceful shard-actor shutdowns in progress.
    private final Map<String, CompositeOnComplete<Boolean>> shardActorsStopping = new HashMap<>();

    // Callbacks invoked (with the shard name) when a shard's availability changes;
    // registered via RegisterForShardAvailabilityChanges.
    private final Set<Consumer<String>> shardAvailabilityCallbacks = new HashSet<>();

    // Persistence id for this actor; defaults to "shard-manager-<type>".
    private final String persistenceId;

    // The data store this manager belongs to; used by the prefix-config listener.
    private final AbstractDataStore dataStore;

    // Handles prefix-based shard configuration updates; created lazily in onInitConfigListener().
    private PrefixedShardConfigUpdateHandler configUpdateHandler;
183 ShardManager(final AbstractShardManagerCreator<?> builder) {
184 this.cluster = builder.getCluster();
185 this.configuration = builder.getConfiguration();
186 this.datastoreContextFactory = builder.getDatastoreContextFactory();
187 this.type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
188 this.shardDispatcherPath =
189 new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
190 this.waitTillReadyCountdownLatch = builder.getWaitTillReadyCountDownLatch();
191 this.primaryShardInfoCache = builder.getPrimaryShardInfoCache();
192 this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
194 String possiblePersistenceId = datastoreContextFactory.getBaseDatastoreContext().getShardManagerPersistenceId();
195 persistenceId = possiblePersistenceId != null ? possiblePersistenceId : "shard-manager-" + type;
197 peerAddressResolver = new ShardPeerAddressResolver(type, cluster.getCurrentMemberName());
199 // Subscribe this actor to cluster member events
200 cluster.subscribeToMemberEvents(getSelf());
202 shardManagerMBean = new ShardManagerInfo(getSelf(), cluster.getCurrentMemberName(),
203 "shard-manager-" + this.type,
204 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
205 shardManagerMBean.registerMBean();
207 dataStore = builder.getDistributedDataStore();
211 public void preStart() {
212 LOG.info("Starting ShardManager {}", persistenceId);
216 public void postStop() {
217 LOG.info("Stopping ShardManager {}", persistenceId());
219 shardManagerMBean.unregisterMBean();
223 public void handleCommand(final Object message) throws Exception {
224 if (message instanceof FindPrimary) {
225 findPrimary((FindPrimary)message);
226 } else if (message instanceof FindLocalShard) {
227 findLocalShard((FindLocalShard) message);
228 } else if (message instanceof UpdateSchemaContext) {
229 updateSchemaContext(message);
230 } else if (message instanceof ActorInitialized) {
231 onActorInitialized(message);
232 } else if (message instanceof ClusterEvent.MemberUp) {
233 memberUp((ClusterEvent.MemberUp) message);
234 } else if (message instanceof ClusterEvent.MemberWeaklyUp) {
235 memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
236 } else if (message instanceof ClusterEvent.MemberExited) {
237 memberExited((ClusterEvent.MemberExited) message);
238 } else if (message instanceof ClusterEvent.MemberRemoved) {
239 memberRemoved((ClusterEvent.MemberRemoved) message);
240 } else if (message instanceof ClusterEvent.UnreachableMember) {
241 memberUnreachable((ClusterEvent.UnreachableMember) message);
242 } else if (message instanceof ClusterEvent.ReachableMember) {
243 memberReachable((ClusterEvent.ReachableMember) message);
244 } else if (message instanceof DatastoreContextFactory) {
245 onDatastoreContextFactory((DatastoreContextFactory) message);
246 } else if (message instanceof RoleChangeNotification) {
247 onRoleChangeNotification((RoleChangeNotification) message);
248 } else if (message instanceof FollowerInitialSyncUpStatus) {
249 onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
250 } else if (message instanceof ShardNotInitializedTimeout) {
251 onShardNotInitializedTimeout((ShardNotInitializedTimeout) message);
252 } else if (message instanceof ShardLeaderStateChanged) {
253 onLeaderStateChanged((ShardLeaderStateChanged) message);
254 } else if (message instanceof SwitchShardBehavior) {
255 onSwitchShardBehavior((SwitchShardBehavior) message);
256 } else if (message instanceof CreateShard) {
257 onCreateShard((CreateShard)message);
258 } else if (message instanceof AddShardReplica) {
259 onAddShardReplica((AddShardReplica) message);
260 } else if (message instanceof AddPrefixShardReplica) {
261 onAddPrefixShardReplica((AddPrefixShardReplica) message);
262 } else if (message instanceof PrefixShardCreated) {
263 onPrefixShardCreated((PrefixShardCreated) message);
264 } else if (message instanceof PrefixShardRemoved) {
265 onPrefixShardRemoved((PrefixShardRemoved) message);
266 } else if (message instanceof InitConfigListener) {
267 onInitConfigListener();
268 } else if (message instanceof ForwardedAddServerReply) {
269 ForwardedAddServerReply msg = (ForwardedAddServerReply)message;
270 onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath,
271 msg.removeShardOnFailure);
272 } else if (message instanceof ForwardedAddServerFailure) {
273 ForwardedAddServerFailure msg = (ForwardedAddServerFailure)message;
274 onAddServerFailure(msg.shardName, msg.failureMessage, msg.failure, getSender(), msg.removeShardOnFailure);
275 } else if (message instanceof RemoveShardReplica) {
276 onRemoveShardReplica((RemoveShardReplica) message);
277 } else if (message instanceof RemovePrefixShardReplica) {
278 onRemovePrefixShardReplica((RemovePrefixShardReplica) message);
279 } else if (message instanceof WrappedShardResponse) {
280 onWrappedShardResponse((WrappedShardResponse) message);
281 } else if (message instanceof GetSnapshot) {
283 } else if (message instanceof ServerRemoved) {
284 onShardReplicaRemoved((ServerRemoved) message);
285 } else if (message instanceof ChangeShardMembersVotingStatus) {
286 onChangeShardServersVotingStatus((ChangeShardMembersVotingStatus) message);
287 } else if (message instanceof FlipShardMembersVotingStatus) {
288 onFlipShardMembersVotingStatus((FlipShardMembersVotingStatus) message);
289 } else if (message instanceof SaveSnapshotSuccess) {
290 onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
291 } else if (message instanceof SaveSnapshotFailure) {
292 LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(),
293 ((SaveSnapshotFailure) message).cause());
294 } else if (message instanceof Shutdown) {
296 } else if (message instanceof GetLocalShardIds) {
297 onGetLocalShardIds();
298 } else if (message instanceof GetShardRole) {
299 onGetShardRole((GetShardRole) message);
300 } else if (message instanceof RunnableMessage) {
301 ((RunnableMessage)message).run();
302 } else if (message instanceof RegisterForShardAvailabilityChanges) {
303 onRegisterForShardAvailabilityChanges((RegisterForShardAvailabilityChanges)message);
304 } else if (message instanceof DeleteSnapshotsFailure) {
305 LOG.warn("{}: Failed to delete prior snapshots", persistenceId(),
306 ((DeleteSnapshotsFailure) message).cause());
307 } else if (message instanceof DeleteSnapshotsSuccess) {
308 LOG.debug("{}: Successfully deleted prior snapshots", persistenceId());
309 } else if (message instanceof RegisterRoleChangeListenerReply) {
310 LOG.trace("{}: Received RegisterRoleChangeListenerReply", persistenceId());
311 } else if (message instanceof ClusterEvent.MemberEvent) {
312 LOG.trace("{}: Received other ClusterEvent.MemberEvent: {}", persistenceId(), message);
314 unknownMessage(message);
318 private void onRegisterForShardAvailabilityChanges(final RegisterForShardAvailabilityChanges message) {
319 LOG.debug("{}: onRegisterForShardAvailabilityChanges: {}", persistenceId(), message);
321 final Consumer<String> callback = message.getCallback();
322 shardAvailabilityCallbacks.add(callback);
324 getSender().tell(new Status.Success((Registration)
325 () -> executeInSelf(() -> shardAvailabilityCallbacks.remove(callback))), self());
328 private void onGetShardRole(final GetShardRole message) {
329 LOG.debug("{}: onGetShardRole for shard: {}", persistenceId(), message.getName());
331 final String name = message.getName();
333 final ShardInformation shardInformation = localShards.get(name);
335 if (shardInformation == null) {
336 LOG.info("{}: no shard information for {} found", persistenceId(), name);
337 getSender().tell(new Status.Failure(
338 new IllegalArgumentException("Shard with name " + name + " not present.")), ActorRef.noSender());
342 getSender().tell(new GetShardRoleReply(shardInformation.getRole()), ActorRef.noSender());
345 private void onInitConfigListener() {
346 LOG.debug("{}: Initializing config listener on {}", persistenceId(), cluster.getCurrentMemberName());
348 final org.opendaylight.mdsal.common.api.LogicalDatastoreType datastoreType =
349 org.opendaylight.mdsal.common.api.LogicalDatastoreType
350 .valueOf(datastoreContextFactory.getBaseDatastoreContext().getLogicalStoreType().name());
352 if (configUpdateHandler != null) {
353 configUpdateHandler.close();
356 configUpdateHandler = new PrefixedShardConfigUpdateHandler(self(), cluster.getCurrentMemberName());
357 configUpdateHandler.initListener(dataStore, datastoreType);
360 private void onShutDown() {
361 List<Future<Boolean>> stopFutures = new ArrayList<>(localShards.size());
362 for (ShardInformation info : localShards.values()) {
363 if (info.getActor() != null) {
364 LOG.debug("{}: Issuing gracefulStop to shard {}", persistenceId(), info.getShardId());
366 FiniteDuration duration = info.getDatastoreContext().getShardRaftConfig()
367 .getElectionTimeOutInterval().$times(2);
368 stopFutures.add(Patterns.gracefulStop(info.getActor(), duration, Shutdown.INSTANCE));
372 LOG.info("Shutting down ShardManager {} - waiting on {} shards", persistenceId(), stopFutures.size());
374 ExecutionContext dispatcher = new Dispatchers(context().system().dispatchers())
375 .getDispatcher(Dispatchers.DispatcherType.Client);
376 Future<Iterable<Boolean>> combinedFutures = Futures.sequence(stopFutures, dispatcher);
378 combinedFutures.onComplete(new OnComplete<Iterable<Boolean>>() {
380 public void onComplete(final Throwable failure, final Iterable<Boolean> results) {
381 LOG.debug("{}: All shards shutdown - sending PoisonPill to self", persistenceId());
383 self().tell(PoisonPill.getInstance(), self());
385 if (failure != null) {
386 LOG.warn("{}: An error occurred attempting to shut down the shards", persistenceId(), failure);
389 for (Boolean result : results) {
396 LOG.warn("{}: {} shards did not shut down gracefully", persistenceId(), nfailed);
403 private void onWrappedShardResponse(final WrappedShardResponse message) {
404 if (message.getResponse() instanceof RemoveServerReply) {
405 onRemoveServerReply(getSender(), message.getShardId(), (RemoveServerReply) message.getResponse(),
406 message.getLeaderPath());
410 private void onRemoveServerReply(final ActorRef originalSender, final ShardIdentifier shardId,
411 final RemoveServerReply replyMsg, final String leaderPath) {
412 shardReplicaOperationsInProgress.remove(shardId.getShardName());
414 LOG.debug("{}: Received {} for shard {}", persistenceId(), replyMsg, shardId.getShardName());
416 if (replyMsg.getStatus() == ServerChangeStatus.OK) {
417 LOG.debug("{}: Leader shard successfully removed the replica shard {}", persistenceId(),
418 shardId.getShardName());
419 originalSender.tell(new Status.Success(null), getSelf());
421 LOG.warn("{}: Leader failed to remove shard replica {} with status {}",
422 persistenceId(), shardId, replyMsg.getStatus());
424 Exception failure = getServerChangeException(RemoveServer.class, replyMsg.getStatus(), leaderPath, shardId);
425 originalSender.tell(new Status.Failure(failure), getSelf());
429 @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
430 justification = "https://github.com/spotbugs/spotbugs/issues/811")
431 private void removePrefixShardReplica(final RemovePrefixShardReplica contextMessage, final String shardName,
432 final String primaryPath, final ActorRef sender) {
433 if (isShardReplicaOperationInProgress(shardName, sender)) {
437 shardReplicaOperationsInProgress.add(shardName);
439 final ShardIdentifier shardId = getShardIdentifier(contextMessage.getMemberName(), shardName);
441 final DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
443 //inform ShardLeader to remove this shard as a replica by sending an RemoveServer message
444 LOG.debug("{}: Sending RemoveServer message to peer {} for shard {}", persistenceId(),
445 primaryPath, shardId);
447 Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
448 Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
449 new RemoveServer(shardId.toString()), removeServerTimeout);
451 futureObj.onComplete(new OnComplete<Object>() {
453 public void onComplete(final Throwable failure, final Object response) {
454 if (failure != null) {
455 shardReplicaOperationsInProgress.remove(shardName);
457 LOG.debug("{}: RemoveServer request to leader {} for shard {} failed", persistenceId(), primaryPath,
461 sender.tell(new Status.Failure(new RuntimeException(
462 String.format("RemoveServer request to leader %s for shard %s failed", primaryPath, shardName),
466 self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
469 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
472 @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
473 justification = "https://github.com/spotbugs/spotbugs/issues/811")
474 private void removeShardReplica(final RemoveShardReplica contextMessage, final String shardName,
475 final String primaryPath, final ActorRef sender) {
476 if (isShardReplicaOperationInProgress(shardName, sender)) {
480 shardReplicaOperationsInProgress.add(shardName);
482 final ShardIdentifier shardId = getShardIdentifier(contextMessage.getMemberName(), shardName);
484 final DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
486 //inform ShardLeader to remove this shard as a replica by sending an RemoveServer message
487 LOG.debug("{}: Sending RemoveServer message to peer {} for shard {}", persistenceId(),
488 primaryPath, shardId);
490 Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
491 Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
492 new RemoveServer(shardId.toString()), removeServerTimeout);
494 futureObj.onComplete(new OnComplete<Object>() {
496 public void onComplete(final Throwable failure, final Object response) {
497 if (failure != null) {
498 shardReplicaOperationsInProgress.remove(shardName);
499 LOG.debug("{}: RemoveServer request to leader {} for shard {} failed", persistenceId(), primaryPath,
503 sender.tell(new Status.Failure(new RuntimeException(
504 String.format("RemoveServer request to leader %s for shard %s failed", primaryPath, shardName),
508 self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
511 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
514 private void onShardReplicaRemoved(final ServerRemoved message) {
515 removeShard(new ShardIdentifier.Builder().fromShardIdString(message.getServerId()).build());
518 @SuppressWarnings("checkstyle:IllegalCatch")
519 private void removeShard(final ShardIdentifier shardId) {
520 final String shardName = shardId.getShardName();
521 final ShardInformation shardInformation = localShards.remove(shardName);
522 if (shardInformation == null) {
523 LOG.debug("{} : Shard replica {} is not present in list", persistenceId(), shardId.toString());
527 final ActorRef shardActor = shardInformation.getActor();
528 if (shardActor != null) {
529 long timeoutInMS = Math.max(shardInformation.getDatastoreContext().getShardRaftConfig()
530 .getElectionTimeOutInterval().$times(3).toMillis(), 10000);
532 LOG.debug("{} : Sending Shutdown to Shard actor {} with {} ms timeout", persistenceId(), shardActor,
535 final Future<Boolean> stopFuture = Patterns.gracefulStop(shardActor,
536 FiniteDuration.apply(timeoutInMS, TimeUnit.MILLISECONDS), Shutdown.INSTANCE);
538 final CompositeOnComplete<Boolean> onComplete = new CompositeOnComplete<Boolean>() {
540 public void onComplete(final Throwable failure, final Boolean result) {
541 if (failure == null) {
542 LOG.debug("{} : Successfully shut down Shard actor {}", persistenceId(), shardActor);
544 LOG.warn("{}: Failed to shut down Shard actor {}", persistenceId(), shardActor, failure);
547 self().tell((RunnableMessage) () -> {
548 // At any rate, invalidate primaryShardInfo cache
549 primaryShardInfoCache.remove(shardName);
551 shardActorsStopping.remove(shardName);
552 notifyOnCompleteTasks(failure, result);
553 }, ActorRef.noSender());
557 shardActorsStopping.put(shardName, onComplete);
558 stopFuture.onComplete(onComplete, new Dispatchers(context().system().dispatchers())
559 .getDispatcher(Dispatchers.DispatcherType.Client));
562 LOG.debug("{} : Local Shard replica for shard {} has been removed", persistenceId(), shardName);
566 private void onGetSnapshot() {
567 LOG.debug("{}: onGetSnapshot", persistenceId());
569 List<String> notInitialized = null;
570 for (ShardInformation shardInfo : localShards.values()) {
571 if (!shardInfo.isShardInitialized()) {
572 if (notInitialized == null) {
573 notInitialized = new ArrayList<>();
576 notInitialized.add(shardInfo.getShardName());
580 if (notInitialized != null) {
581 getSender().tell(new Status.Failure(new IllegalStateException(String.format(
582 "%d shard(s) %s are not initialized", notInitialized.size(), notInitialized))), getSelf());
586 ActorRef replyActor = getContext().actorOf(ShardManagerGetSnapshotReplyActor.props(
587 new ArrayList<>(localShards.keySet()), type, currentSnapshot , getSender(), persistenceId(),
588 datastoreContextFactory.getBaseDatastoreContext().getShardInitializationTimeout().duration()));
590 for (ShardInformation shardInfo: localShards.values()) {
591 shardInfo.getActor().tell(GetSnapshot.INSTANCE, replyActor);
595 @SuppressWarnings("checkstyle:IllegalCatch")
596 private void onCreateShard(final CreateShard createShard) {
597 LOG.debug("{}: onCreateShard: {}", persistenceId(), createShard);
601 String shardName = createShard.getModuleShardConfig().getShardName();
602 if (localShards.containsKey(shardName)) {
603 LOG.debug("{}: Shard {} already exists", persistenceId(), shardName);
604 reply = new Status.Success(String.format("Shard with name %s already exists", shardName));
606 doCreateShard(createShard);
607 reply = new Status.Success(null);
609 } catch (Exception e) {
610 LOG.error("{}: onCreateShard failed", persistenceId(), e);
611 reply = new Status.Failure(e);
614 if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
615 getSender().tell(reply, getSelf());
619 private void onPrefixShardCreated(final PrefixShardCreated message) {
620 LOG.debug("{}: onPrefixShardCreated: {}", persistenceId(), message);
622 final PrefixShardConfiguration config = message.getConfiguration();
623 final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
624 ClusterUtils.getCleanShardName(config.getPrefix().getRootIdentifier()));
625 final String shardName = shardId.getShardName();
627 if (isPreviousShardActorStopInProgress(shardName, message)) {
631 if (localShards.containsKey(shardName)) {
632 LOG.debug("{}: Received create for an already existing shard {}", persistenceId(), shardName);
633 final PrefixShardConfiguration existing =
634 configuration.getAllPrefixShardConfigurations().get(config.getPrefix());
636 if (existing != null && existing.equals(config)) {
637 // we don't have to do nothing here
642 doCreatePrefixShard(config, shardId, shardName);
645 private boolean isPreviousShardActorStopInProgress(final String shardName, final Object messageToDefer) {
646 final CompositeOnComplete<Boolean> stopOnComplete = shardActorsStopping.get(shardName);
647 if (stopOnComplete == null) {
651 LOG.debug("{} : Stop is in progress for shard {} - adding OnComplete callback to defer {}", persistenceId(),
652 shardName, messageToDefer);
653 final ActorRef sender = getSender();
654 stopOnComplete.addOnComplete(new OnComplete<Boolean>() {
656 public void onComplete(final Throwable failure, final Boolean result) {
657 LOG.debug("{} : Stop complete for shard {} - re-queing {}", persistenceId(), shardName, messageToDefer);
658 self().tell(messageToDefer, sender);
665 private void doCreatePrefixShard(final PrefixShardConfiguration config, final ShardIdentifier shardId,
666 final String shardName) {
667 configuration.addPrefixShardConfiguration(config);
669 final Builder builder = newShardDatastoreContextBuilder(shardName);
670 builder.logicalStoreType(config.getPrefix().getDatastoreType())
671 .storeRoot(config.getPrefix().getRootIdentifier());
672 DatastoreContext shardDatastoreContext = builder.build();
674 final Map<String, String> peerAddresses = getPeerAddresses(shardName);
675 final boolean isActiveMember = true;
677 LOG.debug("{} doCreatePrefixShard: shardId: {}, memberNames: {}, peerAddresses: {}, isActiveMember: {}",
678 persistenceId(), shardId, config.getShardMemberNames(), peerAddresses, isActiveMember);
680 final ShardInformation info = new ShardInformation(shardName, shardId, peerAddresses,
681 shardDatastoreContext, Shard.builder(), peerAddressResolver);
682 info.setActiveMember(isActiveMember);
683 localShards.put(info.getShardName(), info);
685 if (schemaContext != null) {
686 info.setSchemaContext(schemaContext);
687 info.setActor(newShardActor(info));
691 private void onPrefixShardRemoved(final PrefixShardRemoved message) {
692 LOG.debug("{}: onPrefixShardRemoved : {}", persistenceId(), message);
694 final DOMDataTreeIdentifier prefix = message.getPrefix();
695 final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
696 ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
698 configuration.removePrefixShardConfiguration(prefix);
699 removeShard(shardId);
    /**
     * Creates a local shard for a dynamically-requested module shard. Registers the module
     * configuration, derives a per-shard DatastoreContext, determines whether this member is
     * an active (voting) member of the shard, records the ShardInformation and - if the
     * SchemaContext is already available - starts the shard actor.
     *
     * @param createShard the request carrying the module shard configuration and an optional
     *                    pre-built DatastoreContext and Shard builder
     */
    private void doCreateShard(final CreateShard createShard) {
        final ModuleShardConfiguration moduleShardConfig = createShard.getModuleShardConfig();
        final String shardName = moduleShardConfig.getShardName();

        configuration.addModuleShardConfiguration(moduleShardConfig);

        DatastoreContext shardDatastoreContext = createShard.getDatastoreContext();
        if (shardDatastoreContext == null) {
            shardDatastoreContext = newShardDatastoreContext(shardName);
        } else {
            // Ensure a caller-supplied context still resolves peers through this manager.
            shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext).shardPeerAddressResolver(
                    peerAddressResolver).build();
        }

        ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);

        boolean shardWasInRecoveredSnapshot = currentSnapshot != null
                && currentSnapshot.getShardList().contains(shardName);

        Map<String, String> peerAddresses;
        boolean isActiveMember;
        if (shardWasInRecoveredSnapshot || configuration.getMembersFromShardName(shardName)
                .contains(cluster.getCurrentMemberName())) {
            peerAddresses = getPeerAddresses(shardName);
            isActiveMember = true;
        } else {
            // The local member is not in the static shard member configuration and the shard did not
            // previously exist (ie !shardWasInRecoveredSnapshot). In this case we'll create
            // the shard with no peers and with elections disabled so it stays as follower. A
            // subsequent AddServer request will be needed to make it an active member.
            isActiveMember = false;
            peerAddresses = Collections.emptyMap();
            shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext)
                    .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();
        }

        LOG.debug("{} doCreateShard: shardId: {}, memberNames: {}, peerAddresses: {}, isActiveMember: {}",
                persistenceId(), shardId, moduleShardConfig.getShardMemberNames(), peerAddresses,
                isActiveMember);

        ShardInformation info = new ShardInformation(shardName, shardId, peerAddresses,
                shardDatastoreContext, createShard.getShardBuilder(), peerAddressResolver);
        info.setActiveMember(isActiveMember);
        localShards.put(info.getShardName(), info);

        // Without a SchemaContext the actor cannot be started yet; updateSchemaContext
        // will create it later.
        if (schemaContext != null) {
            info.setSchemaContext(schemaContext);
            info.setActor(newShardActor(info));
        }
    }
    // Builds a per-shard DatastoreContext builder pre-wired with this manager's peer address resolver.
    private DatastoreContext.Builder newShardDatastoreContextBuilder(final String shardName) {
        return DatastoreContext.newBuilderFrom(datastoreContextFactory.getShardDatastoreContext(shardName))
                .shardPeerAddressResolver(peerAddressResolver);
    }
    // Convenience wrapper: fully-built per-shard DatastoreContext.
    private DatastoreContext newShardDatastoreContext(final String shardName) {
        return newShardDatastoreContextBuilder(shardName).build();
    }
    /**
     * Counts down the data-store readiness latch once every local shard reports a known
     * leader id. Safe to call repeatedly; only acts when all shards are ready.
     */
    private void checkReady() {
        if (isReadyWithLeaderId()) {
            LOG.info("{}: All Shards are ready - data store {} is ready, available count is {}",
                    persistenceId(), type, waitTillReadyCountdownLatch.getCount());

            waitTillReadyCountdownLatch.countDown();
        }
    }
    /**
     * Updates the matching local shard's leader-related state from a ShardLeaderStateChanged
     * message. When the leader id actually changed, stale cached primary info is evicted and
     * availability callbacks are notified.
     *
     * @param leaderStateChanged the leader-state notification from a shard actor
     */
    private void onLeaderStateChanged(final ShardLeaderStateChanged leaderStateChanged) {
        LOG.info("{}: Received LeaderStateChanged message: {}", persistenceId(), leaderStateChanged);

        ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
        if (shardInformation != null) {
            shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
            shardInformation.setLeaderVersion(leaderStateChanged.getLeaderPayloadVersion());
            if (shardInformation.setLeaderId(leaderStateChanged.getLeaderId())) {
                // The leader changed - any cached primary info for this shard is now stale.
                primaryShardInfoCache.remove(shardInformation.getShardName());

                notifyShardAvailabilityCallbacks(shardInformation);
            }

            checkReady();
        } else {
            LOG.debug("No shard found with member Id {}", leaderStateChanged.getMemberId());
        }
    }
790 private void notifyShardAvailabilityCallbacks(final ShardInformation shardInformation) {
791 shardAvailabilityCallbacks.forEach(callback -> callback.accept(shardInformation.getShardName()));
    /**
     * Handles expiry of the timer scheduled by {@code sendResponse} while waiting for a shard
     * to initialize: removes the pending callback and replies to the original sender with the
     * appropriate failure (not-initialized vs no-leader).
     *
     * @param message the timeout message carrying the shard info and original sender
     */
    private void onShardNotInitializedTimeout(final ShardNotInitializedTimeout message) {
        ShardInformation shardInfo = message.getShardInfo();

        LOG.debug("{}: Received ShardNotInitializedTimeout message for shard {}", persistenceId(),
                shardInfo.getShardName());

        shardInfo.removeOnShardInitialized(message.getOnShardInitialized());

        if (!shardInfo.isShardInitialized()) {
            LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(), shardInfo.getShardName());
            message.getSender().tell(createNotInitializedException(shardInfo.getShardId()), getSelf());
        } else {
            // Initialized but no (ready) leader - report the leader problem instead.
            LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(), shardInfo.getShardName());
            message.getSender().tell(new NoShardLeaderException(shardInfo.getShardId()), getSelf());
        }
    }
    /**
     * Records a follower's initial-sync status on the matching local shard and refreshes the
     * aggregate sync status exposed through the MBean.
     *
     * @param status the follower initial-sync status reported by a shard actor
     */
    private void onFollowerInitialSyncStatus(final FollowerInitialSyncUpStatus status) {
        LOG.info("{} Received follower initial sync status for {} status sync done {}", persistenceId(),
                status.getName(), status.isInitialSyncDone());

        ShardInformation shardInformation = findShardInformation(status.getName());

        if (shardInformation != null) {
            shardInformation.setFollowerSyncStatus(status.isInitialSyncDone());

            shardManagerMBean.setSyncStatus(isInSync());
        }
    }
    /**
     * Records a RAFT role change on the matching local shard and refreshes the aggregate
     * sync status exposed through the MBean.
     *
     * @param roleChanged the role-change notification from a shard actor
     */
    private void onRoleChangeNotification(final RoleChangeNotification roleChanged) {
        LOG.info("{}: Received role changed for {} from {} to {}", persistenceId(), roleChanged.getMemberId(),
                roleChanged.getOldRole(), roleChanged.getNewRole());

        ShardInformation shardInformation = findShardInformation(roleChanged.getMemberId());
        if (shardInformation != null) {
            shardInformation.setRole(roleChanged.getNewRole());

            shardManagerMBean.setSyncStatus(isInSync());
        }
    }
838 private ShardInformation findShardInformation(final String memberId) {
839 for (ShardInformation info : localShards.values()) {
840 if (info.getShardId().toString().equals(memberId)) {
848 private boolean isReadyWithLeaderId() {
849 boolean isReady = true;
850 for (ShardInformation info : localShards.values()) {
851 if (!info.isShardReadyWithLeaderId()) {
859 private boolean isInSync() {
860 for (ShardInformation info : localShards.values()) {
861 if (!info.isInSync()) {
    /**
     * Handles an ActorInitialized-style notification from a shard actor: derives the shard
     * name from the sender actor's name (which is the stringified shard id) and marks that
     * shard initialized. Messages from non-actors or unparseable actor names are ignored.
     *
     * @param message the notification message (content unused; only the sender matters)
     */
    private void onActorInitialized(final Object message) {
        final ActorRef sender = getSender();

        if (sender == null) {
            // why is a non-actor sending this message? Just ignore.
            return;
        }

        String actorName = sender.path().name();
        //find shard name from actor name; actor name is stringified shardId
        final ShardIdentifier shardId;
        try {
            shardId = ShardIdentifier.fromShardIdString(actorName);
        } catch (IllegalArgumentException e) {
            // Not a shard actor name - nothing for us to do.
            LOG.debug("{}: ignoring actor {}", persistenceId, actorName, e);
            return;
        }

        markShardAsInitialized(shardId.getShardName());
    }
    /**
     * Marks the named local shard's actor as initialized and subscribes this manager to its
     * role-change notifications. Unknown shard names are ignored.
     *
     * @param shardName the shard to mark initialized
     */
    private void markShardAsInitialized(final String shardName) {
        LOG.debug("{}: Initializing shard [{}]", persistenceId(), shardName);

        ShardInformation shardInformation = localShards.get(shardName);
        if (shardInformation != null) {
            shardInformation.setActorInitialized();

            shardInformation.getActor().tell(new RegisterRoleChangeListener(), self());
        }
    }
    /**
     * Akka persistence recovery hook: applies a snapshot offer if one is replayed and runs
     * post-recovery initialization once recovery completes.
     *
     * @param message the replayed persistence message
     * @throws Exception on recovery failure
     */
    @Override
    protected void handleRecover(final Object message) throws Exception {
        if (message instanceof RecoveryCompleted) {
            onRecoveryCompleted();
        } else if (message instanceof SnapshotOffer) {
            applyShardManagerSnapshot((ShardManagerSnapshot)((SnapshotOffer) message).snapshot());
        }
    }
    /**
     * Post-recovery initialization: if no snapshot was recovered but an externally supplied
     * restore snapshot is present, applies it; then creates the statically-configured local
     * shards.
     */
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void onRecoveryCompleted() {
        LOG.info("Recovery complete : {}", persistenceId());

        if (currentSnapshot == null && restoreFromSnapshot != null
                && restoreFromSnapshot.getShardManagerSnapshot() != null) {
            ShardManagerSnapshot snapshot = restoreFromSnapshot.getShardManagerSnapshot();

            LOG.debug("{}: Restoring from ShardManagerSnapshot: {}", persistenceId(), snapshot);

            applyShardManagerSnapshot(snapshot);
        }

        createLocalShards();
    }
    /**
     * Replies to the current sender with the supplied message once the given shard satisfies
     * the requested readiness condition. If the shard is not yet initialized (or not ready
     * with a leader when {@code wantShardReady} is set):
     * <ul>
     * <li>with {@code doWait}: registers a callback to reply when ready and schedules a
     * timeout that produces a NotInitialized/NoShardLeader failure instead;</li>
     * <li>without {@code doWait}: fails immediately with the matching exception.</li>
     * </ul>
     *
     * @param shardInformation the shard whose state gates the reply
     * @param doWait whether to wait (with timeout) for the shard to become ready
     * @param wantShardReady whether a known leader is additionally required
     * @param messageSupplier supplies the success reply, evaluated at reply time
     */
    private void sendResponse(final ShardInformation shardInformation, final boolean doWait,
            final boolean wantShardReady, final Supplier<Object> messageSupplier) {
        if (!shardInformation.isShardInitialized() || wantShardReady && !shardInformation.isShardReadyWithLeaderId()) {
            if (doWait) {
                // Capture sender/self now - they are only valid while processing this message.
                final ActorRef sender = getSender();
                final ActorRef self = self();

                Runnable replyRunnable = () -> sender.tell(messageSupplier.get(), self);

                OnShardInitialized onShardInitialized = wantShardReady ? new OnShardReady(replyRunnable) :
                    new OnShardInitialized(replyRunnable);

                shardInformation.addOnShardInitialized(onShardInitialized);

                FiniteDuration timeout = shardInformation.getDatastoreContext()
                        .getShardInitializationTimeout().duration();
                if (shardInformation.isShardInitialized()) {
                    // If the shard is already initialized then we'll wait enough time for the shard to
                    // elect a leader, ie 2 times the election timeout.
                    timeout = FiniteDuration.create(shardInformation.getDatastoreContext().getShardRaftConfig()
                            .getElectionTimeOutInterval().toMillis() * 2, TimeUnit.MILLISECONDS);
                }

                LOG.debug("{}: Scheduling {} ms timer to wait for shard {}", persistenceId(), timeout.toMillis(),
                        shardInformation.getShardName());

                Cancellable timeoutSchedule = getContext().system().scheduler().scheduleOnce(
                        timeout, getSelf(),
                        new ShardNotInitializedTimeout(shardInformation, onShardInitialized, sender),
                        getContext().dispatcher(), getSelf());

                // Keep the handle so the timer can be cancelled when the shard becomes ready.
                onShardInitialized.setTimeoutSchedule(timeoutSchedule);

            } else if (!shardInformation.isShardInitialized()) {
                LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(),
                        shardInformation.getShardName());
                getSender().tell(createNotInitializedException(shardInformation.getShardId()), getSelf());
            } else {
                LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(),
                        shardInformation.getShardName());
                getSender().tell(new NoShardLeaderException(shardInformation.getShardId()), getSelf());
            }

            return;
        }

        getSender().tell(messageSupplier.get(), getSelf());
    }
975 private static NotInitializedException createNotInitializedException(final ShardIdentifier shardId) {
976 return new NotInitializedException(String.format(
977 "Found primary shard %s but it's not initialized yet. Please try again later", shardId));
    // Extracts the MemberName from a cluster Member: the member name is carried as the
    // first cluster role. Assumes every member has at least one role - TODO confirm
    // this invariant is enforced at cluster configuration time.
    static MemberName memberToName(final Member member) {
        return MemberName.forName(member.roles().iterator().next());
    }
    /**
     * Handles a cluster MemberRemoved event: forgets the member's address and notifies every
     * local shard that the corresponding peer is down.
     *
     * @param message the cluster removal event
     */
    private void memberRemoved(final ClusterEvent.MemberRemoved message) {
        MemberName memberName = memberToName(message.member());

        LOG.info("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
                message.member().address());

        peerAddressResolver.removePeerAddress(memberName);

        for (ShardInformation info : localShards.values()) {
            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
        }
    }
    /**
     * Handles a cluster MemberExited event: same treatment as MemberRemoved - forget the
     * member's address and mark its peers down on every local shard.
     *
     * @param message the cluster exit event
     */
    private void memberExited(final ClusterEvent.MemberExited message) {
        MemberName memberName = memberToName(message.member());

        LOG.info("{}: Received MemberExited: memberName: {}, address: {}", persistenceId(), memberName,
                message.member().address());

        peerAddressResolver.removePeerAddress(memberName);

        for (ShardInformation info : localShards.values()) {
            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
        }
    }
    /**
     * Handles a cluster MemberUp event by recording the member's address.
     *
     * @param message the cluster member-up event
     */
    private void memberUp(final ClusterEvent.MemberUp message) {
        MemberName memberName = memberToName(message.member());

        LOG.info("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
                message.member().address());

        memberUp(memberName, message.member().address());
    }
    // Common handling for MemberUp/MemberWeaklyUp: record the peer address and re-evaluate
    // overall data-store readiness.
    private void memberUp(final MemberName memberName, final Address address) {
        addPeerAddress(memberName, address);
        checkReady();
    }
    /**
     * Handles a cluster MemberWeaklyUp event - treated the same as MemberUp.
     *
     * @param message the weakly-up event
     */
    private void memberWeaklyUp(final MemberWeaklyUp message) {
        MemberName memberName = memberToName(message.member());

        LOG.info("{}: Received MemberWeaklyUp: memberName: {}, address: {}", persistenceId(), memberName,
                message.member().address());

        memberUp(memberName, message.member().address());
    }
    /**
     * Records a member's address in the resolver and pushes the resolved peer address and a
     * peer-up notification to every local shard.
     *
     * @param memberName the member whose address became known
     * @param address the member's cluster address
     */
    private void addPeerAddress(final MemberName memberName, final Address address) {
        peerAddressResolver.addPeerAddress(memberName, address);

        for (ShardInformation info : localShards.values()) {
            String shardName = info.getShardName();
            String peerId = getShardIdentifier(memberName, shardName).toString();
            info.updatePeerAddress(peerId, peerAddressResolver.getShardActorAddress(shardName, memberName), getSelf());

            info.peerUp(memberName, peerId, getSelf());
        }
    }
    /**
     * Handles a ReachableMember event: refreshes the member's address and marks it available
     * on all local shards.
     *
     * @param message the reachability event
     */
    private void memberReachable(final ClusterEvent.ReachableMember message) {
        MemberName memberName = memberToName(message.member());
        LOG.info("Received ReachableMember: memberName {}, address: {}", memberName, message.member().address());

        addPeerAddress(memberName, message.member().address());

        markMemberAvailable(memberName);
    }
    /**
     * Handles an UnreachableMember event by marking the member unavailable on all local shards.
     *
     * @param message the unreachability event
     */
    private void memberUnreachable(final ClusterEvent.UnreachableMember message) {
        MemberName memberName = memberToName(message.member());
        LOG.info("Received UnreachableMember: memberName {}, address: {}", memberName, message.member().address());

        markMemberUnavailable(memberName);
    }
    /**
     * Marks the given member down on every local shard. If that member was a shard's leader,
     * also flags the leader unavailable, evicts cached primary info and notifies availability
     * callbacks.
     *
     * @param memberName the member that became unreachable
     */
    private void markMemberUnavailable(final MemberName memberName) {
        for (ShardInformation info : localShards.values()) {
            String leaderId = info.getLeaderId();
            if (leaderId != null && ShardIdentifier.fromShardIdString(leaderId).getMemberName().equals(memberName)) {
                LOG.debug("Marking Leader {} as unavailable.", leaderId);
                info.setLeaderAvailable(false);

                primaryShardInfoCache.remove(info.getShardName());

                notifyShardAvailabilityCallbacks(info);
            }

            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
        }
    }
    /**
     * Marks the given member up on every local shard, flagging the leader available again on
     * any shard that member leads.
     *
     * @param memberName the member that became reachable
     */
    private void markMemberAvailable(final MemberName memberName) {
        for (ShardInformation info : localShards.values()) {
            String leaderId = info.getLeaderId();
            if (leaderId != null && ShardIdentifier.fromShardIdString(leaderId).getMemberName().equals(memberName)) {
                LOG.debug("Marking Leader {} as available.", leaderId);
                info.setLeaderAvailable(true);
            }

            info.peerUp(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
        }
    }
    /**
     * Installs a new DatastoreContextFactory and propagates freshly-derived per-shard
     * contexts to every local shard.
     *
     * @param factory the new context factory
     */
    private void onDatastoreContextFactory(final DatastoreContextFactory factory) {
        datastoreContextFactory = factory;
        for (ShardInformation info : localShards.values()) {
            info.setDatastoreContext(newShardDatastoreContext(info.getShardName()), getSelf());
        }
    }
    /**
     * Replies to the sender with a Status.Success carrying the stringified identifiers of all
     * local shards.
     */
    private void onGetLocalShardIds() {
        final List<String> response = new ArrayList<>(localShards.size());

        for (ShardInformation info : localShards.values()) {
            response.add(info.getShardId().toString());
        }

        getSender().tell(new Status.Success(response), getSelf());
    }
    /**
     * Switches the RAFT behavior of a single named shard, or of all local shards when no
     * shard id is given. Fails the request when the named shard is not local; otherwise
     * acknowledges with Status.Success.
     *
     * @param message the switch request carrying an optional shard id, new state and term
     */
    private void onSwitchShardBehavior(final SwitchShardBehavior message) {
        final ShardIdentifier identifier = message.getShardId();

        if (identifier != null) {
            final ShardInformation info = localShards.get(identifier.getShardName());
            if (info == null) {
                getSender().tell(new Status.Failure(
                    new IllegalArgumentException("Shard " + identifier + " is not local")), getSelf());
                return;
            }

            switchShardBehavior(info, new SwitchBehavior(message.getNewState(), message.getTerm()));
        } else {
            for (ShardInformation info : localShards.values()) {
                switchShardBehavior(info, new SwitchBehavior(message.getNewState(), message.getTerm()));
            }
        }

        getSender().tell(new Status.Success(null), getSelf());
    }
    /**
     * Forwards a SwitchBehavior message to the shard's actor, or logs a warning when the
     * actor has not been created yet.
     *
     * @param info the target shard
     * @param switchBehavior the behavior-switch command
     */
    private void switchShardBehavior(final ShardInformation info, final SwitchBehavior switchBehavior) {
        final ActorRef actor = info.getActor();
        if (actor != null) {
            actor.tell(switchBehavior, getSelf());
        } else {
            LOG.warn("Could not switch the behavior of shard {} to {} - shard is not yet available",
                info.getShardName(), switchBehavior.getNewState());
        }
    }
    /**
     * Notifies all the local shards of a change in the schema context.
     *
     * <p>Shards whose actor does not exist yet are created now (the SchemaContext was the
     * missing prerequisite) and seeded with the currently known peer addresses; existing
     * shard actors are simply forwarded the update message.
     *
     * @param message the message to send
     */
    private void updateSchemaContext(final Object message) {
        schemaContext = ((UpdateSchemaContext) message).getSchemaContext();

        LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getModules().size());

        for (ShardInformation info : localShards.values()) {
            info.setSchemaContext(schemaContext);

            if (info.getActor() == null) {
                LOG.debug("Creating Shard {}", info.getShardId());
                info.setActor(newShardActor(info));
                // Update peer address for every existing peer member to avoid missing sending
                // PeerAddressResolved and PeerUp to this shard while UpdateSchemaContext comes after MemberUp.
                String shardName = info.getShardName();
                for (MemberName memberName : peerAddressResolver.getPeerMembers()) {
                    String peerId = getShardIdentifier(memberName, shardName).toString();
                    String peerAddress = peerAddressResolver.getShardActorAddress(shardName, memberName);
                    info.updatePeerAddress(peerId, peerAddress, getSelf());
                    info.peerUp(memberName, peerId, getSelf());
                    LOG.debug("{}: updated peer {} on member {} with address {} on shard {} whose actor address is {}",
                            persistenceId(), peerId, memberName, peerAddress, info.getShardId(), info.getActor());
                }
            } else {
                info.getActor().tell(message, getSelf());
            }
        }
    }
    // Accessor for the cluster wrapper; presumably overridable for testing.
    // NOTE(review): body was lost in extraction - reconstructed as the trivial getter of the
    // 'cluster' field used throughout this class; confirm against upstream.
    protected ClusterWrapper getCluster() {
        return cluster;
    }
    /**
     * Creates the actor for a shard using the shard's Props and the configured shard
     * dispatcher; the actor name is the stringified shard id (relied upon by
     * onActorInitialized). Overridable for testing.
     *
     * @param info the shard to create an actor for
     * @return the new shard actor reference
     */
    protected ActorRef newShardActor(final ShardInformation info) {
        return getContext().actorOf(info.newProps().withDispatcher(shardDispatcherPath),
                info.getShardId().toString());
    }
    /**
     * Resolves the primary (leader) for a shard. If an active local replica exists, replies
     * (possibly after waiting for readiness) with Local/RemotePrimaryShardFound; local shard
     * state is only returned for requests that did not originate from a remote ShardManager.
     * Otherwise the request is forwarded to the first not-yet-visited peer ShardManager,
     * carrying the visited-address list to prevent forwarding loops. If no candidate remains,
     * the sender gets a PrimaryNotFoundException.
     *
     * @param message the find-primary request
     */
    private void findPrimary(final FindPrimary message) {
        LOG.debug("{}: In findPrimary: {}", persistenceId(), message);

        final String shardName = message.getShardName();
        final boolean canReturnLocalShardState = !(message instanceof RemoteFindPrimary);

        // First see if the there is a local replica for the shard
        final ShardInformation info = localShards.get(shardName);
        if (info != null && info.isActiveMember()) {
            sendResponse(info, message.isWaitUntilReady(), true, () -> {
                String primaryPath = info.getSerializedLeaderActor();
                Object found = canReturnLocalShardState && info.isLeader()
                        ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
                            new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());

                LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
                return found;
            });

            return;
        }

        // Track where the request has been so forwarding cannot loop between managers.
        final Collection<String> visitedAddresses;
        if (message instanceof RemoteFindPrimary) {
            visitedAddresses = ((RemoteFindPrimary)message).getVisitedAddresses();
        } else {
            visitedAddresses = new ArrayList<>(1);
        }

        visitedAddresses.add(peerAddressResolver.getShardManagerActorPathBuilder(cluster.getSelfAddress()).toString());

        for (String address: peerAddressResolver.getShardManagerPeerActorAddresses()) {
            if (visitedAddresses.contains(address)) {
                continue;
            }

            LOG.debug("{}: findPrimary for {} forwarding to remote ShardManager {}, visitedAddresses: {}",
                    persistenceId(), shardName, address, visitedAddresses);

            getContext().actorSelection(address).forward(new RemoteFindPrimary(shardName,
                    message.isWaitUntilReady(), visitedAddresses), getContext());
            return;
        }

        LOG.debug("{}: No shard found for {}", persistenceId(), shardName);

        getSender().tell(new PrimaryNotFoundException(
                String.format("No primary shard found for %s.", shardName)), getSelf());
    }
    /**
     * Asynchronously resolves a shard's primary by self-sending a FindPrimary (wait-for-ready)
     * and dispatching the eventual outcome to the supplied handler on the client dispatcher.
     * The ask timeout is twice the shard initialization timeout.
     *
     * @param shardName the shard whose primary is sought
     * @param handler callback invoked with the result or failure
     */
    private void findPrimary(final String shardName, final FindPrimaryResponseHandler handler) {
        Timeout findPrimaryTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
                .getShardInitializationTimeout().duration().$times(2));

        Future<Object> futureObj = ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
        futureObj.onComplete(new OnComplete<Object>() {
            @Override
            public void onComplete(final Throwable failure, final Object response) {
                if (failure != null) {
                    handler.onFailure(failure);
                } else {
                    if (response instanceof RemotePrimaryShardFound) {
                        handler.onRemotePrimaryShardFound((RemotePrimaryShardFound) response);
                    } else if (response instanceof LocalPrimaryShardFound) {
                        handler.onLocalPrimaryFound((LocalPrimaryShardFound) response);
                    } else {
                        handler.onUnknownResponse(response);
                    }
                }
            }
        }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
    }
    /**
     * Construct the name of the shard actor given the name of the member on
     * which the shard resides and the name of the shard.
     *
     * @param memberName the member name
     * @param shardName the shard name
     * @return the shard identifier
     */
    private ShardIdentifier getShardIdentifier(final MemberName memberName, final String shardName) {
        return peerAddressResolver.getShardIdentifier(memberName, shardName);
    }
    /**
     * Create shards that are local to the member on which the ShardManager runs.
     *
     * <p>Any per-shard snapshots supplied for restoration are indexed by shard name and
     * handed to the corresponding ShardInformation; the restore reference is then released.
     */
    private void createLocalShards() {
        MemberName memberName = this.cluster.getCurrentMemberName();
        Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);

        Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots = new HashMap<>();
        if (restoreFromSnapshot != null) {
            for (DatastoreSnapshot.ShardSnapshot snapshot: restoreFromSnapshot.getShardSnapshots()) {
                shardSnapshots.put(snapshot.getName(), snapshot);
            }
        }

        // Release the restore data - it is only needed for this one-time initialization.
        restoreFromSnapshot = null;

        for (String shardName : memberShardNames) {
            ShardIdentifier shardId = getShardIdentifier(memberName, shardName);

            LOG.debug("{}: Creating local shard: {}", persistenceId(), shardId);

            Map<String, String> peerAddresses = getPeerAddresses(shardName);
            localShards.put(shardName, createShardInfoFor(shardName, shardId, peerAddresses,
                    newShardDatastoreContext(shardName), shardSnapshots));
        }
    }
    /**
     * Builds the ShardInformation for a local shard, attaching a restore-from-snapshot
     * builder when a snapshot exists for the shard. Package-visible, presumably for testing.
     *
     * @param shardName the shard name
     * @param shardId the shard identifier
     * @param peerAddresses peer id to address map for the shard
     * @param datastoreContext the shard's datastore context
     * @param shardSnapshots restore snapshots indexed by shard name (may lack this shard)
     * @return the new ShardInformation
     */
    ShardInformation createShardInfoFor(final String shardName, final ShardIdentifier shardId,
                                        final Map<String, String> peerAddresses,
                                        final DatastoreContext datastoreContext,
                                        final Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots) {
        return new ShardInformation(shardName, shardId, peerAddresses,
                datastoreContext, Shard.builder().restoreFromSnapshot(shardSnapshots.get(shardName)),
                peerAddressResolver);
    }
    /**
     * Given the name of the shard find the addresses of all it's peers.
     *
     * @param shardName the shard name
     * @return map of peer shard-identifier strings to resolved peer addresses
     */
    Map<String, String> getPeerAddresses(final String shardName) {
        final Collection<MemberName> members = configuration.getMembersFromShardName(shardName);
        return getPeerAddresses(shardName, members);
    }
    /**
     * Resolves addresses for the given shard's peers, excluding the current member itself.
     *
     * @param shardName the shard name
     * @param members the configured members hosting the shard
     * @return map of peer shard-identifier strings to addresses (address may be null when
     *         the member's address is not yet known to the resolver)
     */
    private Map<String, String> getPeerAddresses(final String shardName, final Collection<MemberName> members) {
        Map<String, String> peerAddresses = new HashMap<>();
        MemberName currentMemberName = this.cluster.getCurrentMemberName();

        for (MemberName memberName : members) {
            if (!currentMemberName.equals(memberName)) {
                ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
                String address = peerAddressResolver.getShardActorAddress(shardName, memberName);
                peerAddresses.put(shardId.toString(), address);
            }
        }

        return peerAddresses;
    }
    /**
     * Supervision for child shard actors: log and resume on any Throwable (up to 10 failures
     * per minute) rather than restarting, so shard state is preserved.
     */
    @Override
    public SupervisorStrategy supervisorStrategy() {

        return new OneForOneStrategy(10, FiniteDuration.create(1, TimeUnit.MINUTES),
                (Function<Throwable, Directive>) t -> {
                    LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
                    return SupervisorStrategy.resume();
                });
    }
    // Akka persistence identifier for this ShardManager instance.
    @Override
    public String persistenceId() {
        return persistenceId;
    }
    // Package-visible accessor for the management bean; presumably used by tests.
    ShardManagerInfoMBean getMBean() {
        return shardManagerMBean;
    }
    /**
     * Checks whether a replica add/remove operation is already running for the shard; if so,
     * fails the sender with an IllegalStateException.
     *
     * @param shardName the shard being operated on
     * @param sender the requester to fail when an operation is in progress
     * @return true when an operation is already in progress (sender has been failed)
     */
    private boolean isShardReplicaOperationInProgress(final String shardName, final ActorRef sender) {
        if (shardReplicaOperationsInProgress.contains(shardName)) {
            LOG.debug("{}: A shard replica operation for {} is already in progress", persistenceId(), shardName);
            sender.tell(new Status.Failure(new IllegalStateException(
                String.format("A shard replica operation for %s is already in progress", shardName))), getSelf());
            return true;
        }

        return false;
    }
    /**
     * Handles a request to add a local replica of a prefix-based shard. Requires an available
     * SchemaContext; then locates the shard's primary and, when found remotely, schedules the
     * actual add (deferred if a previous actor for the shard is still stopping). A locally
     * found primary means the replica already exists and is reported as a failure.
     *
     * @param message the add-replica request carrying the shard prefix
     */
    private void onAddPrefixShardReplica(final AddPrefixShardReplica message) {
        LOG.debug("{}: onAddPrefixShardReplica: {}", persistenceId(), message);

        final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
                ClusterUtils.getCleanShardName(message.getShardPrefix()));
        final String shardName = shardId.getShardName();

        // Create the localShard
        if (schemaContext == null) {
            LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
                    persistenceId(), shardName);
            getSender().tell(new Status.Failure(new IllegalStateException(
                "No SchemaContext is available in order to create a local shard instance for " + shardName)),
                getSelf());
            return;
        }

        findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(), shardName, persistenceId(),
                getSelf()) {
            @Override
            public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
                final RunnableMessage runnable = (RunnableMessage) () -> addPrefixShard(getShardName(),
                        message.getShardPrefix(), response, getSender());
                if (!isPreviousShardActorStopInProgress(getShardName(), runnable)) {
                    getSelf().tell(runnable, getTargetActor());
                }
            }

            @Override
            public void onLocalPrimaryFound(final LocalPrimaryShardFound message) {
                sendLocalReplicaAlreadyExistsReply(getShardName(), getTargetActor());
            }
        });
    }
    /**
     * Handles a request to add a local replica of a module-based shard. Validates that the
     * shard is present in the static configuration and that a SchemaContext is available,
     * then locates the primary and schedules the add on remote-primary discovery; a local
     * primary means the replica already exists and is reported as a failure.
     *
     * @param shardReplicaMsg the add-replica request carrying the shard name
     */
    private void onAddShardReplica(final AddShardReplica shardReplicaMsg) {
        final String shardName = shardReplicaMsg.getShardName();

        LOG.debug("{}: onAddShardReplica: {}", persistenceId(), shardReplicaMsg);

        // verify the shard with the specified name is present in the cluster configuration
        if (!this.configuration.isShardConfigured(shardName)) {
            LOG.debug("{}: No module configuration exists for shard {}", persistenceId(), shardName);
            getSender().tell(new Status.Failure(new IllegalArgumentException(
                "No module configuration exists for shard " + shardName)), getSelf());
            return;
        }

        // Create the localShard
        if (schemaContext == null) {
            LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
                    persistenceId(), shardName);
            getSender().tell(new Status.Failure(new IllegalStateException(
                "No SchemaContext is available in order to create a local shard instance for " + shardName)),
                getSelf());
            return;
        }

        findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(), shardName, persistenceId(),
                getSelf()) {
            @Override
            public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
                final RunnableMessage runnable = (RunnableMessage) () ->
                    addShard(getShardName(), response, getSender());
                if (!isPreviousShardActorStopInProgress(getShardName(), runnable)) {
                    getSelf().tell(runnable, getTargetActor());
                }
            }

            @Override
            public void onLocalPrimaryFound(final LocalPrimaryShardFound message) {
                sendLocalReplicaAlreadyExistsReply(getShardName(), getTargetActor());
            }
        });
    }
    /**
     * Fails the given sender with an AlreadyExistsException for a shard that already has a
     * local replica.
     *
     * @param shardName the shard that already exists locally
     * @param sender the requester to notify
     */
    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
            justification = "https://github.com/spotbugs/spotbugs/issues/811")
    private void sendLocalReplicaAlreadyExistsReply(final String shardName, final ActorRef sender) {
        LOG.debug("{}: Local shard {} already exists", persistenceId(), shardName);
        sender.tell(new Status.Failure(new AlreadyExistsException(
            String.format("Local shard %s already exists", shardName))), getSelf());
    }
    /**
     * Performs the local half of adding a prefix-shard replica: creates (if absent) an
     * inactive local shard with elections disabled and the prefix as store root, then asks
     * the remote primary to add this member via execAddShard. If the shard info was created
     * here, it will be rolled back on failure.
     *
     * @param shardName the shard name
     * @param shardPrefix the prefix serving as the shard's store root
     * @param response the discovered remote primary
     * @param sender the original requester
     */
    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
            justification = "https://github.com/spotbugs/spotbugs/issues/811")
    private void addPrefixShard(final String shardName, final YangInstanceIdentifier shardPrefix,
                                final RemotePrimaryShardFound response, final ActorRef sender) {
        if (isShardReplicaOperationInProgress(shardName, sender)) {
            return;
        }

        shardReplicaOperationsInProgress.add(shardName);

        final ShardInformation shardInfo;
        final boolean removeShardOnFailure;
        ShardInformation existingShardInfo = localShards.get(shardName);
        if (existingShardInfo == null) {
            removeShardOnFailure = true;
            ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);

            // Elections stay disabled until the leader confirms the AddServer.
            final Builder builder = newShardDatastoreContextBuilder(shardName);
            builder.storeRoot(shardPrefix).customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());

            DatastoreContext datastoreContext = builder.build();

            shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
                    Shard.builder(), peerAddressResolver);
            shardInfo.setActiveMember(false);
            shardInfo.setSchemaContext(schemaContext);
            localShards.put(shardName, shardInfo);
            shardInfo.setActor(newShardActor(shardInfo));
        } else {
            removeShardOnFailure = false;
            shardInfo = existingShardInfo;
        }

        execAddShard(shardName, shardInfo, response, removeShardOnFailure, sender);
    }
    /**
     * Performs the local half of adding a module-shard replica: creates (if absent) an
     * inactive local shard with elections disabled, then asks the remote primary to add this
     * member via execAddShard. If the shard info was created here, it will be rolled back on
     * failure.
     *
     * @param shardName the shard name
     * @param response the discovered remote primary
     * @param sender the original requester
     */
    @SuppressFBWarnings(value = "UPM_UNCALLED_PRIVATE_METHOD",
            justification = "https://github.com/spotbugs/spotbugs/issues/811")
    private void addShard(final String shardName, final RemotePrimaryShardFound response, final ActorRef sender) {
        if (isShardReplicaOperationInProgress(shardName, sender)) {
            return;
        }

        shardReplicaOperationsInProgress.add(shardName);

        final ShardInformation shardInfo;
        final boolean removeShardOnFailure;
        ShardInformation existingShardInfo = localShards.get(shardName);
        if (existingShardInfo == null) {
            removeShardOnFailure = true;
            ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);

            // Elections stay disabled until the leader confirms the AddServer.
            DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName)
                    .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();

            shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
                    Shard.builder(), peerAddressResolver);
            shardInfo.setActiveMember(false);
            shardInfo.setSchemaContext(schemaContext);
            localShards.put(shardName, shardInfo);
            shardInfo.setActor(newShardActor(shardInfo));
        } else {
            removeShardOnFailure = false;
            shardInfo = existingShardInfo;
        }

        execAddShard(shardName, shardInfo, response, removeShardOnFailure, sender);
    }
    /**
     * Sends an AddServer request for the local replica to the shard leader and routes the
     * asynchronous outcome back to this actor as ForwardedAddServerReply /
     * ForwardedAddServerFailure, preserving the original sender.
     *
     * @param shardName the shard name
     * @param shardInfo the local shard information
     * @param response the discovered remote primary (leader path)
     * @param removeShardOnFailure whether a failure should roll back the freshly-created shard
     * @param sender the original requester
     */
    private void execAddShard(final String shardName,
                              final ShardInformation shardInfo,
                              final RemotePrimaryShardFound response,
                              final boolean removeShardOnFailure,
                              final ActorRef sender) {

        final String localShardAddress =
                peerAddressResolver.getShardActorAddress(shardName, cluster.getCurrentMemberName());

        //inform ShardLeader to add this shard as a replica by sending an AddServer message
        LOG.debug("{}: Sending AddServer message to peer {} for shard {}", persistenceId(),
                response.getPrimaryPath(), shardInfo.getShardId());

        final Timeout addServerTimeout = new Timeout(shardInfo.getDatastoreContext()
                .getShardLeaderElectionTimeout().duration());
        final Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
                new AddServer(shardInfo.getShardId().toString(), localShardAddress, true), addServerTimeout);

        futureObj.onComplete(new OnComplete<Object>() {
            @Override
            public void onComplete(final Throwable failure, final Object addServerResponse) {
                if (failure != null) {
                    LOG.debug("{}: AddServer request to {} for {} failed", persistenceId(),
                            response.getPrimaryPath(), shardName, failure);

                    final String msg = String.format("AddServer request to leader %s for shard %s failed",
                            response.getPrimaryPath(), shardName);
                    self().tell(new ForwardedAddServerFailure(shardName, msg, failure, removeShardOnFailure), sender);
                } else {
                    self().tell(new ForwardedAddServerReply(shardInfo, (AddServerReply)addServerResponse,
                            response.getPrimaryPath(), removeShardOnFailure), sender);
                }
            }
        }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
    }
1549 private void onAddServerFailure(final String shardName, final String message, final Throwable failure,
1550 final ActorRef sender, final boolean removeShardOnFailure) {
1551 shardReplicaOperationsInProgress.remove(shardName);
1553 if (removeShardOnFailure) {
1554 ShardInformation shardInfo = localShards.remove(shardName);
1555 if (shardInfo.getActor() != null) {
1556 shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());
1560 sender.tell(new Status.Failure(message == null ? failure :
1561 new RuntimeException(message, failure)), getSelf());
    /**
     * Handles the leader's reply to an AddServer request. On OK the local shard becomes a
     * voting active member and the sender gets Status.Success; ALREADY_EXISTS is reported as
     * such; any other status is converted to an exception and routed through
     * onAddServerFailure.
     *
     * @param shardInfo the local shard being added
     * @param replyMsg the leader's reply
     * @param sender the original requester
     * @param leaderPath the leader's actor path (for diagnostics)
     * @param removeShardOnFailure whether a failure must roll back the freshly-created shard
     */
    private void onAddServerReply(final ShardInformation shardInfo, final AddServerReply replyMsg,
            final ActorRef sender, final String leaderPath, final boolean removeShardOnFailure) {
        String shardName = shardInfo.getShardName();
        shardReplicaOperationsInProgress.remove(shardName);

        LOG.debug("{}: Received {} for shard {} from leader {}", persistenceId(), replyMsg, shardName, leaderPath);

        if (replyMsg.getStatus() == ServerChangeStatus.OK) {
            LOG.debug("{}: Leader shard successfully added the replica shard {}", persistenceId(), shardName);

            // Make the local shard voting capable
            shardInfo.setDatastoreContext(newShardDatastoreContext(shardName), getSelf());
            shardInfo.setActiveMember(true);
            persistShardList();

            sender.tell(new Status.Success(null), getSelf());
        } else if (replyMsg.getStatus() == ServerChangeStatus.ALREADY_EXISTS) {
            sendLocalReplicaAlreadyExistsReply(shardName, sender);
        } else {
            LOG.warn("{}: Leader failed to add shard replica {} with status {}",
                    persistenceId(), shardName, replyMsg.getStatus());

            Exception failure = getServerChangeException(AddServer.class, replyMsg.getStatus(), leaderPath,
                    shardInfo.getShardId());

            onAddServerFailure(shardName, null, failure, sender, removeShardOnFailure);
        }
    }
// Maps a ServerChangeStatus returned by a shard leader into an exception
// suitable for the requesting client.
// NOTE(review): the switch case labels (TIMEOUT, NO_LEADER, NOT_SUPPORTED,
// default - presumably) are missing from this extract; each return below
// belongs to one of those cases. Confirm against the full source.
1593 private static Exception getServerChangeException(final Class<?> serverChange,
1594 final ServerChangeStatus serverChangeStatus, final String leaderPath, final ShardIdentifier shardId) {
1595 switch (serverChangeStatus) {
1597 return new TimeoutException(String.format(
1598 "The shard leader %s timed out trying to replicate the initial data to the new shard %s."
1599 + "Possible causes - there was a problem replicating the data or shard leadership changed "
1600 + "while replicating the shard data", leaderPath, shardId.getShardName()));
1602 return new NoShardLeaderException(shardId);
1604 return new UnsupportedOperationException(String.format("%s request is not supported for shard %s",
1605 serverChange.getSimpleName(), shardId.getShardName()));
// Fallback for any other status value.
1607 return new RuntimeException(String.format("%s request to leader %s for shard %s failed with status %s",
1608 serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
// Initiates removal of a shard replica: locates the shard's primary
// (local or remote) and then asks self to run the actual removal so it
// executes on the actor thread. FindPrimary failures are auto-reported to
// the original sender by the handler base class.
1612 private void onRemoveShardReplica(final RemoveShardReplica shardReplicaMsg) {
1613 LOG.debug("{}: onRemoveShardReplica: {}", persistenceId(), shardReplicaMsg);
1615 findPrimary(shardReplicaMsg.getShardName(), new AutoFindPrimaryFailureResponseHandler(getSender(),
1616 shardReplicaMsg.getShardName(), persistenceId(), getSelf()) {
1618 public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
1619 doRemoveShardReplicaAsync(response.getPrimaryPath());
1623 public void onLocalPrimaryFound(final LocalPrimaryShardFound response) {
1624 doRemoveShardReplicaAsync(response.getPrimaryPath());
// Defer the removal to the actor mailbox via a RunnableMessage.
1627 private void doRemoveShardReplicaAsync(final String primaryPath) {
1628 getSelf().tell((RunnableMessage) () -> removeShardReplica(shardReplicaMsg, getShardName(),
1629 primaryPath, getSender()), getTargetActor());
// Prefix-shard variant of onRemoveShardReplica: derives the shard name from
// the message's YANG prefix, then finds the primary and schedules the
// removal on the actor thread.
1634 private void onRemovePrefixShardReplica(final RemovePrefixShardReplica message) {
1635 LOG.debug("{}: onRemovePrefixShardReplica: {}", persistenceId(), message);
// Translate the DOM prefix into the cluster-internal shard identifier.
1637 final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
1638 ClusterUtils.getCleanShardName(message.getShardPrefix()));
1639 final String shardName = shardId.getShardName();
1641 findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(),
1642 shardName, persistenceId(), getSelf()) {
1644 public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
1645 doRemoveShardReplicaAsync(response.getPrimaryPath());
1649 public void onLocalPrimaryFound(final LocalPrimaryShardFound response) {
1650 doRemoveShardReplicaAsync(response.getPrimaryPath());
// Defer the removal to the actor mailbox via a RunnableMessage.
1653 private void doRemoveShardReplicaAsync(final String primaryPath) {
1654 getSelf().tell((RunnableMessage) () -> removePrefixShardReplica(message, getShardName(),
1655 primaryPath, getSender()), getTargetActor());
// Snapshots the names of all local shards that are active members; shards
// whose ShardInformation reports isActiveMember() == false are excluded
// from the persisted list.
1660 private void persistShardList() {
1661 List<String> shardList = new ArrayList<>(localShards.keySet());
1662 for (ShardInformation shardInfo : localShards.values()) {
1663 if (!shardInfo.isActiveMember()) {
1664 shardList.remove(shardInfo.getShardName());
1667 LOG.debug("{}: persisting the shard list {}", persistenceId(), shardList);
1668 saveSnapshot(updateShardManagerSnapshot(shardList, configuration.getAllPrefixShardConfigurations()));
// Rebuilds and caches the in-memory snapshot from the given shard list and
// prefix-shard configurations; returns the new snapshot for persisting.
1671 private ShardManagerSnapshot updateShardManagerSnapshot(
1672 final List<String> shardList,
1673 final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> allPrefixShardConfigurations) {
1674 currentSnapshot = new ShardManagerSnapshot(shardList, allPrefixShardConfigurations);
1675 return currentSnapshot;
// Applies a recovered snapshot by reconciling the static configuration with
// the snapshot's shard list for this member: shards present in the snapshot
// but not configured gain this member as a replica; configured shards absent
// from the snapshot lose it.
1678 private void applyShardManagerSnapshot(final ShardManagerSnapshot snapshot) {
1679 currentSnapshot = snapshot;
1681 LOG.debug("{}: onSnapshotOffer: {}", persistenceId(), currentSnapshot);
1683 final MemberName currentMember = cluster.getCurrentMemberName();
// Mutable working copy - entries are removed as they are matched below.
1684 Set<String> configuredShardList =
1685 new HashSet<>(configuration.getMemberShardNames(currentMember));
1686 for (String shard : currentSnapshot.getShardList()) {
1687 if (!configuredShardList.contains(shard)) {
1688 // add the current member as a replica for the shard
1689 LOG.debug("{}: adding shard {}", persistenceId(), shard);
1690 configuration.addMemberReplicaForShard(shard, currentMember);
// Matched: drop it so only unmatched configured shards remain afterwards.
1692 configuredShardList.remove(shard);
// Whatever is left was configured but not in the snapshot - remove it.
1695 for (String shard : configuredShardList) {
1696 // remove the member as a replica for the shard
1697 LOG.debug("{}: removing shard {}", persistenceId(), shard);
1698 configuration.removeMemberReplicaForShard(shard, currentMember);
// After a successful snapshot save, prune all older snapshots: the criteria
// select every snapshot with a timestamp strictly before the one just saved.
// NOTE(review): the trailing criteria arguments are on missing lines.
1702 private void onSaveSnapshotSuccess(final SaveSnapshotSuccess successMessage) {
1703 LOG.debug("{} saved ShardManager snapshot successfully. Deleting the prev snapshot if available",
1705 deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), successMessage.metadata().timestamp() - 1,
// Translates a per-member voting-status request into a per-server-id map
// (member name -> shard server id string) and forwards it to the local
// shard actor once it is found.
// NOTE(review): getMeberVotingStatusMap is a typo ("Meber"), but it is the
// message class's public accessor and cannot be renamed here.
1709 private void onChangeShardServersVotingStatus(final ChangeShardMembersVotingStatus changeMembersVotingStatus) {
1710 LOG.debug("{}: onChangeShardServersVotingStatus: {}", persistenceId(), changeMembersVotingStatus);
1712 String shardName = changeMembersVotingStatus.getShardName();
1713 Map<String, Boolean> serverVotingStatusMap = new HashMap<>();
1714 for (Entry<String, Boolean> e: changeMembersVotingStatus.getMeberVotingStatusMap().entrySet()) {
// Key the raft-level map by the full shard server identifier.
1715 serverVotingStatusMap.put(getShardIdentifier(MemberName.forName(e.getKey()), shardName).toString(),
1719 ChangeServersVotingStatus changeServersVotingStatus = new ChangeServersVotingStatus(serverVotingStatusMap);
1721 findLocalShard(shardName, getSender(),
1722 localShardFound -> changeShardMembersVotingStatus(changeServersVotingStatus, shardName,
1723 localShardFound.getPath(), getSender()));
// Inverts the voting state of every member of a shard: queries the local
// shard's on-demand raft state, negates each peer's voting flag plus the
// local member's own, and submits the resulting ChangeServersVotingStatus.
1726 private void onFlipShardMembersVotingStatus(final FlipShardMembersVotingStatus flipMembersVotingStatus) {
1727 LOG.debug("{}: onFlipShardMembersVotingStatus: {}", persistenceId(), flipMembersVotingStatus);
// Capture the sender - getSender() is not valid inside the async callback.
1729 ActorRef sender = getSender();
1730 final String shardName = flipMembersVotingStatus.getShardName();
1731 findLocalShard(shardName, sender, localShardFound -> {
1732 Future<Object> future = ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
1733 Timeout.apply(30, TimeUnit.SECONDS));
1735 future.onComplete(new OnComplete<Object>() {
1737 public void onComplete(final Throwable failure, final Object response) {
1738 if (failure != null) {
1739 sender.tell(new Status.Failure(new RuntimeException(
1740 String.format("Failed to access local shard %s", shardName), failure)), self());
1744 OnDemandRaftState raftState = (OnDemandRaftState) response;
1745 Map<String, Boolean> serverVotingStatusMap = new HashMap<>();
// Flip every peer's voting flag.
1746 for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
1747 serverVotingStatusMap.put(e.getKey(), !e.getValue());
// The local member is not in the peer map - flip it explicitly.
1750 serverVotingStatusMap.put(getShardIdentifier(cluster.getCurrentMemberName(), shardName)
1751 .toString(), !raftState.isVoting());
1753 changeShardMembersVotingStatus(new ChangeServersVotingStatus(serverVotingStatusMap),
1754 shardName, localShardFound.getPath(), sender);
// Run the callback on the client dispatcher to keep actor dispatchers free.
1756 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
// Message handler for FindLocalShard: replies LocalShardNotFound if the
// shard is unknown, otherwise replies LocalShardFound (optionally waiting
// for the shard to initialize, per the message flag).
1761 private void findLocalShard(final FindLocalShard message) {
1762 LOG.debug("{}: findLocalShard : {}", persistenceId(), message.getShardName());
1764 final ShardInformation shardInformation = localShards.get(message.getShardName());
1766 if (shardInformation == null) {
1767 LOG.debug("{}: Local shard {} not found - shards present: {}",
1768 persistenceId(), message.getShardName(), localShards.keySet());
1770 getSender().tell(new LocalShardNotFound(message.getShardName()), getSelf());
// sendResponse defers the reply until the shard is initialized if asked to.
1774 sendResponse(shardInformation, message.isWaitUntilInitialized(), false,
1775 () -> new LocalShardFound(shardInformation.getActor()));
// Asynchronous helper: asks self FindLocalShard (waiting for shard
// initialization) and, on success, re-dispatches onLocalShardFound onto the
// actor thread; all failure shapes are reported to the given sender as
// Status.Failure.
1778 private void findLocalShard(final String shardName, final ActorRef sender,
1779 final Consumer<LocalShardFound> onLocalShardFound) {
// Allow twice the shard-initialization timeout for the round trip.
1780 Timeout findLocalTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
1781 .getShardInitializationTimeout().duration().$times(2));
1783 Future<Object> futureObj = ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
1784 futureObj.onComplete(new OnComplete<Object>() {
1786 public void onComplete(final Throwable failure, final Object response) {
1787 if (failure != null) {
1788 LOG.debug("{}: Received failure from FindLocalShard for shard {}", persistenceId, shardName,
1790 sender.tell(new Status.Failure(new RuntimeException(
1791 String.format("Failed to find local shard %s", shardName), failure)), self());
1793 if (response instanceof LocalShardFound) {
// Hop back onto the actor thread before invoking the consumer.
1794 getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept((LocalShardFound) response),
1796 } else if (response instanceof LocalShardNotFound) {
1797 LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
1798 sender.tell(new Status.Failure(new IllegalArgumentException(
1799 String.format("Local shard %s does not exist", shardName))), self());
// Unexpected response type: pass Throwables through, wrap anything else.
1801 LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
1803 sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
1804 : new RuntimeException(
1805 String.format("Failed to find local shard %s: received response: %s", shardName,
1806 response))), self());
// Run the callback on the client dispatcher to keep actor dispatchers free.
1810 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
// Sends a ChangeServersVotingStatus request to the local shard actor and
// relays the outcome to the sender. Guarded by the per-shard in-progress
// set so only one replica operation runs per shard at a time.
1813 private void changeShardMembersVotingStatus(final ChangeServersVotingStatus changeServersVotingStatus,
1814 final String shardName, final ActorRef shardActorRef, final ActorRef sender) {
// Bail out (and reply) if another replica operation is already running.
1815 if (isShardReplicaOperationInProgress(shardName, sender)) {
1819 shardReplicaOperationsInProgress.add(shardName);
1821 DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
1822 final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
1824 LOG.debug("{}: Sending ChangeServersVotingStatus message {} to local shard {}", persistenceId(),
1825 changeServersVotingStatus, shardActorRef.path());
// Allow twice the leader-election timeout for the voting change to settle.
1827 Timeout timeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration().$times(2));
1828 Future<Object> futureObj = ask(shardActorRef, changeServersVotingStatus, timeout);
1830 futureObj.onComplete(new OnComplete<Object>() {
1832 public void onComplete(final Throwable failure, final Object response) {
// Operation finished (either way) - release the guard.
1833 shardReplicaOperationsInProgress.remove(shardName);
1834 if (failure != null) {
1835 LOG.debug("{}: ChangeServersVotingStatus request to local shard {} failed", persistenceId(),
1836 shardActorRef.path(), failure);
1837 sender.tell(new Status.Failure(new RuntimeException(
1838 String.format("ChangeServersVotingStatus request to local shard %s failed",
1839 shardActorRef.path()), failure)), self());
1841 LOG.debug("{}: Received {} from local shard {}", persistenceId(), response, shardActorRef.path());
1843 ServerChangeReply replyMsg = (ServerChangeReply) response;
1844 if (replyMsg.getStatus() == ServerChangeStatus.OK) {
1845 LOG.debug("{}: ChangeServersVotingStatus succeeded for shard {}", persistenceId(), shardName);
1846 sender.tell(new Status.Success(null), getSelf());
1847 } else if (replyMsg.getStatus() == ServerChangeStatus.INVALID_REQUEST) {
// Raft rejects a change that would leave zero voting members.
1848 sender.tell(new Status.Failure(new IllegalArgumentException(String.format(
1849 "The requested voting state change for shard %s is invalid. At least one member "
1850 + "must be voting", shardId.getShardName()))), getSelf());
1852 LOG.warn("{}: ChangeServersVotingStatus failed for shard {} with status {}",
1853 persistenceId(), shardName, replyMsg.getStatus());
1855 Exception error = getServerChangeException(ChangeServersVotingStatus.class,
1856 replyMsg.getStatus(), shardActorRef.path().toString(), shardId);
1857 sender.tell(new Status.Failure(error), getSelf());
// Run the callback on the client dispatcher to keep actor dispatchers free.
1861 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
// Self-addressed message carrying a successful AddServerReply back onto the
// actor thread for processing by onAddServerReply.
// NOTE(review): the leaderPath field declaration is on a line missing from
// this extract (the constructor assigns this.leaderPath below).
1864 private static final class ForwardedAddServerReply {
1865 ShardInformation shardInfo;
1866 AddServerReply addServerReply;
1868 boolean removeShardOnFailure;
1870 ForwardedAddServerReply(final ShardInformation shardInfo, final AddServerReply addServerReply,
1871 final String leaderPath, final boolean removeShardOnFailure) {
1872 this.shardInfo = shardInfo;
1873 this.addServerReply = addServerReply;
1874 this.leaderPath = leaderPath;
1875 this.removeShardOnFailure = removeShardOnFailure;
// Self-addressed message carrying an AddServer failure back onto the actor
// thread for processing by onAddServerFailure.
// NOTE(review): the shardName and failure field declarations are on lines
// missing from this extract (the constructor assigns both below).
1879 private static final class ForwardedAddServerFailure {
1881 String failureMessage;
1883 boolean removeShardOnFailure;
1885 ForwardedAddServerFailure(final String shardName, final String failureMessage, final Throwable failure,
1886 final boolean removeShardOnFailure) {
1887 this.shardName = shardName;
1888 this.failureMessage = failureMessage;
1889 this.failure = failure;
1890 this.removeShardOnFailure = removeShardOnFailure;
// Holds a reply callback to run once a shard finishes initializing, plus
// the cancellable timer that fires if initialization takes too long.
1894 static class OnShardInitialized {
1895 private final Runnable replyRunnable;
1896 private Cancellable timeoutSchedule;
1898 OnShardInitialized(final Runnable replyRunnable) {
1899 this.replyRunnable = replyRunnable;
1902 Runnable getReplyRunnable() {
1903 return replyRunnable;
1906 Cancellable getTimeoutSchedule() {
1907 return timeoutSchedule;
1910 void setTimeoutSchedule(final Cancellable timeoutSchedule) {
1911 this.timeoutSchedule = timeoutSchedule;
// Marker subclass: same callback mechanics as OnShardInitialized, but used
// when the caller must wait for the shard to be ready (have a leader),
// not merely initialized.
1915 static class OnShardReady extends OnShardInitialized {
1916 OnShardReady(final Runnable replyRunnable) {
1917 super(replyRunnable);
1921 private interface RunnableMessage extends Runnable {
1925 * The FindPrimaryResponseHandler provides specific callback methods which are invoked when a response to the
1926 * a remote or local find primary message is processed.
1928 private interface FindPrimaryResponseHandler {
1930 * Invoked when a Failure message is received as a response.
1932 * @param failure the failure exception
1934 void onFailure(Throwable failure);
1937 * Invoked when a RemotePrimaryShardFound response is received.
1939 * @param response the response
1941 void onRemotePrimaryShardFound(RemotePrimaryShardFound response);
1944 * Invoked when a LocalPrimaryShardFound response is received.
1946 * @param response the response
1948 void onLocalPrimaryFound(LocalPrimaryShardFound response);
1951 * Invoked when an unknown response is received. This is another type of failure.
1953 * @param response the response
1955 void onUnknownResponse(Object response);
1959 * The AutoFindPrimaryFailureResponseHandler automatically processes Failure responses when finding a primary
1960 * replica and sends a wrapped Failure response to some targetActor.
1962 private abstract static class AutoFindPrimaryFailureResponseHandler implements FindPrimaryResponseHandler {
1963 private final ActorRef targetActor;
1964 private final String shardName;
1965 private final String persistenceId;
1966 private final ActorRef shardManagerActor;
1969 * Constructs an instance.
1971 * @param targetActor The actor to whom the Failure response should be sent when a FindPrimary failure occurs
1972 * @param shardName The name of the shard for which the primary replica had to be found
1973 * @param persistenceId The persistenceId for the ShardManager
1974 * @param shardManagerActor The ShardManager actor which triggered the call to FindPrimary
1976 protected AutoFindPrimaryFailureResponseHandler(final ActorRef targetActor, final String shardName,
1977 final String persistenceId, final ActorRef shardManagerActor) {
// All collaborators are mandatory - fail fast on construction.
1978 this.targetActor = requireNonNull(targetActor);
1979 this.shardName = requireNonNull(shardName);
1980 this.persistenceId = requireNonNull(persistenceId);
1981 this.shardManagerActor = requireNonNull(shardManagerActor);
// NOTE(review): the getter return statements are on lines missing from
// this extract.
1984 public ActorRef getTargetActor() {
1988 public String getShardName() {
// Wrap the FindPrimary failure and forward it to the target actor.
1993 public void onFailure(final Throwable failure) {
1994 LOG.debug("{}: Received failure from FindPrimary for shard {}", persistenceId, shardName, failure);
1995 targetActor.tell(new Status.Failure(new RuntimeException(
1996 String.format("Failed to find leader for shard %s", shardName), failure)), shardManagerActor);
// Unexpected response: pass Throwables through, wrap anything else.
2000 public void onUnknownResponse(final Object response) {
2001 LOG.debug("{}: Failed to find leader for shard {}: received response: {}", persistenceId, shardName,
2003 targetActor.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
2004 : new RuntimeException(String.format("Failed to find leader for shard %s: received response: %s",
2005 shardName, response))), shardManagerActor);
2010 * The WrappedShardResponse class wraps a response from a Shard.
2012 private static final class WrappedShardResponse {
2013 private final ShardIdentifier shardId;
2014 private final Object response;
2015 private final String leaderPath;
2017 WrappedShardResponse(final ShardIdentifier shardId, final Object response, final String leaderPath) {
2018 this.shardId = shardId;
2019 this.response = response;
2020 this.leaderPath = leaderPath;
// NOTE(review): the getter return statements are on lines missing from
// this extract.
2023 ShardIdentifier getShardId() {
2027 Object getResponse() {
2031 String getLeaderPath() {
// Self-addressed timeout message fired when a shard fails to initialize in
// time; carries the waiting sender, the shard's info, and the registered
// OnShardInitialized callback so it can be cancelled/answered.
2036 private static final class ShardNotInitializedTimeout {
2037 private final ActorRef sender;
2038 private final ShardInformation shardInfo;
2039 private final OnShardInitialized onShardInitialized;
2041 ShardNotInitializedTimeout(final ShardInformation shardInfo, final OnShardInitialized onShardInitialized,
2042 final ActorRef sender) {
2043 this.sender = sender;
2044 this.shardInfo = shardInfo;
2045 this.onShardInitialized = onShardInitialized;
// NOTE(review): getter return statements for sender and shardInfo are on
// lines missing from this extract.
2048 ActorRef getSender() {
2052 ShardInformation getShardInfo() {
2056 OnShardInitialized getOnShardInitialized() {
2057 return onShardInitialized;