/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
9 package org.opendaylight.controller.cluster.datastore.shardmanager;
11 import static akka.pattern.Patterns.ask;
13 import akka.actor.ActorRef;
14 import akka.actor.Address;
15 import akka.actor.Cancellable;
16 import akka.actor.OneForOneStrategy;
17 import akka.actor.PoisonPill;
18 import akka.actor.Status;
19 import akka.actor.SupervisorStrategy;
20 import akka.actor.SupervisorStrategy.Directive;
21 import akka.cluster.ClusterEvent;
22 import akka.cluster.ClusterEvent.MemberWeaklyUp;
23 import akka.cluster.Member;
24 import akka.dispatch.Futures;
25 import akka.dispatch.OnComplete;
26 import akka.japi.Function;
27 import akka.pattern.Patterns;
28 import akka.persistence.RecoveryCompleted;
29 import akka.persistence.SaveSnapshotFailure;
30 import akka.persistence.SaveSnapshotSuccess;
31 import akka.persistence.SnapshotOffer;
32 import akka.persistence.SnapshotSelectionCriteria;
33 import akka.util.Timeout;
34 import com.google.common.annotations.VisibleForTesting;
35 import com.google.common.base.Preconditions;
36 import java.io.ByteArrayInputStream;
37 import java.io.IOException;
38 import java.io.ObjectInputStream;
39 import java.util.ArrayList;
40 import java.util.Collection;
41 import java.util.Collections;
42 import java.util.HashMap;
43 import java.util.HashSet;
44 import java.util.List;
46 import java.util.Map.Entry;
48 import java.util.concurrent.CountDownLatch;
49 import java.util.concurrent.TimeUnit;
50 import java.util.concurrent.TimeoutException;
51 import java.util.function.Consumer;
52 import java.util.function.Supplier;
53 import org.apache.commons.lang3.SerializationUtils;
54 import org.opendaylight.controller.cluster.access.concepts.MemberName;
55 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
56 import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
57 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
58 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
59 import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
60 import org.opendaylight.controller.cluster.datastore.Shard;
61 import org.opendaylight.controller.cluster.datastore.config.Configuration;
62 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
63 import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
64 import org.opendaylight.controller.cluster.datastore.exceptions.AlreadyExistsException;
65 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
66 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
67 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
68 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
69 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
70 import org.opendaylight.controller.cluster.datastore.messages.AddPrefixShardReplica;
71 import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
72 import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
73 import org.opendaylight.controller.cluster.datastore.messages.CreatePrefixedShard;
74 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
75 import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
76 import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
77 import org.opendaylight.controller.cluster.datastore.messages.FlipShardMembersVotingStatus;
78 import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
79 import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
80 import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
81 import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
82 import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
83 import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
84 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
85 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
86 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
87 import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
88 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
89 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
90 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
91 import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
92 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
93 import org.opendaylight.controller.cluster.raft.base.messages.SwitchBehavior;
94 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
95 import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
96 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
97 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
98 import org.opendaylight.controller.cluster.raft.messages.AddServer;
99 import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
100 import org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus;
101 import org.opendaylight.controller.cluster.raft.messages.RemoveServer;
102 import org.opendaylight.controller.cluster.raft.messages.RemoveServerReply;
103 import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply;
104 import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
105 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
106 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
107 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
108 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
109 import org.slf4j.Logger;
110 import org.slf4j.LoggerFactory;
111 import scala.concurrent.ExecutionContext;
112 import scala.concurrent.Future;
113 import scala.concurrent.duration.Duration;
114 import scala.concurrent.duration.FiniteDuration;
/**
 * Manages the shards for a data store. The ShardManager has the following jobs:
 * <ul>
 * <li> Create all the local shard replicas that belong on this cluster member
 * <li> Find the address of the local shard
 * <li> Find the primary replica for any given shard
 * <li> Monitor the cluster members and store their addresses
 * </ul>
 */
125 class ShardManager extends AbstractUntypedPersistentActorWithMetering {
// SLF4J logger shared by all ShardManager instances.
126 private static final Logger LOG = LoggerFactory.getLogger(ShardManager.class);
128 // Stores a mapping between a shard name and it's corresponding information
129 // Shard names look like inventory, topology etc and are as specified in
131 private final Map<String, ShardInformation> localShards = new HashMap<>();
133 // The type of a ShardManager reflects the type of the datastore itself
134 // A data store could be of type config/operational
135 private final String type;
// Abstraction over the akka Cluster; used to subscribe to member events and to
// obtain the current member name (see constructor).
137 private final ClusterWrapper cluster;
// Static module/prefix shard configuration; doCreateShard/doCreatePrefixedShard add to it.
139 private final Configuration configuration;
// Akka dispatcher path for the Shard dispatcher type (resolved in the constructor).
141 private final String shardDispatcherPath;
// JMX bean exposing manager state; registered in the constructor, unregistered in postStop().
143 private final ShardManagerInfo shardManagerMBean;
// Per-shard DatastoreContext factory. Non-final; presumably replaced by
// onDatastoreContextFactory() when a DatastoreContextFactory message arrives -- confirm.
145 private DatastoreContextFactory datastoreContextFactory;
// Counted down once all local shards are ready with a known leader (see checkReady()).
147 private final CountDownLatch waitTillReadyCountdownLatch;
// Cache of primary shard lookups; invalidated when a shard's leader id changes.
149 private final PrimaryShardInfoFutureCache primaryShardInfoCache;
// Resolves peer shard actor addresses for remote cluster members.
151 private final ShardPeerAddressResolver peerAddressResolver;
// Set via UpdateSchemaContext; shard actors are only created once this is non-null.
153 private SchemaContext schemaContext;
// Optional snapshot to restore from on recovery, supplied by the builder.
155 private DatastoreSnapshot restoreFromSnapshot;
// Last known ShardManager-level snapshot (carries the shard name list).
157 private ShardManagerSnapshot currentSnapshot;
// Names of shards with a replica add/remove operation currently in flight.
159 private final Set<String> shardReplicaOperationsInProgress = new HashSet<>();
// Persistence id: either the context-supplied override or "shard-manager-<type>".
161 private final String persistenceId;
// Initializes the ShardManager from its builder: wires the cluster/configuration
// dependencies, derives the persistence id, subscribes to cluster member events and
// registers the JMX MBean.
163 ShardManager(AbstractShardManagerCreator<?> builder) {
164 this.cluster = builder.getCluster();
165 this.configuration = builder.getConfiguration();
166 this.datastoreContextFactory = builder.getDatastoreContextFactory();
// The datastore name (config/operational) doubles as this manager's "type".
167 this.type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
168 this.shardDispatcherPath =
169 new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
170 this.waitTillReadyCountdownLatch = builder.getWaitTillReadyCountDownLatch();
171 this.primaryShardInfoCache = builder.getPrimaryShardInfoCache();
172 this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
// The persistence id may be overridden through the base datastore context.
174 String possiblePersistenceId = datastoreContextFactory.getBaseDatastoreContext().getShardManagerPersistenceId();
175 persistenceId = possiblePersistenceId != null ? possiblePersistenceId : "shard-manager-" + type;
177 peerAddressResolver = new ShardPeerAddressResolver(type, cluster.getCurrentMemberName());
179 // Subscribe this actor to cluster member events
180 cluster.subscribeToMemberEvents(getSelf());
182 shardManagerMBean = new ShardManagerInfo(getSelf(), cluster.getCurrentMemberName(),
183 "shard-manager-" + this.type,
184 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
185 shardManagerMBean.registerMBean();
189 public void postStop() {
190 LOG.info("Stopping ShardManager {}", persistenceId());
192 shardManagerMBean.unregisterMBean();
// Main message dispatch for the ShardManager actor: each branch casts the incoming
// message to its concrete type and forwards it to the matching handler method.
// NOTE(review): gaps in the embedded line numbering show that lines were dropped from
// this copy, including the handler invocations for the GetSnapshot and Shutdown
// branches (presumably onGetSnapshot() / onShutDown() -- confirm against upstream)
// and the else/closing braces around the final unknownMessage(message) fallback.
196 public void handleCommand(Object message) throws Exception {
197 if (message instanceof FindPrimary) {
198 findPrimary((FindPrimary)message);
199 } else if (message instanceof FindLocalShard) {
200 findLocalShard((FindLocalShard) message);
201 } else if (message instanceof UpdateSchemaContext) {
202 updateSchemaContext(message);
203 } else if (message instanceof ActorInitialized) {
204 onActorInitialized(message);
// Cluster membership lifecycle events (subscribed to in the constructor).
205 } else if (message instanceof ClusterEvent.MemberUp) {
206 memberUp((ClusterEvent.MemberUp) message);
207 } else if (message instanceof ClusterEvent.MemberWeaklyUp) {
208 memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
209 } else if (message instanceof ClusterEvent.MemberExited) {
210 memberExited((ClusterEvent.MemberExited) message);
211 } else if (message instanceof ClusterEvent.MemberRemoved) {
212 memberRemoved((ClusterEvent.MemberRemoved) message);
213 } else if (message instanceof ClusterEvent.UnreachableMember) {
214 memberUnreachable((ClusterEvent.UnreachableMember) message);
215 } else if (message instanceof ClusterEvent.ReachableMember) {
216 memberReachable((ClusterEvent.ReachableMember) message);
217 } else if (message instanceof DatastoreContextFactory) {
218 onDatastoreContextFactory((DatastoreContextFactory) message);
219 } else if (message instanceof RoleChangeNotification) {
220 onRoleChangeNotification((RoleChangeNotification) message);
221 } else if (message instanceof FollowerInitialSyncUpStatus) {
222 onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
223 } else if (message instanceof ShardNotInitializedTimeout) {
224 onShardNotInitializedTimeout((ShardNotInitializedTimeout) message);
225 } else if (message instanceof ShardLeaderStateChanged) {
226 onLeaderStateChanged((ShardLeaderStateChanged) message);
227 } else if (message instanceof SwitchShardBehavior) {
228 onSwitchShardBehavior((SwitchShardBehavior) message);
229 } else if (message instanceof CreateShard) {
230 onCreateShard((CreateShard)message);
231 } else if (message instanceof AddShardReplica) {
232 onAddShardReplica((AddShardReplica) message);
233 } else if (message instanceof CreatePrefixedShard) {
234 onCreatePrefixedShard((CreatePrefixedShard) message);
235 } else if (message instanceof AddPrefixShardReplica) {
236 onAddPrefixShardReplica((AddPrefixShardReplica) message);
237 } else if (message instanceof ForwardedAddServerReply) {
238 ForwardedAddServerReply msg = (ForwardedAddServerReply)message;
239 onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath,
240 msg.removeShardOnFailure);
241 } else if (message instanceof ForwardedAddServerFailure) {
242 ForwardedAddServerFailure msg = (ForwardedAddServerFailure)message;
243 onAddServerFailure(msg.shardName, msg.failureMessage, msg.failure, getSender(), msg.removeShardOnFailure);
244 } else if (message instanceof RemoveShardReplica) {
245 onRemoveShardReplica((RemoveShardReplica) message);
246 } else if (message instanceof WrappedShardResponse) {
247 onWrappedShardResponse((WrappedShardResponse) message);
// NOTE(review): the body of this branch is not visible in this copy (dropped line 249);
// presumably onGetSnapshot() -- confirm.
248 } else if (message instanceof GetSnapshot) {
250 } else if (message instanceof ServerRemoved) {
251 onShardReplicaRemoved((ServerRemoved) message);
252 } else if (message instanceof ChangeShardMembersVotingStatus) {
253 onChangeShardServersVotingStatus((ChangeShardMembersVotingStatus) message);
254 } else if (message instanceof FlipShardMembersVotingStatus) {
255 onFlipShardMembersVotingStatus((FlipShardMembersVotingStatus) message);
256 } else if (message instanceof SaveSnapshotSuccess) {
257 onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
258 } else if (message instanceof SaveSnapshotFailure) {
259 LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(),
260 ((SaveSnapshotFailure) message).cause());
// NOTE(review): the body of this branch is not visible in this copy (dropped line 262);
// presumably onShutDown() -- confirm.
261 } else if (message instanceof Shutdown) {
263 } else if (message instanceof GetLocalShardIds) {
264 onGetLocalShardIds();
265 } else if (message instanceof RunnableMessage) {
266 ((RunnableMessage)message).run();
// Fallback for unhandled message types (the enclosing else and closing braces were
// dropped from this copy).
268 unknownMessage(message);
// Gracefully stops every local shard actor (Shutdown message with a timeout of twice
// the shard's election timeout), then sends PoisonPill to self once all stop futures
// complete -- regardless of individual failures.
// NOTE(review): several lines are missing from this copy (embedded-number gaps),
// including the loop/branch closers and the computation of 'nfailed' used in the final
// warning -- presumably a count of 'false' results from gracefulStop. Confirm upstream.
272 private void onShutDown() {
273 List<Future<Boolean>> stopFutures = new ArrayList<>(localShards.size());
274 for (ShardInformation info : localShards.values()) {
275 if (info.getActor() != null) {
276 LOG.debug("{}: Issuing gracefulStop to shard {}", persistenceId(), info.getShardId());
// Allow up to 2x the election timeout for the shard to stop cleanly.
278 FiniteDuration duration = info.getDatastoreContext().getShardRaftConfig()
279 .getElectionTimeOutInterval().$times(2);
280 stopFutures.add(Patterns.gracefulStop(info.getActor(), duration, Shutdown.INSTANCE));
284 LOG.info("Shutting down ShardManager {} - waiting on {} shards", persistenceId(), stopFutures.size());
286 ExecutionContext dispatcher = new Dispatchers(context().system().dispatchers())
287 .getDispatcher(Dispatchers.DispatcherType.Client);
288 Future<Iterable<Boolean>> combinedFutures = Futures.sequence(stopFutures, dispatcher);
290 combinedFutures.onComplete(new OnComplete<Iterable<Boolean>>() {
// Runs on the Client dispatcher once every shard stop future has completed.
292 public void onComplete(Throwable failure, Iterable<Boolean> results) {
293 LOG.debug("{}: All shards shutdown - sending PoisonPill to self", persistenceId());
// Self-terminate whether or not the shards stopped cleanly.
295 self().tell(PoisonPill.getInstance(), self());
297 if (failure != null) {
298 LOG.warn("{}: An error occurred attempting to shut down the shards", persistenceId(), failure);
301 for (Boolean result : results) {
308 LOG.warn("{}: {} shards did not shut down gracefully", persistenceId(), nfailed);
315 private void onWrappedShardResponse(WrappedShardResponse message) {
316 if (message.getResponse() instanceof RemoveServerReply) {
317 onRemoveServerReply(getSender(), message.getShardId(), (RemoveServerReply) message.getResponse(),
318 message.getLeaderPath());
322 private void onRemoveServerReply(ActorRef originalSender, ShardIdentifier shardId, RemoveServerReply replyMsg,
324 shardReplicaOperationsInProgress.remove(shardId.getShardName());
326 LOG.debug("{}: Received {} for shard {}", persistenceId(), replyMsg, shardId.getShardName());
328 if (replyMsg.getStatus() == ServerChangeStatus.OK) {
329 LOG.debug("{}: Leader shard successfully removed the replica shard {}", persistenceId(),
330 shardId.getShardName());
331 originalSender.tell(new Status.Success(null), getSelf());
333 LOG.warn("{}: Leader failed to remove shard replica {} with status {}",
334 persistenceId(), shardId, replyMsg.getStatus());
336 Exception failure = getServerChangeException(RemoveServer.class, replyMsg.getStatus(), leaderPath, shardId);
337 originalSender.tell(new Status.Failure(failure), getSelf());
// Asks the shard leader at 'primaryPath' (via a RemoveServer message) to remove the
// requested member's replica of 'shardName'. On ask failure the original sender is
// notified directly; on success the leader's reply is re-wrapped as a
// WrappedShardResponse and sent to self so onWrappedShardResponse finishes the
// operation on the actor thread.
// NOTE(review): lines are missing from this copy, including the body of the
// in-progress guard (presumably an early return) and the else delimiters inside the
// onComplete callback -- confirm against upstream.
341 private void removeShardReplica(RemoveShardReplica contextMessage, final String shardName, final String primaryPath,
342 final ActorRef sender) {
343 if (isShardReplicaOperationInProgress(shardName, sender)) {
// Mark the operation in flight; cleared on failure below or in onRemoveServerReply.
347 shardReplicaOperationsInProgress.add(shardName);
349 final ShardIdentifier shardId = getShardIdentifier(contextMessage.getMemberName(), shardName);
351 final DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
353 //inform ShardLeader to remove this shard as a replica by sending an RemoveServer message
354 LOG.debug("{}: Sending RemoveServer message to peer {} for shard {}", persistenceId(),
355 primaryPath, shardId);
357 Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
358 Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
359 new RemoveServer(shardId.toString()), removeServerTimeout);
361 futureObj.onComplete(new OnComplete<Object>() {
// Ask-completion callback, executed on the Client dispatcher.
363 public void onComplete(Throwable failure, Object response) {
364 if (failure != null) {
365 shardReplicaOperationsInProgress.remove(shardName);
366 String msg = String.format("RemoveServer request to leader %s for shard %s failed",
367 primaryPath, shardName);
369 LOG.debug("{}: {}", persistenceId(), msg, failure);
// Tell the original sender about the failure directly.
372 sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
// Success path: forward the leader's reply to self, preserving the original sender.
375 self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
378 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
381 private void onShardReplicaRemoved(ServerRemoved message) {
382 final ShardIdentifier shardId = new ShardIdentifier.Builder().fromShardIdString(message.getServerId()).build();
383 final ShardInformation shardInformation = localShards.remove(shardId.getShardName());
384 if (shardInformation == null) {
385 LOG.debug("{} : Shard replica {} is not present in list", persistenceId(), shardId.toString());
387 } else if (shardInformation.getActor() != null) {
388 LOG.debug("{} : Sending Shutdown to Shard actor {}", persistenceId(), shardInformation.getActor());
389 shardInformation.getActor().tell(Shutdown.INSTANCE, self());
391 LOG.debug("{} : Local Shard replica for shard {} has been removed", persistenceId(), shardId.getShardName());
// Replies to a GetSnapshot request: fails fast if any local shard is still
// uninitialized, otherwise spawns a ShardManagerGetSnapshotReplyActor to collect a
// GetSnapshot reply from every local shard and assemble the datastore snapshot.
// NOTE(review): lines are missing from this copy, including what is presumably an
// early return after the failure reply and the loop/branch closers -- confirm.
395 private void onGetSnapshot() {
396 LOG.debug("{}: onGetSnapshot", persistenceId());
// Lazily collect the names of shards that have not finished initializing.
398 List<String> notInitialized = null;
399 for (ShardInformation shardInfo : localShards.values()) {
400 if (!shardInfo.isShardInitialized()) {
401 if (notInitialized == null) {
402 notInitialized = new ArrayList<>();
405 notInitialized.add(shardInfo.getShardName());
409 if (notInitialized != null) {
410 getSender().tell(new Status.Failure(new IllegalStateException(String.format(
411 "%d shard(s) %s are not initialized", notInitialized.size(), notInitialized))), getSelf());
// Serialized ShardManager-level snapshot (shard list), if one exists.
415 byte[] shardManagerSnapshot = null;
416 if (currentSnapshot != null) {
417 shardManagerSnapshot = SerializationUtils.serialize(currentSnapshot);
// The reply actor aggregates each shard's GetSnapshot response and answers the sender.
420 ActorRef replyActor = getContext().actorOf(ShardManagerGetSnapshotReplyActor.props(
421 new ArrayList<>(localShards.keySet()), type, shardManagerSnapshot , getSender(), persistenceId(),
422 datastoreContextFactory.getBaseDatastoreContext().getShardInitializationTimeout().duration()));
424 for (ShardInformation shardInfo: localShards.values()) {
425 shardInfo.getActor().tell(GetSnapshot.INSTANCE, replyActor);
// Handles CreatePrefixedShard: creates a local prefix-based shard unless one with the
// same name already exists, then replies with Status.Success/Failure to the sender
// (unless the sender is deadLetters).
// NOTE(review): the declaration of 'reply' and the opening try block are among the
// lines missing from this copy.
429 @SuppressWarnings("checkstyle:IllegalCatch")
430 private void onCreatePrefixedShard(final CreatePrefixedShard createPrefixedShard) {
431 LOG.debug("{}: onCreatePrefixedShard: {}", persistenceId(), createPrefixedShard);
435 final ShardIdentifier shardId = ClusterUtils.getShardIdentifier(cluster.getCurrentMemberName(),
436 createPrefixedShard.getConfig().getPrefix());
437 if (localShards.containsKey(shardId.getShardName())) {
// Idempotent: an existing shard of the same name is reported as success.
438 LOG.debug("{}: Shard {} already exists", persistenceId(), shardId);
439 reply = new Status.Success(String.format("Shard with name %s already exists", shardId));
441 doCreatePrefixedShard(createPrefixedShard);
442 reply = new Status.Success(null);
444 } catch (final Exception e) {
// NOTE(review): message says "onCreateShard" but this is onCreatePrefixedShard --
// looks like a copy/paste from onCreateShard; consider correcting.
445 LOG.error("{}: onCreateShard failed", persistenceId(), e);
446 reply = new Status.Failure(e);
// Only reply when there is a real sender (tell-style callers use deadLetters).
449 if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
450 getSender().tell(reply, getSelf());
// Handles CreateShard: creates a local module-based shard unless one with the same
// name already exists, then replies with Status.Success/Failure to the sender (unless
// the sender is deadLetters).
// NOTE(review): the declaration of 'reply' and the opening try block are among the
// lines missing from this copy.
454 @SuppressWarnings("checkstyle:IllegalCatch")
455 private void onCreateShard(CreateShard createShard) {
456 LOG.debug("{}: onCreateShard: {}", persistenceId(), createShard);
460 String shardName = createShard.getModuleShardConfig().getShardName();
461 if (localShards.containsKey(shardName)) {
// Idempotent: an existing shard of the same name is reported as success.
462 LOG.debug("{}: Shard {} already exists", persistenceId(), shardName);
463 reply = new Status.Success(String.format("Shard with name %s already exists", shardName));
465 doCreateShard(createShard);
466 reply = new Status.Success(null);
468 } catch (Exception e) {
469 LOG.error("{}: onCreateShard failed", persistenceId(), e);
470 reply = new Status.Failure(e);
// Only reply when there is a real sender (tell-style callers use deadLetters).
473 if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
474 getSender().tell(reply, getSelf());
// Registers the prefix-shard configuration, builds the shard's DatastoreContext (with
// logical store type and store root derived from the prefix when none was supplied),
// records the ShardInformation locally and, if a SchemaContext is already available,
// starts the shard actor immediately.
478 private void doCreatePrefixedShard(final CreatePrefixedShard createPrefixedShard) {
479 final PrefixShardConfiguration config = createPrefixedShard.getConfig();
481 final ShardIdentifier shardId = ClusterUtils.getShardIdentifier(cluster.getCurrentMemberName(),
482 createPrefixedShard.getConfig().getPrefix());
483 final String shardName = shardId.getShardName();
485 configuration.addPrefixShardConfiguration(config);
487 DatastoreContext shardDatastoreContext = createPrefixedShard.getContext();
489 if (shardDatastoreContext == null) {
// No explicit context supplied: derive one from the prefix configuration.
490 final Builder builder = newShardDatastoreContextBuilder(shardName);
491 builder.logicalStoreType(LogicalDatastoreType.valueOf(config.getPrefix().getDatastoreType().name()))
492 .storeRoot(config.getPrefix().getRootIdentifier());
493 shardDatastoreContext = builder.build();
495 shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext).shardPeerAddressResolver(
496 peerAddressResolver).build();
// NOTE(review): this flag is computed but never used in the visible code -- dead
// store unless a dropped line consumed it; confirm.
499 final boolean shardWasInRecoveredSnapshot = currentSnapshot != null
500 && currentSnapshot.getShardList().contains(shardName);
// Prefix shards start with no peers and as an active member.
502 final Map<String, String> peerAddresses = Collections.emptyMap();
503 final boolean isActiveMember = true;
// NOTE(review): the format string has five '{}' placeholders but only four arguments
// -- the "memberNames" placeholder shifts the remaining arguments and the last
// placeholder is left unfilled by SLF4J; should either drop the placeholder or add
// the member-names argument.
504 LOG.debug("{} doCreatePrefixedShard: shardId: {}, memberNames: {}, peerAddresses: {}, isActiveMember: {}",
505 persistenceId(), shardId, peerAddresses, isActiveMember);
507 final ShardInformation info = new ShardInformation(shardName, shardId, peerAddresses,
508 shardDatastoreContext, createPrefixedShard.getShardBuilder(), peerAddressResolver);
509 info.setActiveMember(isActiveMember);
510 localShards.put(info.getShardName(), info);
// The shard actor can only be started once a SchemaContext has been received.
512 if (schemaContext != null) {
513 info.setActor(newShardActor(schemaContext, info));
// Registers the module shard configuration, derives peers and voting behavior (a
// member not in the shard's static member list whose shard was not in a recovered
// snapshot is created inactive, with elections disabled, awaiting a later AddServer),
// records the ShardInformation and starts the shard actor if a SchemaContext is
// already available.
517 private void doCreateShard(final CreateShard createShard) {
518 final ModuleShardConfiguration moduleShardConfig = createShard.getModuleShardConfig();
519 final String shardName = moduleShardConfig.getShardName();
521 configuration.addModuleShardConfiguration(moduleShardConfig);
523 DatastoreContext shardDatastoreContext = createShard.getDatastoreContext();
524 if (shardDatastoreContext == null) {
525 shardDatastoreContext = newShardDatastoreContext(shardName);
527 shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext).shardPeerAddressResolver(
528 peerAddressResolver).build();
531 ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
// A shard present in the recovered snapshot previously existed on this member.
533 boolean shardWasInRecoveredSnapshot = currentSnapshot != null
534 && currentSnapshot.getShardList().contains(shardName);
536 Map<String, String> peerAddresses;
537 boolean isActiveMember;
538 if (shardWasInRecoveredSnapshot || configuration.getMembersFromShardName(shardName)
539 .contains(cluster.getCurrentMemberName())) {
540 peerAddresses = getPeerAddresses(shardName);
541 isActiveMember = true;
543 // The local member is not in the static shard member configuration and the shard did not
544 // previously exist (ie !shardWasInRecoveredSnapshot). In this case we'll create
545 // the shard with no peers and with elections disabled so it stays as follower. A
546 // subsequent AddServer request will be needed to make it an active member.
547 isActiveMember = false;
548 peerAddresses = Collections.emptyMap();
549 shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext)
550 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();
// NOTE(review): the final argument line of this debug statement (presumably
// "isActiveMember);") is missing from this copy -- the statement is truncated here.
553 LOG.debug("{} doCreateShard: shardId: {}, memberNames: {}, peerAddresses: {}, isActiveMember: {}",
554 persistenceId(), shardId, moduleShardConfig.getShardMemberNames(), peerAddresses,
557 ShardInformation info = new ShardInformation(shardName, shardId, peerAddresses,
558 shardDatastoreContext, createShard.getShardBuilder(), peerAddressResolver);
559 info.setActiveMember(isActiveMember);
560 localShards.put(info.getShardName(), info);
// The shard actor can only be started once a SchemaContext has been received.
562 if (schemaContext != null) {
563 info.setActor(newShardActor(schemaContext, info));
567 private DatastoreContext.Builder newShardDatastoreContextBuilder(String shardName) {
568 return DatastoreContext.newBuilderFrom(datastoreContextFactory.getShardDatastoreContext(shardName))
569 .shardPeerAddressResolver(peerAddressResolver);
572 private DatastoreContext newShardDatastoreContext(String shardName) {
573 return newShardDatastoreContextBuilder(shardName).build();
576 private void checkReady() {
577 if (isReadyWithLeaderId()) {
578 LOG.info("{}: All Shards are ready - data store {} is ready, available count is {}",
579 persistenceId(), type, waitTillReadyCountdownLatch.getCount());
581 waitTillReadyCountdownLatch.countDown();
// Updates cached leader state (local data tree, leader payload version, leader id) for
// the shard named in the message; when the leader id actually changed, invalidates the
// primary-shard-info cache entry so subsequent lookups re-resolve.
// NOTE(review): lines are missing between the cache invalidation and the trailing
// no-shard debug (embedded numbering skips 594-597) -- upstream presumably closes the
// inner if and re-checks readiness before the else branch; confirm.
585 private void onLeaderStateChanged(ShardLeaderStateChanged leaderStateChanged) {
586 LOG.info("{}: Received LeaderStateChanged message: {}", persistenceId(), leaderStateChanged);
588 ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
589 if (shardInformation != null) {
590 shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
591 shardInformation.setLeaderVersion(leaderStateChanged.getLeaderPayloadVersion());
592 if (shardInformation.setLeaderId(leaderStateChanged.getLeaderId())) {
// Leader changed: cached primary info for this shard is now stale.
593 primaryShardInfoCache.remove(shardInformation.getShardName());
598 LOG.debug("No shard found with member Id {}", leaderStateChanged.getMemberId());
602 private void onShardNotInitializedTimeout(ShardNotInitializedTimeout message) {
603 ShardInformation shardInfo = message.getShardInfo();
605 LOG.debug("{}: Received ShardNotInitializedTimeout message for shard {}", persistenceId(),
606 shardInfo.getShardName());
608 shardInfo.removeOnShardInitialized(message.getOnShardInitialized());
610 if (!shardInfo.isShardInitialized()) {
611 LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(), shardInfo.getShardName());
612 message.getSender().tell(createNotInitializedException(shardInfo.getShardId()), getSelf());
614 LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(), shardInfo.getShardName());
615 message.getSender().tell(createNoShardLeaderException(shardInfo.getShardId()), getSelf());
619 private void onFollowerInitialSyncStatus(FollowerInitialSyncUpStatus status) {
620 LOG.info("{} Received follower initial sync status for {} status sync done {}", persistenceId(),
621 status.getName(), status.isInitialSyncDone());
623 ShardInformation shardInformation = findShardInformation(status.getName());
625 if (shardInformation != null) {
626 shardInformation.setFollowerSyncStatus(status.isInitialSyncDone());
628 shardManagerMBean.setSyncStatus(isInSync());
// Records the shard's new RAFT role and refreshes the aggregated sync status exposed
// through the ShardManager MBean.
// NOTE(review): a line is missing between setRole and setSyncStatus in this copy
// (embedded numbering skips 640) -- confirm what it contained against upstream.
633 private void onRoleChangeNotification(RoleChangeNotification roleChanged) {
634 LOG.info("{}: Received role changed for {} from {} to {}", persistenceId(), roleChanged.getMemberId(),
635 roleChanged.getOldRole(), roleChanged.getNewRole());
637 ShardInformation shardInformation = findShardInformation(roleChanged.getMemberId());
638 if (shardInformation != null) {
639 shardInformation.setRole(roleChanged.getNewRole());
641 shardManagerMBean.setSyncStatus(isInSync());
646 private ShardInformation findShardInformation(String memberId) {
647 for (ShardInformation info : localShards.values()) {
648 if (info.getShardId().toString().equals(memberId)) {
656 private boolean isReadyWithLeaderId() {
657 boolean isReady = true;
658 for (ShardInformation info : localShards.values()) {
659 if (!info.isShardReadyWithLeaderId()) {
667 private boolean isInSync() {
668 for (ShardInformation info : localShards.values()) {
669 if (!info.isInSync()) {
676 private void onActorInitialized(Object message) {
677 final ActorRef sender = getSender();
679 if (sender == null) {
680 return; //why is a non-actor sending this message? Just ignore.
683 String actorName = sender.path().name();
684 //find shard name from actor name; actor name is stringified shardId
686 final ShardIdentifier shardId;
688 shardId = ShardIdentifier.fromShardIdString(actorName);
689 } catch (IllegalArgumentException e) {
690 LOG.debug("{}: ignoring actor {}", actorName, e);
694 markShardAsInitialized(shardId.getShardName());
697 private void markShardAsInitialized(String shardName) {
698 LOG.debug("{}: Initializing shard [{}]", persistenceId(), shardName);
700 ShardInformation shardInformation = localShards.get(shardName);
701 if (shardInformation != null) {
702 shardInformation.setActorInitialized();
704 shardInformation.getActor().tell(new RegisterRoleChangeListener(), self());
709 protected void handleRecover(Object message) throws Exception {
710 if (message instanceof RecoveryCompleted) {
711 onRecoveryCompleted();
712 } else if (message instanceof SnapshotOffer) {
713 applyShardManagerSnapshot((ShardManagerSnapshot)((SnapshotOffer) message).snapshot());
// Post-recovery hook: trims the akka journal (SchemaContext modules are no longer
// persisted) and, when no snapshot was recovered but an install snapshot was supplied
// through the builder, deserializes and applies it.
// NOTE(review): the tail of this method is missing from this copy (embedded numbering
// jumps from 735 to 742) -- upstream presumably proceeds to create the local shards
// after this point; confirm.
717 @SuppressWarnings("checkstyle:IllegalCatch")
718 private void onRecoveryCompleted() {
719 LOG.info("Recovery complete : {}", persistenceId());
721 // We no longer persist SchemaContext modules so delete all the prior messages from the akka
722 // journal on upgrade from Helium.
723 deleteMessages(lastSequenceNr());
// Only fall back to the builder-supplied snapshot when recovery produced none.
725 if (currentSnapshot == null && restoreFromSnapshot != null
726 && restoreFromSnapshot.getShardManagerSnapshot() != null) {
727 try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(
728 restoreFromSnapshot.getShardManagerSnapshot()))) {
729 ShardManagerSnapshot snapshot = (ShardManagerSnapshot) ois.readObject();
731 LOG.debug("{}: Deserialized restored ShardManagerSnapshot: {}", persistenceId(), snapshot);
733 applyShardManagerSnapshot(snapshot);
734 } catch (ClassNotFoundException | IOException e) {
// Restore is best-effort: a bad snapshot is logged, not fatal.
735 LOG.error("{}: Error deserializing restored ShardManagerSnapshot", persistenceId(), e);
// Replies to a shard lookup. If the shard is not yet initialized (or, when
// wantShardReady, has no known leader): with doWait set, registers an
// OnShardInitialized/OnShardReady callback plus a timeout that later answers with
// NotInitializedException/NoShardLeaderException; without doWait it answers with the
// appropriate exception immediately. Otherwise it replies with messageSupplier's
// value right away.
// NOTE(review): lines are missing from this copy, including what is presumably the
// 'if (doWait)' guard and part of the scheduleOnce argument list (the timeout/receiver
// arguments between lines 768 and 770) -- confirm against upstream.
742 private void sendResponse(ShardInformation shardInformation, boolean doWait,
743 boolean wantShardReady, final Supplier<Object> messageSupplier) {
744 if (!shardInformation.isShardInitialized() || wantShardReady && !shardInformation.isShardReadyWithLeaderId()) {
746 final ActorRef sender = getSender();
747 final ActorRef self = self();
// The reply is deferred until the shard reaches the desired state.
749 Runnable replyRunnable = () -> sender.tell(messageSupplier.get(), self);
751 OnShardInitialized onShardInitialized = wantShardReady ? new OnShardReady(replyRunnable) :
752 new OnShardInitialized(replyRunnable);
754 shardInformation.addOnShardInitialized(onShardInitialized);
756 FiniteDuration timeout = shardInformation.getDatastoreContext()
757 .getShardInitializationTimeout().duration();
758 if (shardInformation.isShardInitialized()) {
759 // If the shard is already initialized then we'll wait enough time for the shard to
760 // elect a leader, ie 2 times the election timeout.
761 timeout = FiniteDuration.create(shardInformation.getDatastoreContext().getShardRaftConfig()
762 .getElectionTimeOutInterval().toMillis() * 2, TimeUnit.MILLISECONDS);
765 LOG.debug("{}: Scheduling {} ms timer to wait for shard {}", persistenceId(), timeout.toMillis(),
766 shardInformation.getShardName());
// Failsafe: if the shard never reaches the desired state, answer with an exception.
768 Cancellable timeoutSchedule = getContext().system().scheduler().scheduleOnce(
770 new ShardNotInitializedTimeout(shardInformation, onShardInitialized, sender),
771 getContext().dispatcher(), getSelf());
773 onShardInitialized.setTimeoutSchedule(timeoutSchedule);
775 } else if (!shardInformation.isShardInitialized()) {
776 LOG.debug("{}: Returning NotInitializedException for shard {}", persistenceId(),
777 shardInformation.getShardName());
778 getSender().tell(createNotInitializedException(shardInformation.getShardId()), getSelf());
780 LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(),
781 shardInformation.getShardName());
782 getSender().tell(createNoShardLeaderException(shardInformation.getShardId()), getSelf());
// Shard is in the desired state: reply immediately.
788 getSender().tell(messageSupplier.get(), getSelf());
791 private static NoShardLeaderException createNoShardLeaderException(ShardIdentifier shardId) {
792 return new NoShardLeaderException(null, shardId.toString());
795 private static NotInitializedException createNotInitializedException(ShardIdentifier shardId) {
796 return new NotInitializedException(String.format(
797 "Found primary shard %s but it's not initialized yet. Please try again later", shardId));
801 static MemberName memberToName(final Member member) {
802 return MemberName.forName(member.roles().iterator().next());
805 private void memberRemoved(ClusterEvent.MemberRemoved message) {
806 MemberName memberName = memberToName(message.member());
808 LOG.info("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
809 message.member().address());
811 peerAddressResolver.removePeerAddress(memberName);
813 for (ShardInformation info : localShards.values()) {
814 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
818 private void memberExited(ClusterEvent.MemberExited message) {
819 MemberName memberName = memberToName(message.member());
821 LOG.info("{}: Received MemberExited: memberName: {}, address: {}", persistenceId(), memberName,
822 message.member().address());
824 peerAddressResolver.removePeerAddress(memberName);
826 for (ShardInformation info : localShards.values()) {
827 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
831 private void memberUp(ClusterEvent.MemberUp message) {
832 MemberName memberName = memberToName(message.member());
834 LOG.info("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
835 message.member().address());
837 memberUp(memberName, message.member().address());
840 private void memberUp(MemberName memberName, Address address) {
841 addPeerAddress(memberName, address);
845 private void memberWeaklyUp(MemberWeaklyUp message) {
846 MemberName memberName = memberToName(message.member());
848 LOG.info("{}: Received MemberWeaklyUp: memberName: {}, address: {}", persistenceId(), memberName,
849 message.member().address());
851 memberUp(memberName, message.member().address());
854 private void addPeerAddress(MemberName memberName, Address address) {
855 peerAddressResolver.addPeerAddress(memberName, address);
857 for (ShardInformation info : localShards.values()) {
858 String shardName = info.getShardName();
859 String peerId = getShardIdentifier(memberName, shardName).toString();
860 info.updatePeerAddress(peerId, peerAddressResolver.getShardActorAddress(shardName, memberName), getSelf());
862 info.peerUp(memberName, peerId, getSelf());
866 private void memberReachable(ClusterEvent.ReachableMember message) {
867 MemberName memberName = memberToName(message.member());
868 LOG.info("Received ReachableMember: memberName {}, address: {}", memberName, message.member().address());
870 addPeerAddress(memberName, message.member().address());
872 markMemberAvailable(memberName);
875 private void memberUnreachable(ClusterEvent.UnreachableMember message) {
876 MemberName memberName = memberToName(message.member());
877 LOG.info("Received UnreachableMember: memberName {}, address: {}", memberName, message.member().address());
879 markMemberUnavailable(memberName);
882 private void markMemberUnavailable(final MemberName memberName) {
883 final String memberStr = memberName.getName();
884 for (ShardInformation info : localShards.values()) {
885 String leaderId = info.getLeaderId();
886 // XXX: why are we using String#contains() here?
887 if (leaderId != null && leaderId.contains(memberStr)) {
888 LOG.debug("Marking Leader {} as unavailable.", leaderId);
889 info.setLeaderAvailable(false);
891 primaryShardInfoCache.remove(info.getShardName());
894 info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
898 private void markMemberAvailable(final MemberName memberName) {
899 final String memberStr = memberName.getName();
900 for (ShardInformation info : localShards.values()) {
901 String leaderId = info.getLeaderId();
902 // XXX: why are we using String#contains() here?
903 if (leaderId != null && leaderId.contains(memberStr)) {
904 LOG.debug("Marking Leader {} as available.", leaderId);
905 info.setLeaderAvailable(true);
908 info.peerUp(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
912 private void onDatastoreContextFactory(DatastoreContextFactory factory) {
913 datastoreContextFactory = factory;
914 for (ShardInformation info : localShards.values()) {
915 info.setDatastoreContext(newShardDatastoreContext(info.getShardName()), getSelf());
919 private void onGetLocalShardIds() {
920 final List<String> response = new ArrayList<>(localShards.size());
922 for (ShardInformation info : localShards.values()) {
923 response.add(info.getShardId().toString());
926 getSender().tell(new Status.Success(response), getSelf());
929 private void onSwitchShardBehavior(final SwitchShardBehavior message) {
930 final ShardIdentifier identifier = message.getShardId();
932 if (identifier != null) {
933 final ShardInformation info = localShards.get(identifier.getShardName());
935 getSender().tell(new Status.Failure(
936 new IllegalArgumentException("Shard " + identifier + " is not local")), getSelf());
940 switchShardBehavior(info, new SwitchBehavior(message.getNewState(), message.getTerm()));
942 for (ShardInformation info : localShards.values()) {
943 switchShardBehavior(info, new SwitchBehavior(message.getNewState(), message.getTerm()));
947 getSender().tell(new Status.Success(null), getSelf());
950 private void switchShardBehavior(final ShardInformation info, final SwitchBehavior switchBehavior) {
951 final ActorRef actor = info.getActor();
953 actor.tell(switchBehavior, getSelf());
955 LOG.warn("Could not switch the behavior of shard {} to {} - shard is not yet available",
956 info.getShardName(), switchBehavior.getNewState());
961 * Notifies all the local shards of a change in the schema context.
963 * @param message the message to send
965 private void updateSchemaContext(final Object message) {
966 schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
968 LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getAllModuleIdentifiers().size());
970 for (ShardInformation info : localShards.values()) {
971 if (info.getActor() == null) {
972 LOG.debug("Creating Shard {}", info.getShardId());
973 info.setActor(newShardActor(schemaContext, info));
975 info.getActor().tell(message, getSelf());
981 protected ClusterWrapper getCluster() {
986 protected ActorRef newShardActor(final SchemaContext schemaContext, ShardInformation info) {
987 return getContext().actorOf(info.newProps(schemaContext)
988 .withDispatcher(shardDispatcherPath), info.getShardId().toString());
991 private void findPrimary(FindPrimary message) {
992 LOG.debug("{}: In findPrimary: {}", persistenceId(), message);
994 final String shardName = message.getShardName();
995 final boolean canReturnLocalShardState = !(message instanceof RemoteFindPrimary);
997 // First see if the there is a local replica for the shard
998 final ShardInformation info = localShards.get(shardName);
999 if (info != null && info.isActiveMember()) {
1000 sendResponse(info, message.isWaitUntilReady(), true, () -> {
1001 String primaryPath = info.getSerializedLeaderActor();
1002 Object found = canReturnLocalShardState && info.isLeader()
1003 ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
1004 new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());
1006 LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
1013 final Collection<String> visitedAddresses;
1014 if (message instanceof RemoteFindPrimary) {
1015 visitedAddresses = ((RemoteFindPrimary)message).getVisitedAddresses();
1017 visitedAddresses = new ArrayList<>(1);
1020 visitedAddresses.add(peerAddressResolver.getShardManagerActorPathBuilder(cluster.getSelfAddress()).toString());
1022 for (String address: peerAddressResolver.getShardManagerPeerActorAddresses()) {
1023 if (visitedAddresses.contains(address)) {
1027 LOG.debug("{}: findPrimary for {} forwarding to remote ShardManager {}, visitedAddresses: {}",
1028 persistenceId(), shardName, address, visitedAddresses);
1030 getContext().actorSelection(address).forward(new RemoteFindPrimary(shardName,
1031 message.isWaitUntilReady(), visitedAddresses), getContext());
1035 LOG.debug("{}: No shard found for {}", persistenceId(), shardName);
1037 getSender().tell(new PrimaryNotFoundException(
1038 String.format("No primary shard found for %s.", shardName)), getSelf());
1041 private void findPrimary(final String shardName, final FindPrimaryResponseHandler handler) {
1042 Timeout findPrimaryTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
1043 .getShardInitializationTimeout().duration().$times(2));
1045 Future<Object> futureObj = ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
1046 futureObj.onComplete(new OnComplete<Object>() {
1048 public void onComplete(Throwable failure, Object response) {
1049 if (failure != null) {
1050 handler.onFailure(failure);
1052 if (response instanceof RemotePrimaryShardFound) {
1053 handler.onRemotePrimaryShardFound((RemotePrimaryShardFound) response);
1054 } else if (response instanceof LocalPrimaryShardFound) {
1055 handler.onLocalPrimaryFound((LocalPrimaryShardFound) response);
1057 handler.onUnknownResponse(response);
1061 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1065 * Construct the name of the shard actor given the name of the member on
1066 * which the shard resides and the name of the shard.
1068 * @param memberName the member name
1069 * @param shardName the shard name
1072 private ShardIdentifier getShardIdentifier(MemberName memberName, String shardName) {
1073 return peerAddressResolver.getShardIdentifier(memberName, shardName);
1077 * Create shards that are local to the member on which the ShardManager runs.
1079 private void createLocalShards() {
1080 MemberName memberName = this.cluster.getCurrentMemberName();
1081 Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);
1083 Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots = new HashMap<>();
1084 if (restoreFromSnapshot != null) {
1085 for (DatastoreSnapshot.ShardSnapshot snapshot: restoreFromSnapshot.getShardSnapshots()) {
1086 shardSnapshots.put(snapshot.getName(), snapshot);
1090 restoreFromSnapshot = null; // null out to GC
1092 for (String shardName : memberShardNames) {
1093 ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
1095 LOG.debug("{}: Creating local shard: {}", persistenceId(), shardId);
1097 Map<String, String> peerAddresses = getPeerAddresses(shardName);
1098 localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses,
1099 newShardDatastoreContext(shardName), Shard.builder().restoreFromSnapshot(
1100 shardSnapshots.get(shardName)), peerAddressResolver));
1105 * Given the name of the shard find the addresses of all it's peers.
1107 * @param shardName the shard name
1109 private Map<String, String> getPeerAddresses(String shardName) {
1110 Collection<MemberName> members = configuration.getMembersFromShardName(shardName);
1111 Map<String, String> peerAddresses = new HashMap<>();
1113 MemberName currentMemberName = this.cluster.getCurrentMemberName();
1115 for (MemberName memberName : members) {
1116 if (!currentMemberName.equals(memberName)) {
1117 ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
1118 String address = peerAddressResolver.getShardActorAddress(shardName, memberName);
1119 peerAddresses.put(shardId.toString(), address);
1122 return peerAddresses;
1126 public SupervisorStrategy supervisorStrategy() {
1128 return new OneForOneStrategy(10, Duration.create("1 minute"),
1129 (Function<Throwable, Directive>) t -> {
1130 LOG.warn("Supervisor Strategy caught unexpected exception - resuming", t);
1131 return SupervisorStrategy.resume();
1136 public String persistenceId() {
1137 return persistenceId;
1141 ShardManagerInfoMBean getMBean() {
1142 return shardManagerMBean;
1145 private boolean isShardReplicaOperationInProgress(final String shardName, final ActorRef sender) {
1146 if (shardReplicaOperationsInProgress.contains(shardName)) {
1147 String msg = String.format("A shard replica operation for %s is already in progress", shardName);
1148 LOG.debug("{}: {}", persistenceId(), msg);
1149 sender.tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
1157 // With this message the shard does NOT have to be preconfigured
1158 // do a dynamic lookup if the shard exists somewhere and replicate
1159 private void onAddPrefixShardReplica(final AddPrefixShardReplica shardReplicaMsg) {
1160 final String shardName = ClusterUtils.getCleanShardName(shardReplicaMsg.getPrefix());
1162 LOG.debug("{}: onAddPrefixShardReplica: {}", persistenceId(), shardReplicaMsg);
1164 if (schemaContext == null) {
1165 final String msg = String.format(
1166 "No SchemaContext is available in order to create a local shard instance for %s", shardName);
1167 LOG.debug("{}: {}", persistenceId(), msg);
1168 getSender().tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
1172 findPrimary(shardName,
1173 new AutoFindPrimaryFailureResponseHandler(getSender(), shardName, persistenceId(), getSelf()) {
1175 public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
1176 getSelf().tell((RunnableMessage) () -> addShard(getShardName(), response, getSender()),
1181 public void onLocalPrimaryFound(final LocalPrimaryShardFound response) {
1182 sendLocalReplicaAlreadyExistsReply(getShardName(), getTargetActor());
1188 private void onAddShardReplica(final AddShardReplica shardReplicaMsg) {
1189 final String shardName = shardReplicaMsg.getShardName();
1191 LOG.debug("{}: onAddShardReplica: {}", persistenceId(), shardReplicaMsg);
1193 // verify the shard with the specified name is present in the cluster configuration
1194 if (!this.configuration.isShardConfigured(shardName)) {
1195 String msg = String.format("No module configuration exists for shard %s", shardName);
1196 LOG.debug("{}: {}", persistenceId(), msg);
1197 getSender().tell(new Status.Failure(new IllegalArgumentException(msg)), getSelf());
1201 // Create the localShard
1202 if (schemaContext == null) {
1203 String msg = String.format(
1204 "No SchemaContext is available in order to create a local shard instance for %s", shardName);
1205 LOG.debug("{}: {}", persistenceId(), msg);
1206 getSender().tell(new Status.Failure(new IllegalStateException(msg)), getSelf());
1210 findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(), shardName, persistenceId(),
1213 public void onRemotePrimaryShardFound(RemotePrimaryShardFound response) {
1214 getSelf().tell((RunnableMessage) () -> addShard(getShardName(), response, getSender()),
1219 public void onLocalPrimaryFound(LocalPrimaryShardFound message) {
1220 sendLocalReplicaAlreadyExistsReply(getShardName(), getTargetActor());
1226 private void sendLocalReplicaAlreadyExistsReply(String shardName, ActorRef sender) {
1227 String msg = String.format("Local shard %s already exists", shardName);
1228 LOG.debug("{}: {}", persistenceId(), msg);
1229 sender.tell(new Status.Failure(new AlreadyExistsException(msg)), getSelf());
1232 private void addShard(final String shardName, final RemotePrimaryShardFound response, final ActorRef sender) {
1233 if (isShardReplicaOperationInProgress(shardName, sender)) {
1237 shardReplicaOperationsInProgress.add(shardName);
1239 final ShardInformation shardInfo;
1240 final boolean removeShardOnFailure;
1241 ShardInformation existingShardInfo = localShards.get(shardName);
1242 if (existingShardInfo == null) {
1243 removeShardOnFailure = true;
1244 ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
1246 DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName)
1247 .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();
1249 shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
1250 Shard.builder(), peerAddressResolver);
1251 shardInfo.setActiveMember(false);
1252 localShards.put(shardName, shardInfo);
1253 shardInfo.setActor(newShardActor(schemaContext, shardInfo));
1255 removeShardOnFailure = false;
1256 shardInfo = existingShardInfo;
1259 String localShardAddress = peerAddressResolver.getShardActorAddress(shardName, cluster.getCurrentMemberName());
1261 //inform ShardLeader to add this shard as a replica by sending an AddServer message
1262 LOG.debug("{}: Sending AddServer message to peer {} for shard {}", persistenceId(),
1263 response.getPrimaryPath(), shardInfo.getShardId());
1265 Timeout addServerTimeout = new Timeout(shardInfo.getDatastoreContext()
1266 .getShardLeaderElectionTimeout().duration());
1267 Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
1268 new AddServer(shardInfo.getShardId().toString(), localShardAddress, true), addServerTimeout);
1270 futureObj.onComplete(new OnComplete<Object>() {
1272 public void onComplete(Throwable failure, Object addServerResponse) {
1273 if (failure != null) {
1274 LOG.debug("{}: AddServer request to {} for {} failed", persistenceId(),
1275 response.getPrimaryPath(), shardName, failure);
1277 String msg = String.format("AddServer request to leader %s for shard %s failed",
1278 response.getPrimaryPath(), shardName);
1279 self().tell(new ForwardedAddServerFailure(shardName, msg, failure, removeShardOnFailure), sender);
1281 self().tell(new ForwardedAddServerReply(shardInfo, (AddServerReply)addServerResponse,
1282 response.getPrimaryPath(), removeShardOnFailure), sender);
1285 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1288 private void onAddServerFailure(String shardName, String message, Throwable failure, ActorRef sender,
1289 boolean removeShardOnFailure) {
1290 shardReplicaOperationsInProgress.remove(shardName);
1292 if (removeShardOnFailure) {
1293 ShardInformation shardInfo = localShards.remove(shardName);
1294 if (shardInfo.getActor() != null) {
1295 shardInfo.getActor().tell(PoisonPill.getInstance(), getSelf());
1299 sender.tell(new Status.Failure(message == null ? failure :
1300 new RuntimeException(message, failure)), getSelf());
1303 private void onAddServerReply(ShardInformation shardInfo, AddServerReply replyMsg, ActorRef sender,
1304 String leaderPath, boolean removeShardOnFailure) {
1305 String shardName = shardInfo.getShardName();
1306 shardReplicaOperationsInProgress.remove(shardName);
1308 LOG.debug("{}: Received {} for shard {} from leader {}", persistenceId(), replyMsg, shardName, leaderPath);
1310 if (replyMsg.getStatus() == ServerChangeStatus.OK) {
1311 LOG.debug("{}: Leader shard successfully added the replica shard {}", persistenceId(), shardName);
1313 // Make the local shard voting capable
1314 shardInfo.setDatastoreContext(newShardDatastoreContext(shardName), getSelf());
1315 shardInfo.setActiveMember(true);
1318 sender.tell(new Status.Success(null), getSelf());
1319 } else if (replyMsg.getStatus() == ServerChangeStatus.ALREADY_EXISTS) {
1320 sendLocalReplicaAlreadyExistsReply(shardName, sender);
1322 LOG.warn("{}: Leader failed to add shard replica {} with status {}",
1323 persistenceId(), shardName, replyMsg.getStatus());
1325 Exception failure = getServerChangeException(AddServer.class, replyMsg.getStatus(), leaderPath,
1326 shardInfo.getShardId());
1328 onAddServerFailure(shardName, null, failure, sender, removeShardOnFailure);
1332 private static Exception getServerChangeException(Class<?> serverChange, ServerChangeStatus serverChangeStatus,
1333 String leaderPath, ShardIdentifier shardId) {
1335 switch (serverChangeStatus) {
1337 failure = new TimeoutException(String.format(
1338 "The shard leader %s timed out trying to replicate the initial data to the new shard %s."
1339 + "Possible causes - there was a problem replicating the data or shard leadership changed "
1340 + "while replicating the shard data", leaderPath, shardId.getShardName()));
1343 failure = createNoShardLeaderException(shardId);
1346 failure = new UnsupportedOperationException(String.format("%s request is not supported for shard %s",
1347 serverChange.getSimpleName(), shardId.getShardName()));
1350 failure = new RuntimeException(String.format(
1351 "%s request to leader %s for shard %s failed with status %s",
1352 serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
1357 private void onRemoveShardReplica(final RemoveShardReplica shardReplicaMsg) {
1358 LOG.debug("{}: onRemoveShardReplica: {}", persistenceId(), shardReplicaMsg);
1360 findPrimary(shardReplicaMsg.getShardName(), new AutoFindPrimaryFailureResponseHandler(getSender(),
1361 shardReplicaMsg.getShardName(), persistenceId(), getSelf()) {
1363 public void onRemotePrimaryShardFound(RemotePrimaryShardFound response) {
1364 doRemoveShardReplicaAsync(response.getPrimaryPath());
1368 public void onLocalPrimaryFound(LocalPrimaryShardFound response) {
1369 doRemoveShardReplicaAsync(response.getPrimaryPath());
1372 private void doRemoveShardReplicaAsync(final String primaryPath) {
1373 getSelf().tell((RunnableMessage) () -> removeShardReplica(shardReplicaMsg, getShardName(),
1374 primaryPath, getSender()), getTargetActor());
1379 private void persistShardList() {
1380 List<String> shardList = new ArrayList<>(localShards.keySet());
1381 for (ShardInformation shardInfo : localShards.values()) {
1382 if (!shardInfo.isActiveMember()) {
1383 shardList.remove(shardInfo.getShardName());
1386 LOG.debug("{}: persisting the shard list {}", persistenceId(), shardList);
1387 saveSnapshot(updateShardManagerSnapshot(shardList));
1390 private ShardManagerSnapshot updateShardManagerSnapshot(List<String> shardList) {
1391 currentSnapshot = new ShardManagerSnapshot(shardList);
1392 return currentSnapshot;
1395 private void applyShardManagerSnapshot(ShardManagerSnapshot snapshot) {
1396 currentSnapshot = snapshot;
1398 LOG.debug("{}: onSnapshotOffer: {}", persistenceId(), currentSnapshot);
1400 final MemberName currentMember = cluster.getCurrentMemberName();
1401 Set<String> configuredShardList =
1402 new HashSet<>(configuration.getMemberShardNames(currentMember));
1403 for (String shard : currentSnapshot.getShardList()) {
1404 if (!configuredShardList.contains(shard)) {
1405 // add the current member as a replica for the shard
1406 LOG.debug("{}: adding shard {}", persistenceId(), shard);
1407 configuration.addMemberReplicaForShard(shard, currentMember);
1409 configuredShardList.remove(shard);
1412 for (String shard : configuredShardList) {
1413 // remove the member as a replica for the shard
1414 LOG.debug("{}: removing shard {}", persistenceId(), shard);
1415 configuration.removeMemberReplicaForShard(shard, currentMember);
1419 private void onSaveSnapshotSuccess(SaveSnapshotSuccess successMessage) {
1420 LOG.debug("{} saved ShardManager snapshot successfully. Deleting the prev snapshot if available",
1422 deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), successMessage.metadata().timestamp() - 1,
1426 private void onChangeShardServersVotingStatus(final ChangeShardMembersVotingStatus changeMembersVotingStatus) {
1427 LOG.debug("{}: onChangeShardServersVotingStatus: {}", persistenceId(), changeMembersVotingStatus);
1429 String shardName = changeMembersVotingStatus.getShardName();
1430 Map<String, Boolean> serverVotingStatusMap = new HashMap<>();
1431 for (Entry<String, Boolean> e: changeMembersVotingStatus.getMeberVotingStatusMap().entrySet()) {
1432 serverVotingStatusMap.put(getShardIdentifier(MemberName.forName(e.getKey()), shardName).toString(),
1436 ChangeServersVotingStatus changeServersVotingStatus = new ChangeServersVotingStatus(serverVotingStatusMap);
1438 findLocalShard(shardName, getSender(),
1439 localShardFound -> changeShardMembersVotingStatus(changeServersVotingStatus, shardName,
1440 localShardFound.getPath(), getSender()));
1443 private void onFlipShardMembersVotingStatus(FlipShardMembersVotingStatus flipMembersVotingStatus) {
1444 LOG.debug("{}: onFlipShardMembersVotingStatus: {}", persistenceId(), flipMembersVotingStatus);
1446 ActorRef sender = getSender();
1447 final String shardName = flipMembersVotingStatus.getShardName();
1448 findLocalShard(shardName, sender, localShardFound -> {
1449 Future<Object> future = ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
1450 Timeout.apply(30, TimeUnit.SECONDS));
1452 future.onComplete(new OnComplete<Object>() {
1454 public void onComplete(Throwable failure, Object response) {
1455 if (failure != null) {
1456 sender.tell(new Status.Failure(new RuntimeException(
1457 String.format("Failed to access local shard %s", shardName), failure)), self());
1461 OnDemandRaftState raftState = (OnDemandRaftState) response;
1462 Map<String, Boolean> serverVotingStatusMap = new HashMap<>();
1463 for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
1464 serverVotingStatusMap.put(e.getKey(), !e.getValue());
1467 serverVotingStatusMap.put(getShardIdentifier(cluster.getCurrentMemberName(), shardName)
1468 .toString(), !raftState.isVoting());
1470 changeShardMembersVotingStatus(new ChangeServersVotingStatus(serverVotingStatusMap),
1471 shardName, localShardFound.getPath(), sender);
1473 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1478 private void findLocalShard(FindLocalShard message) {
1479 final ShardInformation shardInformation = localShards.get(message.getShardName());
1481 if (shardInformation == null) {
1482 getSender().tell(new LocalShardNotFound(message.getShardName()), getSelf());
1486 sendResponse(shardInformation, message.isWaitUntilInitialized(), false,
1487 () -> new LocalShardFound(shardInformation.getActor()));
1490 private void findLocalShard(final String shardName, final ActorRef sender,
1491 final Consumer<LocalShardFound> onLocalShardFound) {
1492 Timeout findLocalTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
1493 .getShardInitializationTimeout().duration().$times(2));
1495 Future<Object> futureObj = ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
1496 futureObj.onComplete(new OnComplete<Object>() {
1498 public void onComplete(Throwable failure, Object response) {
1499 if (failure != null) {
1500 LOG.debug("{}: Received failure from FindLocalShard for shard {}", persistenceId, shardName,
1502 sender.tell(new Status.Failure(new RuntimeException(
1503 String.format("Failed to find local shard %s", shardName), failure)), self());
1505 if (response instanceof LocalShardFound) {
1506 getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept((LocalShardFound) response),
1508 } else if (response instanceof LocalShardNotFound) {
1509 String msg = String.format("Local shard %s does not exist", shardName);
1510 LOG.debug("{}: {}", persistenceId, msg);
1511 sender.tell(new Status.Failure(new IllegalArgumentException(msg)), self());
1513 String msg = String.format("Failed to find local shard %s: received response: %s",
1514 shardName, response);
1515 LOG.debug("{}: {}", persistenceId, msg);
1516 sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response :
1517 new RuntimeException(msg)), self());
1521 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1524 private void changeShardMembersVotingStatus(ChangeServersVotingStatus changeServersVotingStatus,
1525 final String shardName, final ActorRef shardActorRef, final ActorRef sender) {
1526 if (isShardReplicaOperationInProgress(shardName, sender)) {
1530 shardReplicaOperationsInProgress.add(shardName);
1532 DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
1533 final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
1535 LOG.debug("{}: Sending ChangeServersVotingStatus message {} to local shard {}", persistenceId(),
1536 changeServersVotingStatus, shardActorRef.path());
1538 Timeout timeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration().$times(2));
1539 Future<Object> futureObj = ask(shardActorRef, changeServersVotingStatus, timeout);
1541 futureObj.onComplete(new OnComplete<Object>() {
1543 public void onComplete(Throwable failure, Object response) {
1544 shardReplicaOperationsInProgress.remove(shardName);
1545 if (failure != null) {
1546 String msg = String.format("ChangeServersVotingStatus request to local shard %s failed",
1547 shardActorRef.path());
1548 LOG.debug("{}: {}", persistenceId(), msg, failure);
1549 sender.tell(new Status.Failure(new RuntimeException(msg, failure)), self());
1551 LOG.debug("{}: Received {} from local shard {}", persistenceId(), response, shardActorRef.path());
1553 ServerChangeReply replyMsg = (ServerChangeReply) response;
1554 if (replyMsg.getStatus() == ServerChangeStatus.OK) {
1555 LOG.debug("{}: ChangeServersVotingStatus succeeded for shard {}", persistenceId(), shardName);
1556 sender.tell(new Status.Success(null), getSelf());
1557 } else if (replyMsg.getStatus() == ServerChangeStatus.INVALID_REQUEST) {
1558 sender.tell(new Status.Failure(new IllegalArgumentException(String.format(
1559 "The requested voting state change for shard %s is invalid. At least one member "
1560 + "must be voting", shardId.getShardName()))), getSelf());
1562 LOG.warn("{}: ChangeServersVotingStatus failed for shard {} with status {}",
1563 persistenceId(), shardName, replyMsg.getStatus());
1565 Exception error = getServerChangeException(ChangeServersVotingStatus.class,
1566 replyMsg.getStatus(), shardActorRef.path().toString(), shardId);
1567 sender.tell(new Status.Failure(error), getSelf());
1571 }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
1574 private static final class ForwardedAddServerReply {
1575 ShardInformation shardInfo;
1576 AddServerReply addServerReply;
1578 boolean removeShardOnFailure;
1580 ForwardedAddServerReply(ShardInformation shardInfo, AddServerReply addServerReply, String leaderPath,
1581 boolean removeShardOnFailure) {
1582 this.shardInfo = shardInfo;
1583 this.addServerReply = addServerReply;
1584 this.leaderPath = leaderPath;
1585 this.removeShardOnFailure = removeShardOnFailure;
1589 private static final class ForwardedAddServerFailure {
1591 String failureMessage;
1593 boolean removeShardOnFailure;
1595 ForwardedAddServerFailure(String shardName, String failureMessage, Throwable failure,
1596 boolean removeShardOnFailure) {
1597 this.shardName = shardName;
1598 this.failureMessage = failureMessage;
1599 this.failure = failure;
1600 this.removeShardOnFailure = removeShardOnFailure;
1604 static class OnShardInitialized {
1605 private final Runnable replyRunnable;
1606 private Cancellable timeoutSchedule;
1608 OnShardInitialized(Runnable replyRunnable) {
1609 this.replyRunnable = replyRunnable;
1612 Runnable getReplyRunnable() {
1613 return replyRunnable;
1616 Cancellable getTimeoutSchedule() {
1617 return timeoutSchedule;
1620 void setTimeoutSchedule(Cancellable timeoutSchedule) {
1621 this.timeoutSchedule = timeoutSchedule;
1625 static class OnShardReady extends OnShardInitialized {
1626 OnShardReady(Runnable replyRunnable) {
1627 super(replyRunnable);
1631 private interface RunnableMessage extends Runnable {
1635 * The FindPrimaryResponseHandler provides specific callback methods which are invoked when a response to the
1636 * a remote or local find primary message is processed.
1638 private interface FindPrimaryResponseHandler {
1640 * Invoked when a Failure message is received as a response.
1642 * @param failure the failure exception
1644 void onFailure(Throwable failure);
1647 * Invoked when a RemotePrimaryShardFound response is received.
1649 * @param response the response
1651 void onRemotePrimaryShardFound(RemotePrimaryShardFound response);
1654 * Invoked when a LocalPrimaryShardFound response is received.
1656 * @param response the response
1658 void onLocalPrimaryFound(LocalPrimaryShardFound response);
1661 * Invoked when an unknown response is received. This is another type of failure.
1663 * @param response the response
1665 void onUnknownResponse(Object response);
1669 * The AutoFindPrimaryFailureResponseHandler automatically processes Failure responses when finding a primary
1670 * replica and sends a wrapped Failure response to some targetActor.
1672 private abstract static class AutoFindPrimaryFailureResponseHandler implements FindPrimaryResponseHandler {
1673 private final ActorRef targetActor;
1674 private final String shardName;
1675 private final String persistenceId;
1676 private final ActorRef shardManagerActor;
1679 * Constructs an instance.
1681 * @param targetActor The actor to whom the Failure response should be sent when a FindPrimary failure occurs
1682 * @param shardName The name of the shard for which the primary replica had to be found
1683 * @param persistenceId The persistenceId for the ShardManager
1684 * @param shardManagerActor The ShardManager actor which triggered the call to FindPrimary
1686 protected AutoFindPrimaryFailureResponseHandler(ActorRef targetActor, String shardName, String persistenceId,
1687 ActorRef shardManagerActor) {
1688 this.targetActor = Preconditions.checkNotNull(targetActor);
1689 this.shardName = Preconditions.checkNotNull(shardName);
1690 this.persistenceId = Preconditions.checkNotNull(persistenceId);
1691 this.shardManagerActor = Preconditions.checkNotNull(shardManagerActor);
1694 public ActorRef getTargetActor() {
1698 public String getShardName() {
1703 public void onFailure(Throwable failure) {
1704 LOG.debug("{}: Received failure from FindPrimary for shard {}", persistenceId, shardName, failure);
1705 targetActor.tell(new Status.Failure(new RuntimeException(
1706 String.format("Failed to find leader for shard %s", shardName), failure)), shardManagerActor);
1710 public void onUnknownResponse(Object response) {
1711 String msg = String.format("Failed to find leader for shard %s: received response: %s",
1712 shardName, response);
1713 LOG.debug("{}: {}", persistenceId, msg);
1714 targetActor.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response :
1715 new RuntimeException(msg)), shardManagerActor);
1720 * The WrappedShardResponse class wraps a response from a Shard.
1722 private static final class WrappedShardResponse {
1723 private final ShardIdentifier shardId;
1724 private final Object response;
1725 private final String leaderPath;
1727 WrappedShardResponse(ShardIdentifier shardId, Object response, String leaderPath) {
1728 this.shardId = shardId;
1729 this.response = response;
1730 this.leaderPath = leaderPath;
1733 ShardIdentifier getShardId() {
1737 Object getResponse() {
1741 String getLeaderPath() {
1746 private static final class ShardNotInitializedTimeout {
1747 private final ActorRef sender;
1748 private final ShardInformation shardInfo;
1749 private final OnShardInitialized onShardInitialized;
1751 ShardNotInitializedTimeout(ShardInformation shardInfo, OnShardInitialized onShardInitialized, ActorRef sender) {
1752 this.sender = sender;
1753 this.shardInfo = shardInfo;
1754 this.onShardInitialized = onShardInitialized;
1757 ActorRef getSender() {
1761 ShardInformation getShardInfo() {
1765 OnShardInitialized getOnShardInitialized() {
1766 return onShardInitialized;