/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.ExtendedActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Status;
import akka.actor.Status.Failure;
import akka.persistence.RecoveryCompleted;
import akka.persistence.SnapshotOffer;
import akka.serialization.JavaSerializer;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Ticker;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Range;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
import org.opendaylight.controller.cluster.access.commands.LocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelopeException;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.RetiredGenerationException;
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.actors.JsonExportActor;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetKnownClients;
import org.opendaylight.controller.cluster.datastore.messages.GetKnownClientsReply;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DisableTrackingPayload;
import org.opendaylight.controller.cluster.messaging.MessageAssembler;
import org.opendaylight.controller.cluster.messaging.MessageSlicer;
import org.opendaylight.controller.cluster.messaging.SliceOptions;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextProvider;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 *
 * <p>Our Shard uses InMemoryDataTree as its internal representation and delegates all requests
 * it receives to the InMemoryDataTree.
 */
// FIXME: non-final for testing?
public class Shard extends RaftActor {
    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    static final Object RESUME_NEXT_PENDING_TRANSACTION = new Object() {
        @Override
        public String toString() {
            return "resumeNextPendingTransaction";
        }
    };

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    private static final Collection<ABIVersion> SUPPORTED_ABIVERSIONS;

    // Make sure to keep this in sync with the journal configuration in factory-akka.conf
    public static final String NON_PERSISTENT_JOURNAL_ID = "akka.persistence.non-persistent.journal";

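    // NOTE (assumption): ABIVersion.values() is taken to be bounded by synthetic test-only versions at both ends
    // of the enum, which is why the initializer below trims the first and last constants; the reversal orders the
    // remainder newest-first so that selectVersion() prefers the most recent supported version.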
    static {
        final ABIVersion[] values = ABIVersion.values();
        final ABIVersion[] real = Arrays.copyOfRange(values, 1, values.length - 1);
        SUPPORTED_ABIVERSIONS = ImmutableList.copyOf(real).reverse();
    }

    // FIXME: make this a dynamic property based on mailbox size and maximum number of clients
    private static final int CLIENT_MAX_MESSAGES = 1000;

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final String shardName;

    private final ShardStats shardMBean;

    private final ShardDataTreeListenerInfoMXBeanImpl listenerInfoMXBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    @VisibleForTesting
    final FrontendMetadata frontendMetadata;

    private Map<FrontendIdentifier, LeaderFrontendState> knownFrontends = ImmutableMap.of();
    private boolean paused;

    private final MessageSlicer responseMessageSlicer;
    private final Dispatchers dispatchers;

    private final MessageAssembler requestMessageAssembler;

    private final ExportOnRecovery exportOnRecovery;

    private final ActorRef exportActor;

    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
    Shard(final AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        name = builder.getId().toString();
        shardName = builder.getId().getShardName();
        datastoreContext = builder.getDatastoreContext();
        restoreFromSnapshot = builder.getRestoreFromSnapshot();
        frontendMetadata = new FrontendMetadata(name);
        exportOnRecovery = datastoreContext.getExportOnRecovery();

        switch (exportOnRecovery) {
            case Json:
                exportActor = getContext().actorOf(JsonExportActor.props(builder.getSchemaContext(),
                        datastoreContext.getRecoveryExportBaseDir()));
                break;
            case Off:
            default:
                exportActor = null;
                break;
        }

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        ShardDataTreeChangeListenerPublisherActorProxy treeChangeListenerPublisher =
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher", name);
        if (builder.getDataTree() != null) {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getDataTree(),
                    treeChangeListenerPublisher, name,
                    frontendMetadata);
        } else {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getTreeType(),
                    builder.getDatastoreContext().getStoreRoot(), treeChangeListenerPublisher, name,
                    frontendMetadata);
        }

        shardMBean = ShardStats.create(name, datastoreContext.getDataStoreMXBeanType(), this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store, LOG, name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        dispatchers = new Dispatchers(context().system().dispatchers());
        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Transaction),
                self(), getContext(), shardMBean, builder.getId().getShardName());

        snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
                name, datastoreContext);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);

        responseMessageSlicer = MessageSlicer.builder().logContext(name)
                .messageSliceSize(datastoreContext.getMaximumMessageSliceSize())
                .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                .expireStateAfterInactivity(2, TimeUnit.MINUTES).build();

        requestMessageAssembler = MessageAssembler.builder().logContext(name)
                .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                .assembledMessageCallback((message, sender) -> self().tell(message, sender))
                .expireStateAfterInactivity(datastoreContext.getRequestTimeout(), TimeUnit.NANOSECONDS).build();

        listenerInfoMXBean = new ShardDataTreeListenerInfoMXBeanImpl(name, datastoreContext.getDataStoreMXBeanType(),
                self());
        listenerInfoMXBean.register();
    }

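    // The back-end expiry window is half of the configured commit timeout, presumably so that stalled transactions
    // are reaped well before the front-end's own timeout fires; see commitTimeoutCheck().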
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public final void postStop() throws Exception {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
        listenerInfoMXBean.unregister();
    }

    @Override
    protected final void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
            getSender());

        super.handleRecover(message);

        switch (exportOnRecovery) {
            case Json:
                if (message instanceof SnapshotOffer) {
                    exportActor.tell(new JsonExportActor.ExportSnapshot(store.readCurrentData().get(), name),
                            ActorRef.noSender());
                } else if (message instanceof ReplicatedLogEntry) {
                    exportActor.tell(new JsonExportActor.ExportJournal((ReplicatedLogEntry) message),
                            ActorRef.noSender());
                } else if (message instanceof RecoveryCompleted) {
                    exportActor.tell(new JsonExportActor.FinishExport(name), ActorRef.noSender());
                    exportActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
                }
                break;
            case Off:
            default:
                break;
        }

        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }

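    // handleNonRaftCommand() is the single entry point for everything that is not RAFT-internal: the tell-based
    // protocol (RequestEnvelope/ConnectClientRequest plus message slicing and assembly) as well as the older
    // ask-based messages (CreateTransaction, BatchedModifications, CanCommit/Commit/AbortTransaction and friends).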
    @Override
    // non-final for TestShard
    protected void handleNonRaftCommand(final Object message) {
        try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    maybeError.get());
            }

            store.resetTransactionBatch();

            if (message instanceof RequestEnvelope) {
                handleRequestEnvelope((RequestEnvelope) message);
            } else if (MessageAssembler.isHandledMessage(message)) {
                handleRequestAssemblerMessage(message);
            } else if (message instanceof ConnectClientRequest) {
                handleConnectClient((ConnectClientRequest) message);
            } else if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications) message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction) message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof DataTreeChangedReply) {
                // Ignore reply
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitTimeoutCheck();
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext) message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                store.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else if (message instanceof MakeLeaderLocal) {
                onMakeLeaderLocal();
            } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
                store.resumeNextPendingTransaction();
            } else if (GetKnownClients.INSTANCE.equals(message)) {
                handleGetKnownClients();
            } else if (!responseMessageSlicer.handleMessage(message)) {
                super.handleNonRaftCommand(message);
            }
        }
    }

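    // Akka's JavaSerializer resolves classes through a thread-local ExtendedActorSystem, so assembler work is run
    // on the Serialization dispatcher with that thread-local set first; the value_$eq call below is the Scala
    // setter for it.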
    private void handleRequestAssemblerMessage(final Object message) {
        dispatchers.getDispatcher(DispatcherType.Serialization).execute(() -> {
            JavaSerializer.currentSystem().value_$eq((ExtendedActorSystem) context().system());
            requestMessageAssembler.handleMessage(message, self());
        });
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleRequestEnvelope(final RequestEnvelope envelope) {
        final long now = ticker().read();
        try {
            final RequestSuccess<?, ?> success = handleRequest(envelope, now);
            if (success != null) {
                final long executionTimeNanos = ticker().read() - now;
                if (success instanceof SliceableMessage) {
                    dispatchers.getDispatcher(DispatcherType.Serialization).execute(() ->
                        responseMessageSlicer.slice(SliceOptions.builder().identifier(success.getTarget())
                            .message(envelope.newSuccessEnvelope(success, executionTimeNanos))
                            .sendTo(envelope.getMessage().getReplyTo()).replyTo(self())
                            .onFailureCallback(t -> LOG.warn("Error slicing response {}", success, t)).build()));
                } else {
                    envelope.sendSuccess(success, executionTimeNanos);
                }
            }
        } catch (RequestException e) {
            LOG.debug("{}: request {} failed", persistenceId(), envelope, e);
            envelope.sendFailure(e, ticker().read() - now);
        } catch (Exception e) {
            LOG.debug("{}: request {} caused failure", persistenceId(), envelope, e);
            envelope.sendFailure(new RuntimeRequestException("Request failed to process", e),
                ticker().read() - now);
        }
    }

    private void commitTimeoutCheck() {
        store.checkForExpiredTransactions(transactionCommitTimeout, this::updateAccess);
        commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
        requestMessageAssembler.checkExpiredAssembledMessageState();
    }

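    // Callback for commitTimeoutCheck(): when a tell-based frontend is known, report its last-seen/last-connect
    // ticks so that transactions from a demonstrably live frontend are not expired prematurely; an empty result
    // leaves the cohort's own access time in force.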
    private OptionalLong updateAccess(final SimpleShardDataTreeCohort cohort) {
        final FrontendIdentifier frontend = cohort.getIdentifier().getHistoryId().getClientId().getFrontendId();
        final LeaderFrontendState state = knownFrontends.get(frontend);
        if (state == null) {
            // Not tell-based protocol, do nothing
            return OptionalLong.empty();
        }

        if (isIsolatedLeader()) {
            // We are isolated and no new request can come through until we emerge from it. We are still updating
            // liveness of frontend when we see it attempting to communicate. Use the last access timer.
            return OptionalLong.of(state.getLastSeenTicks());
        }

        // If this frontend has freshly connected, give it some time to catch up before killing its transactions.
        return OptionalLong.of(state.getLastConnectTicks());
    }

    private void disableTracking(final DisableTrackingPayload payload) {
        final ClientIdentifier clientId = payload.getIdentifier();
        LOG.debug("{}: disabling tracking of {}", persistenceId(), clientId);
        frontendMetadata.disableTracking(clientId);

        if (isLeader()) {
            final FrontendIdentifier frontendId = clientId.getFrontendId();
            final LeaderFrontendState frontend = knownFrontends.get(frontendId);
            if (frontend != null) {
                if (clientId.equals(frontend.getIdentifier())) {
                    if (!(frontend instanceof LeaderFrontendState.Disabled)) {
                        verify(knownFrontends.replace(frontendId, frontend,
                            new LeaderFrontendState.Disabled(persistenceId(), clientId, store)));
                        LOG.debug("{}: leader state for {} disabled", persistenceId(), clientId);
                    } else {
                        LOG.debug("{}: leader state {} is already disabled", persistenceId(), frontend);
                    }
                } else {
                    LOG.debug("{}: leader state {} does not match {}", persistenceId(), frontend, clientId);
                }
            } else {
                LOG.debug("{}: leader state for {} not found", persistenceId(), clientId);
                knownFrontends.put(frontendId, new LeaderFrontendState.Disabled(persistenceId(), clientId,
                    getDataStore()));
            }
        }
    }

    private void onMakeLeaderLocal() {
        LOG.debug("{}: onMakeLeaderLocal received", persistenceId());
        if (isLeader()) {
            getSender().tell(new Status.Success(null), getSelf());
            return;
        }

        final ActorSelection leader = getLeader();

        if (leader == null) {
            // Leader is not present. The cluster is most likely trying to
            // elect a leader and we should let that run its normal course

            // TODO we can wait for the election to complete and retry the
            // request. We can also let the caller retry by sending a flag
            // in the response indicating the request is "reTryable".
            getSender().tell(new Failure(
                    new LeadershipTransferFailedException("We cannot initiate leadership transfer to local node. "
                            + "Currently there is no leader for " + persistenceId())),
                    getSelf());
            return;
        }

        leader.tell(new RequestLeadership(getId(), getSender()), getSelf());
    }

    // Acquire our frontend tracking handle and verify generation matches
    private @Nullable LeaderFrontendState findFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState existing = knownFrontends.get(clientId.getFrontendId());
        if (existing != null) {
            final int cmp = Long.compareUnsigned(existing.getIdentifier().getGeneration(), clientId.getGeneration());
            if (cmp == 0) {
                existing.touch();
                return existing;
            }
            if (cmp > 0) {
                LOG.debug("{}: rejecting request from outdated client {}", persistenceId(), clientId);
                throw new RetiredGenerationException(clientId.getGeneration(),
                    existing.getIdentifier().getGeneration());
            }

            LOG.info("{}: retiring state {}, outdated by request from client {}", persistenceId(), existing, clientId);
            existing.retire();
            knownFrontends.remove(clientId.getFrontendId());
        } else {
            LOG.debug("{}: client {} is not yet known", persistenceId(), clientId);
        }

        return null;
    }

    private LeaderFrontendState getFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState ret = findFrontend(clientId);
        if (ret != null) {
            return ret;
        }

        // TODO: a dedicated exception would be better, but this is technically true, too
        throw new OutOfSequenceEnvelopeException(0);
    }

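    // Version negotiation: SUPPORTED_ABIVERSIONS is ordered newest-first, so the first supported version that
    // falls within the client's advertised [minVersion, maxVersion] range wins.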
    private static @NonNull ABIVersion selectVersion(final ConnectClientRequest message) {
        final Range<ABIVersion> clientRange = Range.closed(message.getMinVersion(), message.getMaxVersion());
        for (ABIVersion v : SUPPORTED_ABIVERSIONS) {
            if (clientRange.contains(v)) {
                return v;
            }
        }

        throw new IllegalArgumentException(String.format(
            "No common version between backend versions %s and client versions %s", SUPPORTED_ABIVERSIONS,
            clientRange));
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleConnectClient(final ConnectClientRequest message) {
        try {
            final ClientIdentifier clientId = message.getTarget();
            final LeaderFrontendState existing = findFrontend(clientId);
            if (existing != null) {
                existing.touch();
            }

            if (!isLeader() || !isLeaderActive()) {
                LOG.info("{}: not currently leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                    + " isLeadershipTransferInProgress: {}.",
                    persistenceId(), message, isLeader(), isLeaderActive(), isLeadershipTransferInProgress());
                throw new NotLeaderException(getSelf());
            }

            final ABIVersion selectedVersion = selectVersion(message);
            final LeaderFrontendState frontend;
            if (existing == null) {
                frontend = new LeaderFrontendState.Enabled(persistenceId(), clientId, store);
                knownFrontends.put(clientId.getFrontendId(), frontend);
                LOG.debug("{}: created state {} for client {}", persistenceId(), frontend, clientId);
            } else {
                frontend = existing;
            }

            frontend.reconnect();
            message.getReplyTo().tell(new ConnectClientSuccess(message.getTarget(), message.getSequence(), getSelf(),
                ImmutableList.of(), store.getDataTree(), CLIENT_MAX_MESSAGES).toVersion(selectedVersion),
                ActorRef.noSender());
        } catch (RequestException | RuntimeException e) {
            message.getReplyTo().tell(new Failure(e), ActorRef.noSender());
        }
    }

    private @Nullable RequestSuccess<?, ?> handleRequest(final RequestEnvelope envelope, final long now)
            throws RequestException {
        // We are not the leader, hence we want to fail-fast.
        if (!isLeader() || paused || !isLeaderActive()) {
            LOG.debug("{}: not currently active leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                + " isLeadershipTransferInProgress: {}, paused: {}",
                persistenceId(), envelope, isLeader(), isLeaderActive(), isLeadershipTransferInProgress(), paused);
            throw new NotLeaderException(getSelf());
        }

        final Request<?, ?> request = envelope.getMessage();
        if (request instanceof TransactionRequest) {
            final TransactionRequest<?> txReq = (TransactionRequest<?>) request;
            final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
            return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
        } else if (request instanceof LocalHistoryRequest) {
            final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>) request;
            final ClientIdentifier clientId = lhReq.getTarget().getClientId();
            return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);
        } else {
            LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
            throw new UnsupportedRequestException(request);
        }
    }

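    // Leaders answer from live in-memory frontend state; non-leaders fall back to the persisted frontend
    // metadata, which may lag behind the leader's view.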
    private void handleGetKnownClients() {
        final ImmutableSet<ClientIdentifier> clients;
        if (isLeader()) {
            clients = knownFrontends.values().stream()
                    .map(LeaderFrontendState::getIdentifier)
                    .collect(ImmutableSet.toImmutableSet());
        } else {
            clients = frontendMetadata.getClients();
        }
        sender().tell(new GetKnownClientsReply(clients), self());
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    final int getPendingTxCommitQueueSize() {
        return store.getQueueSize();
    }

    final int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected final Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    final String getShardName() {
        return shardName;
    }

    @Override
    protected final LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
            final short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

    private void onDatastoreContext(final DatastoreContext context) {
        datastoreContext = verifyNotNull(context);

        setTransactionCommitTimeout();

        setPersistence(datastoreContext.isPersistent());

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    // applyState() will be invoked once consensus is reached on the payload
    // non-final for mocking
    void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
        final boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
        if (canSkipPayload) {
            applyState(self(), id, payload);
        } else {
            // We are faking the sender
            persistData(self(), id, payload, batchHint);
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        final TransactionIdentifier txId = commit.getTransactionId();
        if (isLeader()) {
            askProtocolEncountered(txId);
            commitCoordinator.handleCommit(txId, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(), "Could not commit transaction " + txId);
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        final TransactionIdentifier txId = canCommit.getTransactionId();
        LOG.debug("{}: Can committing transaction {}", persistenceId(), txId);

        if (isLeader()) {
            askProtocolEncountered(txId);
            commitCoordinator.handleCanCommit(txId, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + txId);
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
        askProtocolEncountered(batched.getTransactionId());

        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionId(), e);
            sender.tell(new Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(final BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.
        //
        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not process BatchedModifications " + batched.getTransactionId());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                        .createForwardedBatchedModifications(batched,
                            datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

    private boolean failIfIsolatedLeader(final ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all"
                    + " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    @VisibleForTesting
    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        final TransactionIdentifier txId = message.getTransactionId();
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), txId);

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            askProtocolEncountered(txId);
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(), txId, e);
                getSender().tell(new Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not process ready local transaction " + txId);
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

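    // A ForwardedReadyTransaction carries a local cohort reference, presumably not designed to cross the wire;
    // when forwarding to a remote leader it is therefore repackaged below as a ReadyLocalTransaction around the
    // underlying DataTreeModification snapshot.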
    private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            askProtocolEncountered(forwardedReady.getTransactionId());
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not process forwarded ready transaction " + forwardedReady.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit(),
                        forwardedReady.getParticipatingShardNames());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        final TransactionIdentifier transactionId = abort.getTransactionId();
        askProtocolEncountered(transactionId);
        doAbortTransaction(transactionId, getSender());
    }

    final void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        if (isLeader()) {
            final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
            askProtocolEncountered(id.getClientId());
            store.closeTransactionChain(id);
        } else if (getLeader() != null) {
            getLeader().forward(closeTransactionChain, getContext());
        } else {
            LOG.warn("{}: Could not close transaction {}", persistenceId(), closeTransactionChain.getIdentifier());
        }
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void createTransaction(final CreateTransaction createTransaction) {
        askProtocolEncountered(createTransaction.getTransactionId());

        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId);
    }

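    // The ask-based protocol cannot replay transaction state across leader changes, so the first ask-based message
    // seen for a client durably disables tell-based tracking for it: both in the in-memory frontend state and via
    // a replicated DisableTrackingPayload.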
    // Called on leader only
    private void askProtocolEncountered(final TransactionIdentifier transactionId) {
        askProtocolEncountered(transactionId.getHistoryId().getClientId());
    }

    // Called on leader only
    private void askProtocolEncountered(final ClientIdentifier clientId) {
        final FrontendIdentifier frontend = clientId.getFrontendId();
        final LeaderFrontendState state = knownFrontends.get(frontend);
        if (!(state instanceof LeaderFrontendState.Disabled)) {
            LOG.debug("{}: encountered ask-based client {}, disabling transaction tracking", persistenceId(),
                clientId);
            if (knownFrontends.isEmpty()) {
                knownFrontends = new HashMap<>();
            }
            knownFrontends.put(frontend, new LeaderFrontendState.Disabled(persistenceId(), clientId, getDataStore()));

            persistPayload(clientId, DisableTrackingPayload.create(clientId,
                datastoreContext.getInitialPayloadSerializedBufferCapacity()), false);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getEffectiveModelContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final @NonNull EffectiveModelContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    protected final RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected final RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        if (restoreFromSnapshot == null) {
            return ShardRecoveryCoordinator.create(store, persistenceId(), LOG);
        }

        return ShardRecoveryCoordinator.forSnapshot(store, persistenceId(), LOG, restoreFromSnapshot.getSnapshot());
    }

    @Override
    // non-final for testing
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        // Notify the shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

    @Override
    protected final void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof Payload) {
            if (data instanceof DisableTrackingPayload) {
                disableTracking((DisableTrackingPayload) data);
                return;
            }

            try {
                store.applyReplicatedPayload(identifier, (Payload) data);
            } catch (DataValidationFailedException | IOException e) {
                LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
            }
        } else {
            LOG.error("{}: Unknown state for {} received {}", persistenceId(), identifier, data);
        }
    }

    @Override
    protected final void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            paused = false;
            store.purgeLeaderState();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected final void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.incrementLeadershipChangeCount();
        paused = false;

        if (!isLeader()) {
            if (!knownFrontends.isEmpty()) {
                LOG.debug("{}: removing frontend state for {}", persistenceId(), knownFrontends.keySet());
                knownFrontends = ImmutableMap.of();
            }

            requestMessageAssembler.close();

            if (!hasLeader()) {
                // No leader anywhere, nothing else to do
                return;
            }

            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                // Clears all pending transactions and converts them to messages to be forwarded to a new leader.
                Collection<?> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
                        datastoreContext.getShardBatchedModificationCount());

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        LOG.debug("{}: Forwarding pending transaction message {}", persistenceId(), message);

                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions("The transaction was aborted due to inflight leadership "
                        + "change and the leader address isn't available.", this);
            }
        } else {
            // We have become the leader, we need to reconstruct frontend state
            knownFrontends = verifyNotNull(frontendMetadata.toLeaderState(this));
            LOG.debug("{}: became leader with frontend state for {}", persistenceId(), knownFrontends.keySet());
        }

        if (!isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected final void pauseLeader(final Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        paused = true;

        // Tell-based protocol can replay transaction state, so it is safe to blow it up when we are paused.
        if (datastoreContext.isUseTellBasedProtocol()) {
            knownFrontends.values().forEach(LeaderFrontendState::retire);
            knownFrontends = ImmutableMap.of();
        }

        store.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    protected final void unpauseLeader() {
        LOG.debug("{}: In unpauseLeader", persistenceId());
        paused = false;

        store.setRunOnPendingTransactionsComplete(null);

        // Restore tell-based protocol state as if we were becoming the leader
        knownFrontends = verifyNotNull(frontendMetadata.toLeaderState(this));
    }

    @Override
    protected final OnDemandRaftState.AbstractBuilder<?, ?> newOnDemandRaftStateBuilder() {
        return OnDemandShardState.newBuilder()
                .treeChangeListenerActors(treeChangeSupport.getListenerActors())
                .commitCohortActors(store.getCohortActors());
    }

    @Override
    public final String persistenceId() {
        return name;
    }

    @Override
    public final String journalPluginId() {
        // This method may be invoked from super constructor (wonderful), hence we also need to handle the case of
        // the field being uninitialized because our constructor is not finished.
        if (datastoreContext != null && !datastoreContext.isPersistent()) {
            return NON_PERSISTENT_JOURNAL_ID;
        }

        return super.journalPluginId();
    }

    @VisibleForTesting
    final ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    // non-final for mocking
    DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    final ShardDataTree getDataStore() {
        return store;
    }

    // non-final for mocking
    ShardStats getShardMBean() {
        return shardMBean;
    }

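    /**
     * Returns a fresh {@link Builder}. Illustrative use only, with placeholder names (the real wiring is done by
     * the shard manager): {@code Shard.builder().id(shardId).peerAddresses(peers).datastoreContext(ctx)
     * .schemaContextProvider(modelProvider).props()}.
     */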
    public static Builder builder() {
        return new Builder();
    }

    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<? extends S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private EffectiveModelContextProvider schemaContextProvider;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private DataTree dataTree;

        private volatile boolean sealed;

        AbstractBuilder(final Class<? extends S> shardClass) {
            this.shardClass = shardClass;
        }

        final void checkSealed() {
            checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(final ShardIdentifier newId) {
            checkSealed();
            this.id = newId;
            return self();
        }

        public T peerAddresses(final Map<String, String> newPeerAddresses) {
            checkSealed();
            this.peerAddresses = newPeerAddresses;
            return self();
        }

        public T datastoreContext(final DatastoreContext newDatastoreContext) {
            checkSealed();
            this.datastoreContext = newDatastoreContext;
            return self();
        }

        public T schemaContextProvider(final EffectiveModelContextProvider newSchemaContextProvider) {
            checkSealed();
            this.schemaContextProvider = requireNonNull(newSchemaContextProvider);
            return self();
        }

        public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = newRestoreFromSnapshot;
            return self();
        }

        public T dataTree(final DataTree newDataTree) {
            checkSealed();
            this.dataTree = newDataTree;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public EffectiveModelContext getSchemaContext() {
            return verifyNotNull(schemaContextProvider.getEffectiveModelContext());
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public DataTree getDataTree() {
            return dataTree;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                case CONFIGURATION:
                    return TreeType.CONFIGURATION;
                case OPERATIONAL:
                    return TreeType.OPERATIONAL;
                default:
                    throw new IllegalStateException("Unhandled logical store type "
                            + datastoreContext.getLogicalStoreType());
            }
        }

        protected void verify() {
            requireNonNull(id, "id should not be null");
            requireNonNull(peerAddresses, "peerAddresses should not be null");
            requireNonNull(datastoreContext, "dataStoreContext should not be null");
            requireNonNull(schemaContextProvider, "schemaContextProvider should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        Builder() {
            this(Shard.class);
        }

        Builder(final Class<? extends Shard> shardClass) {
            super(shardClass);
        }
    }

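    // ticker() exists as an override point, presumably so tests can supply a fake Ticker; handleRequestEnvelope()
    // uses it to measure request execution time.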
    Ticker ticker() {
        return Ticker.systemTicker();
    }

    void scheduleNextPendingTransaction() {
        self().tell(RESUME_NEXT_PENDING_TRANSACTION, ActorRef.noSender());
    }
}