/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.ExtendedActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.actor.Status;
import akka.actor.Status.Failure;
import akka.persistence.RecoveryCompleted;
import akka.persistence.SnapshotOffer;
import akka.serialization.JavaSerializer;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Ticker;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Range;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
import org.opendaylight.controller.cluster.access.commands.LocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelopeException;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.RetiredGenerationException;
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.actors.JsonExportActor;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetKnownClients;
import org.opendaylight.controller.cluster.datastore.messages.GetKnownClientsReply;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DisableTrackingPayload;
import org.opendaylight.controller.cluster.messaging.MessageAssembler;
import org.opendaylight.controller.cluster.messaging.MessageSlicer;
import org.opendaylight.controller.cluster.messaging.SliceOptions;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties.ExportOnRecovery;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextProvider;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 *
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it receives to the
 * InMemoryDataTree.
 */
// FIXME: non-final for testing?
public class Shard extends RaftActor {

    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    static final Object RESUME_NEXT_PENDING_TRANSACTION = new Object() {
        @Override
        public String toString() {
            return "resumeNextPendingTransaction";
        }
    };

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    private static final Collection<ABIVersion> SUPPORTED_ABIVERSIONS;

    // Make sure to keep this in sync with the journal configuration in factory-akka.conf
    public static final String NON_PERSISTENT_JOURNAL_ID = "akka.persistence.non-persistent.journal";
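
    // All ABIVersion constants except the first and the last are considered usable for negotiation; the resulting
    // list is reversed so that selectVersion() below prefers the newest version it has in common with the client.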
    static {
        final ABIVersion[] values = ABIVersion.values();
        final ABIVersion[] real = Arrays.copyOfRange(values, 1, values.length - 1);
        SUPPORTED_ABIVERSIONS = ImmutableList.copyOf(real).reverse();
    }

    // FIXME: make this a dynamic property based on mailbox size and maximum number of clients
    private static final int CLIENT_MAX_MESSAGES = 1000;

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final String shardName;

    private final ShardStats shardMBean;

    private final ShardDataTreeListenerInfoMXBeanImpl listenerInfoMXBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    @VisibleForTesting
    final FrontendMetadata frontendMetadata;

    private Map<FrontendIdentifier, LeaderFrontendState> knownFrontends = ImmutableMap.of();
    private boolean paused;

    private final MessageSlicer responseMessageSlicer;
    private final Dispatchers dispatchers;

    private final MessageAssembler requestMessageAssembler;

    private final ExportOnRecovery exportOnRecovery;

    private final ActorRef exportActor;

    Shard(final AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.shardName = builder.getId().getShardName();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
        this.frontendMetadata = new FrontendMetadata(name);
        this.exportOnRecovery = datastoreContext.getExportOnRecovery();

        switch (exportOnRecovery) {
            case Json:
                exportActor = getContext().actorOf(JsonExportActor.props(builder.getSchemaContext(),
                        datastoreContext.getRecoveryExportBaseDir()));
                break;
            case Off:
            default:
                exportActor = null;
                break;
        }

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        ShardDataTreeChangeListenerPublisherActorProxy treeChangeListenerPublisher =
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher", name);
        if (builder.getDataTree() != null) {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getDataTree(),
                    treeChangeListenerPublisher, name,
                    frontendMetadata);
        } else {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getTreeType(),
                    builder.getDatastoreContext().getStoreRoot(), treeChangeListenerPublisher, name,
                    frontendMetadata);
        }

        shardMBean = ShardStats.create(name, datastoreContext.getDataStoreMXBeanType(), this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store, LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        dispatchers = new Dispatchers(context().system().dispatchers());
        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Transaction),
                self(), getContext(), shardMBean, builder.getId().getShardName());

        snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
                this.name, datastoreContext);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);

        responseMessageSlicer = MessageSlicer.builder().logContext(this.name)
                .messageSliceSize(datastoreContext.getMaximumMessageSliceSize())
                .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                .expireStateAfterInactivity(2, TimeUnit.MINUTES).build();

        requestMessageAssembler = MessageAssembler.builder().logContext(this.name)
                .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                .assembledMessageCallback((message, sender) -> self().tell(message, sender))
                .expireStateAfterInactivity(datastoreContext.getRequestTimeout(), TimeUnit.NANOSECONDS).build();

        listenerInfoMXBean = new ShardDataTreeListenerInfoMXBeanImpl(name, datastoreContext.getDataStoreMXBeanType(),
                self());
        listenerInfoMXBean.register();
    }
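
    // The commit timeout used for expiry checks is half of the configured shard transaction commit timeout,
    // converted to milliseconds. commitTimeoutCheck() runs against this value on the schedule established in
    // onRecoveryComplete().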
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public final void postStop() throws Exception {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
        listenerInfoMXBean.unregister();
    }

    @Override
    protected final void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
            getSender());

        super.handleRecover(message);

        switch (exportOnRecovery) {
            case Json:
                if (message instanceof SnapshotOffer) {
                    exportActor.tell(new JsonExportActor.ExportSnapshot(store.readCurrentData().get(), name),
                            ActorRef.noSender());
                } else if (message instanceof ReplicatedLogEntry) {
                    exportActor.tell(new JsonExportActor.ExportJournal((ReplicatedLogEntry) message),
                            ActorRef.noSender());
                } else if (message instanceof RecoveryCompleted) {
                    exportActor.tell(new JsonExportActor.FinishExport(name), ActorRef.noSender());
                    exportActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
                }
                break;
            case Off:
            default:
                break;
        }

        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }

    @Override
    // non-final for TestShard
    protected void handleNonRaftCommand(final Object message) {
        try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    maybeError.get());
            }

            store.resetTransactionBatch();

            if (message instanceof RequestEnvelope) {
                handleRequestEnvelope((RequestEnvelope)message);
            } else if (MessageAssembler.isHandledMessage(message)) {
                handleRequestAssemblerMessage(message);
            } else if (message instanceof ConnectClientRequest) {
                handleConnectClient((ConnectClientRequest)message);
            } else if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof DataTreeChangedReply) {
                // Ignore reply
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitTimeoutCheck();
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                store.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else if (message instanceof MakeLeaderLocal) {
                onMakeLeaderLocal();
            } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
                store.resumeNextPendingTransaction();
            } else if (GetKnownClients.INSTANCE.equals(message)) {
                handleGetKnownClients();
            } else if (!responseMessageSlicer.handleMessage(message)) {
                super.handleNonRaftCommand(message);
            }
        }
    }

    private void handleRequestAssemblerMessage(final Object message) {
        dispatchers.getDispatcher(DispatcherType.Serialization).execute(() -> {
            JavaSerializer.currentSystem().value_$eq((ExtendedActorSystem) context().system());
            requestMessageAssembler.handleMessage(message, self());
        });
    }
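
    // Entry point for tell-based protocol requests: measures execution time and either slices a large
    // (SliceableMessage) response on the serialization dispatcher or sends the success/failure envelope directly.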
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleRequestEnvelope(final RequestEnvelope envelope) {
        final long now = ticker().read();
        try {
            final RequestSuccess<?, ?> success = handleRequest(envelope, now);
            if (success != null) {
                final long executionTimeNanos = ticker().read() - now;
                if (success instanceof SliceableMessage) {
                    dispatchers.getDispatcher(DispatcherType.Serialization).execute(() ->
                        responseMessageSlicer.slice(SliceOptions.builder().identifier(success.getTarget())
                            .message(envelope.newSuccessEnvelope(success, executionTimeNanos))
                            .sendTo(envelope.getMessage().getReplyTo()).replyTo(self())
                            .onFailureCallback(t -> LOG.warn("Error slicing response {}", success, t)).build()));
                } else {
                    envelope.sendSuccess(success, executionTimeNanos);
                }
            }
        } catch (RequestException e) {
            LOG.debug("{}: request {} failed", persistenceId(), envelope, e);
            envelope.sendFailure(e, ticker().read() - now);
        } catch (Exception e) {
            LOG.debug("{}: request {} caused failure", persistenceId(), envelope, e);
            envelope.sendFailure(new RuntimeRequestException("Request failed to process", e),
                ticker().read() - now);
        }
    }
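
    // Runs on every TX_COMMIT_TIMEOUT_CHECK_MESSAGE tick: expires stalled transactions in the data tree and the
    // commit coordinator, and cleans up any stale sliced-message assembly state.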
    private void commitTimeoutCheck() {
        store.checkForExpiredTransactions(transactionCommitTimeout, this::updateAccess);
        commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
        requestMessageAssembler.checkExpiredAssembledMessageState();
    }

    private OptionalLong updateAccess(final SimpleShardDataTreeCohort cohort) {
        final FrontendIdentifier frontend = cohort.getIdentifier().getHistoryId().getClientId().getFrontendId();
        final LeaderFrontendState state = knownFrontends.get(frontend);
        if (state == null) {
            // Not tell-based protocol, do nothing
            return OptionalLong.empty();
        }

        if (isIsolatedLeader()) {
            // We are isolated and no new request can come through until we emerge from it. We are still updating
            // liveness of frontend when we see it attempting to communicate. Use the last access timer.
            return OptionalLong.of(state.getLastSeenTicks());
        }

        // If this frontend has freshly connected, give it some time to catch up before killing its transactions.
        return OptionalLong.of(state.getLastConnectTicks());
    }

    private void disableTracking(final DisableTrackingPayload payload) {
        final ClientIdentifier clientId = payload.getIdentifier();
        LOG.debug("{}: disabling tracking of {}", persistenceId(), clientId);
        frontendMetadata.disableTracking(clientId);

        if (isLeader()) {
            final FrontendIdentifier frontendId = clientId.getFrontendId();
            final LeaderFrontendState frontend = knownFrontends.get(frontendId);
            if (frontend != null) {
                if (clientId.equals(frontend.getIdentifier())) {
                    if (!(frontend instanceof LeaderFrontendState.Disabled)) {
                        verify(knownFrontends.replace(frontendId, frontend,
                            new LeaderFrontendState.Disabled(persistenceId(), clientId, store)));
                        LOG.debug("{}: leader state for {} disabled", persistenceId(), clientId);
                    } else {
                        LOG.debug("{}: leader state {} is already disabled", persistenceId(), frontend);
                    }
                } else {
                    LOG.debug("{}: leader state {} does not match {}", persistenceId(), frontend, clientId);
                }
            } else {
                LOG.debug("{}: leader state for {} not found", persistenceId(), clientId);
                knownFrontends.put(frontendId, new LeaderFrontendState.Disabled(persistenceId(), clientId,
                    getDataStore()));
            }
        }
    }

    private void onMakeLeaderLocal() {
        LOG.debug("{}: onMakeLeaderLocal received", persistenceId());
        if (isLeader()) {
            getSender().tell(new Status.Success(null), getSelf());
            return;
        }

        final ActorSelection leader = getLeader();

        if (leader == null) {
            // Leader is not present. The cluster is most likely trying to
            // elect a leader and we should let that run its normal course

            // TODO we can wait for the election to complete and retry the
            // request. We can also let the caller retry by sending a flag
            // in the response indicating the request is "reTryable".
            getSender().tell(new Failure(
                new LeadershipTransferFailedException("We cannot initiate leadership transfer to local node. "
                    + "Currently there is no leader for " + persistenceId())),
                getSelf());
            return;
        }

        leader.tell(new RequestLeadership(getId(), getSender()), getSelf());
    }

    // Acquire our frontend tracking handle and verify generation matches
    private @Nullable LeaderFrontendState findFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState existing = knownFrontends.get(clientId.getFrontendId());
        if (existing != null) {
            final int cmp = Long.compareUnsigned(existing.getIdentifier().getGeneration(), clientId.getGeneration());
            if (cmp == 0) {
                existing.touch();
                return existing;
            }
            if (cmp > 0) {
                LOG.debug("{}: rejecting request from outdated client {}", persistenceId(), clientId);
                throw new RetiredGenerationException(clientId.getGeneration(),
                    existing.getIdentifier().getGeneration());
            }

            LOG.info("{}: retiring state {}, outdated by request from client {}", persistenceId(), existing, clientId);
            existing.retire();
            knownFrontends.remove(clientId.getFrontendId());
        } else {
            LOG.debug("{}: client {} is not yet known", persistenceId(), clientId);
        }

        return null;
    }

    private LeaderFrontendState getFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState ret = findFrontend(clientId);
        if (ret != null) {
            return ret;
        }

        // TODO: a dedicated exception would be better, but this is technically true, too
        throw new OutOfSequenceEnvelopeException(0);
    }
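
    // Pick the newest backend ABI version that falls within the client's advertised [min, max] range.
    // SUPPORTED_ABIVERSIONS is ordered newest-first, so the first match wins.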
    private static @NonNull ABIVersion selectVersion(final ConnectClientRequest message) {
        final Range<ABIVersion> clientRange = Range.closed(message.getMinVersion(), message.getMaxVersion());
        for (ABIVersion v : SUPPORTED_ABIVERSIONS) {
            if (clientRange.contains(v)) {
                return v;
            }
        }

        throw new IllegalArgumentException(String.format(
            "No common version between backend versions %s and client versions %s", SUPPORTED_ABIVERSIONS,
            clientRange));
    }
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleConnectClient(final ConnectClientRequest message) {
        try {
            final ClientIdentifier clientId = message.getTarget();
            final LeaderFrontendState existing = findFrontend(clientId);
            if (existing != null) {
                existing.touch();
            }

            if (!isLeader() || !isLeaderActive()) {
                LOG.info("{}: not currently leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                    + "isLeadershipTransferInProgress: {}.",
                    persistenceId(), message, isLeader(), isLeaderActive(), isLeadershipTransferInProgress());
                throw new NotLeaderException(getSelf());
            }

            final ABIVersion selectedVersion = selectVersion(message);
            final LeaderFrontendState frontend;
            if (existing == null) {
                frontend = new LeaderFrontendState.Enabled(persistenceId(), clientId, store);
                knownFrontends.put(clientId.getFrontendId(), frontend);
                LOG.debug("{}: created state {} for client {}", persistenceId(), frontend, clientId);
            } else {
                frontend = existing;
            }

            frontend.reconnect();
            message.getReplyTo().tell(new ConnectClientSuccess(message.getTarget(), message.getSequence(), getSelf(),
                ImmutableList.of(), store.getDataTree(), CLIENT_MAX_MESSAGES).toVersion(selectedVersion),
                ActorRef.noSender());
        } catch (RequestException | RuntimeException e) {
            message.getReplyTo().tell(new Failure(e), ActorRef.noSender());
        }
    }

    private @Nullable RequestSuccess<?, ?> handleRequest(final RequestEnvelope envelope, final long now)
            throws RequestException {
        // We are not the leader, hence we want to fail-fast.
        if (!isLeader() || paused || !isLeaderActive()) {
            LOG.debug("{}: not currently active leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                + "isLeadershipTransferInProgress: {}, paused: {}",
                persistenceId(), envelope, isLeader(), isLeaderActive(), isLeadershipTransferInProgress(), paused);
            throw new NotLeaderException(getSelf());
        }

        final Request<?, ?> request = envelope.getMessage();
        if (request instanceof TransactionRequest) {
            final TransactionRequest<?> txReq = (TransactionRequest<?>)request;
            final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
            return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
        } else if (request instanceof LocalHistoryRequest) {
            final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>)request;
            final ClientIdentifier clientId = lhReq.getTarget().getClientId();
            return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);
        } else {
            LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
            throw new UnsupportedRequestException(request);
        }
    }
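
    // On the leader we answer from live frontend state; otherwise we fall back to the persisted frontend metadata.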
    private void handleGetKnownClients() {
        final ImmutableSet<ClientIdentifier> clients;
        if (isLeader()) {
            clients = knownFrontends.values().stream()
                    .map(LeaderFrontendState::getIdentifier)
                    .collect(ImmutableSet.toImmutableSet());
        } else {
            clients = frontendMetadata.getClients();
        }
        sender().tell(new GetKnownClientsReply(clients), self());
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    final int getPendingTxCommitQueueSize() {
        return store.getQueueSize();
    }

    final int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected final Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    final String getShardName() {
        return shardName;
    }

    @Override
    protected final LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
            final short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

    private void onDatastoreContext(final DatastoreContext context) {
        datastoreContext = verifyNotNull(context);

        setTransactionCommitTimeout();

        setPersistence(datastoreContext.isPersistent());

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    // applyState() will be invoked once consensus is reached on the payload
    // non-final for mocking
    void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
        final boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
        if (canSkipPayload) {
            applyState(self(), id, payload);
        } else {
            // We are faking the sender
            persistData(self(), id, payload, batchHint);
        }
    }
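
    // The ask-based handlers below share a common pattern: apply the message locally when we are the active leader,
    // forward it to a known leader, or queue it for retry (via messageRetrySupport) when no leader is available.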
    private void handleCommitTransaction(final CommitTransaction commit) {
        final TransactionIdentifier txId = commit.getTransactionId();
        if (isLeader()) {
            askProtocolEncountered(txId);
            commitCoordinator.handleCommit(txId, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(), "Could not commit transaction " + txId);
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        final TransactionIdentifier txId = canCommit.getTransactionId();
        LOG.debug("{}: Can committing transaction {}", persistenceId(), txId);

        if (isLeader()) {
            askProtocolEncountered(txId);
            commitCoordinator.handleCanCommit(txId, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + txId);
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }
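
    // Leader-side application of a BatchedModifications message; any failure is reported back to the sender as an
    // akka Status.Failure rather than propagated.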
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
        askProtocolEncountered(batched.getTransactionId());

        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionId(), e);
            sender.tell(new Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(final BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.
        //
        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not process BatchedModifications " + batched.getTransactionId());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                        .createForwardedBatchedModifications(batched,
                            datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }
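
    // Rejects a request with NoShardLeaderException when this shard is an isolated leader; returns true when the
    // failure reply has been sent, letting callers bail out early.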
    private boolean failIfIsolatedLeader(final ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new Failure(new NoShardLeaderException(String.format(
                "Shard %s was the leader but has lost contact with all of its followers. Either all"
                    + " other follower nodes are down or this node is isolated by a network partition.",
                persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionId(), e);
                getSender().tell(new Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not process ready local transaction " + message.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

    private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            askProtocolEncountered(forwardedReady.getTransactionId());
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not process forwarded ready transaction " + forwardedReady.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit(),
                        forwardedReady.getParticipatingShardNames());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        final TransactionIdentifier transactionId = abort.getTransactionId();
        askProtocolEncountered(transactionId);
        doAbortTransaction(transactionId, getSender());
    }

    final void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new Failure(new NoShardLeaderException(
                "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        if (isLeader()) {
            final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
            askProtocolEncountered(id.getClientId());
            store.closeTransactionChain(id);
        } else if (getLeader() != null) {
            getLeader().forward(closeTransactionChain, getContext());
        } else {
            LOG.warn("{}: Could not close transaction {}", persistenceId(), closeTransactionChain.getIdentifier());
        }
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void createTransaction(final CreateTransaction createTransaction) {
        askProtocolEncountered(createTransaction.getTransactionId());

        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
            transactionId);
    }

    // Called on leader only
    private void askProtocolEncountered(final TransactionIdentifier transactionId) {
        askProtocolEncountered(transactionId.getHistoryId().getClientId());
    }

    // Called on leader only
    private void askProtocolEncountered(final ClientIdentifier clientId) {
        final FrontendIdentifier frontend = clientId.getFrontendId();
        final LeaderFrontendState state = knownFrontends.get(frontend);
        if (!(state instanceof LeaderFrontendState.Disabled)) {
            LOG.debug("{}: encountered ask-based client {}, disabling transaction tracking", persistenceId(), clientId);
            if (knownFrontends.isEmpty()) {
                knownFrontends = new HashMap<>();
            }
            knownFrontends.put(frontend, new LeaderFrontendState.Disabled(persistenceId(), clientId, getDataStore()));

            persistPayload(clientId, DisableTrackingPayload.create(clientId,
                datastoreContext.getInitialPayloadSerializedBufferCapacity()), false);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getEffectiveModelContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final @NonNull EffectiveModelContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    protected final RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected final RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        if (restoreFromSnapshot == null) {
            return ShardRecoveryCoordinator.create(store, persistenceId(), LOG);
        }

        return ShardRecoveryCoordinator.forSnapshot(store, persistenceId(), LOG, restoreFromSnapshot.getSnapshot());
    }

    @Override
    // non-final for testing
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                period, period, getSelf(),
                TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
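
    // Invoked by the RAFT machinery once consensus has been reached on a payload (see persistPayload()).
    // DisableTrackingPayload is handled locally; everything else is applied to the data tree.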
    @Override
    protected final void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof Payload) {
            if (data instanceof DisableTrackingPayload) {
                disableTracking((DisableTrackingPayload) data);
                return;
            }

            try {
                store.applyReplicatedPayload(identifier, (Payload)data);
            } catch (DataValidationFailedException | IOException e) {
                LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
            }
        } else {
            LOG.error("{}: Unknown state for {} received {}", persistenceId(), identifier, data);
        }
    }

    @Override
    protected final void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            paused = false;
            store.purgeLeaderState();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected final void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.incrementLeadershipChangeCount();
        paused = false;

        if (!isLeader()) {
            if (!knownFrontends.isEmpty()) {
                LOG.debug("{}: removing frontend state for {}", persistenceId(), knownFrontends.keySet());
                knownFrontends = ImmutableMap.of();
            }

            requestMessageAssembler.close();

            if (newLeader == null) {
                // No leader anywhere, nothing else to do
                return;
            }

            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                // Clears all pending transactions and converts them to messages to be forwarded to a new leader.
                Collection<?> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
                        datastoreContext.getShardBatchedModificationCount());

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        LOG.debug("{}: Forwarding pending transaction message {}", persistenceId(), message);

                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions("The transaction was aborted due to inflight leadership "
                        + "change and the leader address isn't available.", this);
            }
        } else {
            // We have become the leader, we need to reconstruct frontend state
            knownFrontends = verifyNotNull(frontendMetadata.toLeaderState(this));
            LOG.debug("{}: became leader with frontend state for {}", persistenceId(), knownFrontends.keySet());
        }

        if (!isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected final void pauseLeader(final Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        paused = true;

        // Tell-based protocol can replay transaction state, so it is safe to blow it up when we are paused.
        if (datastoreContext.isUseTellBasedProtocol()) {
            knownFrontends.values().forEach(LeaderFrontendState::retire);
            knownFrontends = ImmutableMap.of();
        }

        store.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    protected final void unpauseLeader() {
        LOG.debug("{}: In unpauseLeader", persistenceId());
        paused = false;

        store.setRunOnPendingTransactionsComplete(null);

        // Restore tell-based protocol state as if we were becoming the leader
        knownFrontends = verifyNotNull(frontendMetadata.toLeaderState(this));
    }

    @Override
    protected final OnDemandRaftState.AbstractBuilder<?, ?> newOnDemandRaftStateBuilder() {
        return OnDemandShardState.newBuilder()
                .treeChangeListenerActors(treeChangeSupport.getListenerActors())
                .commitCohortActors(store.getCohortActors());
    }

    @Override
    public final String persistenceId() {
        return name;
    }

    @Override
    public final String journalPluginId() {
        // This method may be invoked from super constructor (wonderful), hence we also need to handle the case of
        // the field being uninitialized because our constructor is not finished.
        if (datastoreContext != null && !datastoreContext.isPersistent()) {
            return NON_PERSISTENT_JOURNAL_ID;
        }
        return super.journalPluginId();
    }

    @VisibleForTesting
    final ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    // non-final for mocking
    DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    final ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    // non-final for mocking
    ShardStats getShardMBean() {
        return shardMBean;
    }
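
    /**
     * Returns a new {@link Builder}. A minimal usage sketch follows; the identifiers passed to the setters are
     * illustrative, not prescribed by this class:
     * <pre>{@code
     * Props props = Shard.builder()
     *         .id(shardId)
     *         .peerAddresses(peerAddresses)
     *         .datastoreContext(datastoreContext)
     *         .schemaContextProvider(modelContextProvider)
     *         .props();
     * }</pre>
     */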
    public static Builder builder() {
        return new Builder();
    }

    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<? extends S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private EffectiveModelContextProvider schemaContextProvider;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private DataTree dataTree;

        private volatile boolean sealed;

        AbstractBuilder(final Class<? extends S> shardClass) {
            this.shardClass = shardClass;
        }

        final void checkSealed() {
            checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(final ShardIdentifier newId) {
            checkSealed();
            this.id = newId;
            return self();
        }

        public T peerAddresses(final Map<String, String> newPeerAddresses) {
            checkSealed();
            this.peerAddresses = newPeerAddresses;
            return self();
        }

        public T datastoreContext(final DatastoreContext newDatastoreContext) {
            checkSealed();
            this.datastoreContext = newDatastoreContext;
            return self();
        }

        public T schemaContextProvider(final EffectiveModelContextProvider newSchemaContextProvider) {
            checkSealed();
            this.schemaContextProvider = requireNonNull(newSchemaContextProvider);
            return self();
        }

        public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = newRestoreFromSnapshot;
            return self();
        }

        public T dataTree(final DataTree newDataTree) {
            checkSealed();
            this.dataTree = newDataTree;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public EffectiveModelContext getSchemaContext() {
            return verifyNotNull(schemaContextProvider.getEffectiveModelContext());
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public DataTree getDataTree() {
            return dataTree;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                case CONFIGURATION:
                    return TreeType.CONFIGURATION;
                case OPERATIONAL:
                    return TreeType.OPERATIONAL;
                default:
                    throw new IllegalStateException("Unhandled logical store type "
                            + datastoreContext.getLogicalStoreType());
            }
        }

        protected void verify() {
            requireNonNull(id, "id should not be null");
            requireNonNull(peerAddresses, "peerAddresses should not be null");
            requireNonNull(datastoreContext, "dataStoreContext should not be null");
            requireNonNull(schemaContextProvider, "schemaContextProvider should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        Builder() {
            this(Shard.class);
        }

        Builder(final Class<? extends Shard> shardClass) {
            super(shardClass);
        }
    }

    Ticker ticker() {
        return Ticker.systemTicker();
    }

    void scheduleNextPendingTransaction() {
        self().tell(RESUME_NEXT_PENDING_TRANSACTION, ActorRef.noSender());
    }
}