/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import static com.google.common.base.Preconditions.checkState;
import static com.google.common.base.Verify.verify;
import static com.google.common.base.Verify.verifyNotNull;
import static java.util.Objects.requireNonNull;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.ExtendedActorSystem;
import akka.actor.Props;
import akka.actor.Status;
import akka.actor.Status.Failure;
import akka.serialization.JavaSerializer;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Ticker;
import com.google.common.base.Verify;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Range;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.concurrent.TimeUnit;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
import org.opendaylight.controller.cluster.access.commands.LocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelopeException;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.RetiredGenerationException;
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardDataTreeListenerInfoMXBeanImpl;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetKnownClients;
import org.opendaylight.controller.cluster.datastore.messages.GetKnownClientsReply;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DisableTrackingPayload;
import org.opendaylight.controller.cluster.messaging.MessageAssembler;
import org.opendaylight.controller.cluster.messaging.MessageSlicer;
import org.opendaylight.controller.cluster.messaging.SliceOptions;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContextProvider;
import scala.concurrent.duration.FiniteDuration;
/**
 * A Shard represents a portion of the logical data tree.
 *
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to the InMemoryDataTree.
 */
public class Shard extends RaftActor {
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        public String toString() {
            return "txCommitTimeoutCheck";

    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        public String toString() {
            return "getShardMBeanMessage";

    static final Object RESUME_NEXT_PENDING_TRANSACTION = new Object() {
        public String toString() {
            return "resumeNextPendingTransaction";

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    private static final Collection<ABIVersion> SUPPORTED_ABIVERSIONS;

    // Make sure to keep this in sync with the journal configuration in factory-akka.conf
    public static final String NON_PERSISTENT_JOURNAL_ID = "akka.persistence.non-persistent.journal";

        final ABIVersion[] values = ABIVersion.values();
        final ABIVersion[] real = Arrays.copyOfRange(values, 1, values.length - 1);
        SUPPORTED_ABIVERSIONS = ImmutableList.copyOf(real).reverse();

    // FIXME: make this a dynamic property based on mailbox size and maximum number of clients
    private static final int CLIENT_MAX_MESSAGES = 1000;
    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final String shardName;

    private final ShardStats shardMBean;

    private final ShardDataTreeListenerInfoMXBeanImpl listenerInfoMXBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    final FrontendMetadata frontendMetadata;

    private Map<FrontendIdentifier, LeaderFrontendState> knownFrontends = ImmutableMap.of();
    private boolean paused;

    private final MessageSlicer responseMessageSlicer;
    private final Dispatchers dispatchers;

    private final MessageAssembler requestMessageAssembler;
    protected Shard(final AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.shardName = builder.getId().getShardName();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
        this.frontendMetadata = new FrontendMetadata(name);

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        ShardDataTreeChangeListenerPublisherActorProxy treeChangeListenerPublisher =
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher", name);
        if (builder.getDataTree() != null) {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getDataTree(),
                    treeChangeListenerPublisher, name,
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getTreeType(),
                    builder.getDatastoreContext().getStoreRoot(), treeChangeListenerPublisher, name,

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType(), this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));

        commitCoordinator = new ShardCommitCoordinator(store, LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        dispatchers = new Dispatchers(context().system().dispatchers());
        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Transaction),
                self(), getContext(), shardMBean, builder.getId().getShardName());

        snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
                this.name, datastoreContext);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);

        responseMessageSlicer = MessageSlicer.builder().logContext(this.name)
                .messageSliceSize(datastoreContext.getMaximumMessageSliceSize())
                .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                .expireStateAfterInactivity(2, TimeUnit.MINUTES).build();

        requestMessageAssembler = MessageAssembler.builder().logContext(this.name)
                .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                .assembledMessageCallback((message, sender) -> self().tell(message, sender))
                .expireStateAfterInactivity(datastoreContext.getRequestTimeout(), TimeUnit.NANOSECONDS).build();

        listenerInfoMXBean = new ShardDataTreeListenerInfoMXBeanImpl(name, datastoreContext.getDataStoreMXBeanType(),
        listenerInfoMXBean.register();
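
    // Derives the per-transaction commit timeout: half of the configured shard transaction commit
    // timeout, converted from seconds to milliseconds.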
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;

    private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    public void postStop() throws Exception {
        LOG.info("Stopping Shard {}", persistenceId());

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
        listenerInfoMXBean.unregister();
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
    protected void handleNonRaftCommand(final Object message) {
        try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),

            store.resetTransactionBatch();

            if (message instanceof RequestEnvelope) {
                handleRequestEnvelope((RequestEnvelope)message);
            } else if (MessageAssembler.isHandledMessage(message)) {
                handleRequestAssemblerMessage(message);
            } else if (message instanceof ConnectClientRequest) {
                handleConnectClient((ConnectClientRequest)message);
            } else if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitTimeoutCheck();
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                store.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else if (message instanceof MakeLeaderLocal) {
            } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
                store.resumeNextPendingTransaction();
            } else if (GetKnownClients.INSTANCE.equals(message)) {
                handleGetKnownClients();
            } else if (!responseMessageSlicer.handleMessage(message)) {
                super.handleNonRaftCommand(message);
    private void handleRequestAssemblerMessage(final Object message) {
        dispatchers.getDispatcher(DispatcherType.Serialization).execute(() -> {
            JavaSerializer.currentSystem().value_$eq((ExtendedActorSystem) context().system());
            requestMessageAssembler.handleMessage(message, self());
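
    // Processes a tell-based protocol RequestEnvelope: successful replies are either sliced through the
    // response message slicer (for SliceableMessage responses) or sent back directly, in both cases
    // carrying the measured execution time; failures are reported back in the envelope.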
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleRequestEnvelope(final RequestEnvelope envelope) {
        final long now = ticker().read();
            final RequestSuccess<?, ?> success = handleRequest(envelope, now);
            if (success != null) {
                final long executionTimeNanos = ticker().read() - now;
                if (success instanceof SliceableMessage) {
                    dispatchers.getDispatcher(DispatcherType.Serialization).execute(() ->
                            responseMessageSlicer.slice(SliceOptions.builder().identifier(success.getTarget())
                                    .message(envelope.newSuccessEnvelope(success, executionTimeNanos))
                                    .sendTo(envelope.getMessage().getReplyTo()).replyTo(self())
                                    .onFailureCallback(t -> LOG.warn("Error slicing response {}", success, t)).build()));

                    envelope.sendSuccess(success, executionTimeNanos);

        } catch (RequestException e) {
            LOG.debug("{}: request {} failed", persistenceId(), envelope, e);
            envelope.sendFailure(e, ticker().read() - now);
        } catch (Exception e) {
            LOG.debug("{}: request {} caused failure", persistenceId(), envelope, e);
            envelope.sendFailure(new RuntimeRequestException("Request failed to process", e),
                    ticker().read() - now);
    private void commitTimeoutCheck() {
        store.checkForExpiredTransactions(transactionCommitTimeout, this::updateAccess);
        commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
        requestMessageAssembler.checkExpiredAssembledMessageState();
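
    // Determines the last-access tick to use when checking a pending transaction for expiration.
    // Returns empty when the originating frontend is not tracked (i.e. not using the tell-based
    // protocol), in which case no access update is applied.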
    private OptionalLong updateAccess(final SimpleShardDataTreeCohort cohort) {
        final FrontendIdentifier frontend = cohort.getIdentifier().getHistoryId().getClientId().getFrontendId();
        final LeaderFrontendState state = knownFrontends.get(frontend);
            // Not tell-based protocol, do nothing
            return OptionalLong.empty();

        if (isIsolatedLeader()) {
            // We are isolated and no new request can come through until we emerge from it. We are still updating
            // liveness of frontend when we see it attempting to communicate. Use the last access timer.
            return OptionalLong.of(state.getLastSeenTicks());

        // If this frontend has freshly connected, give it some time to catch up before killing its transactions.
        return OptionalLong.of(state.getLastConnectTicks());
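
    // Applies a DisableTrackingPayload: records the client as disabled in frontend metadata and replaces
    // any live leader frontend state for that client with a Disabled placeholder.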
    private void disableTracking(final DisableTrackingPayload payload) {
        final ClientIdentifier clientId = payload.getIdentifier();
        LOG.debug("{}: disabling tracking of {}", persistenceId(), clientId);
        frontendMetadata.disableTracking(clientId);

            final FrontendIdentifier frontendId = clientId.getFrontendId();
            final LeaderFrontendState frontend = knownFrontends.get(frontendId);
            if (frontend != null) {
                if (clientId.equals(frontend.getIdentifier())) {
                    if (!(frontend instanceof LeaderFrontendState.Disabled)) {
                        verify(knownFrontends.replace(frontendId, frontend,
                                new LeaderFrontendState.Disabled(persistenceId(), clientId, store)));
                        LOG.debug("{}: leader state for {} disabled", persistenceId(), clientId);
                        LOG.debug("{}: leader state {} is already disabled", persistenceId(), frontend);
                    LOG.debug("{}: leader state {} does not match {}", persistenceId(), frontend, clientId);
                LOG.debug("{}: leader state for {} not found", persistenceId(), clientId);
                knownFrontends.put(frontendId, new LeaderFrontendState.Disabled(persistenceId(), clientId,
    private void onMakeLeaderLocal() {
        LOG.debug("{}: onMakeLeaderLocal received", persistenceId());
            getSender().tell(new Status.Success(null), getSelf());

        final ActorSelection leader = getLeader();

        if (leader == null) {
            // Leader is not present. The cluster is most likely trying to
            // elect a leader and we should let that run its normal course

            // TODO we can wait for the election to complete and retry the
            // request. We can also let the caller retry by sending a flag
            // in the response indicating the request is "reTryable".
            getSender().tell(new Failure(
                    new LeadershipTransferFailedException("We cannot initiate leadership transfer to the local node. "
                            + "Currently there is no leader for " + persistenceId())),

        leader.tell(new RequestLeadership(getId(), getSender()), getSelf());
    // Acquire our frontend tracking handle and verify generation matches
    private @Nullable LeaderFrontendState findFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState existing = knownFrontends.get(clientId.getFrontendId());
        if (existing != null) {
            final int cmp = Long.compareUnsigned(existing.getIdentifier().getGeneration(), clientId.getGeneration());

                LOG.debug("{}: rejecting request from outdated client {}", persistenceId(), clientId);
                throw new RetiredGenerationException(clientId.getGeneration(),
                        existing.getIdentifier().getGeneration());

            LOG.info("{}: retiring state {}, outdated by request from client {}", persistenceId(), existing, clientId);

            knownFrontends.remove(clientId.getFrontendId());

            LOG.debug("{}: client {} is not yet known", persistenceId(), clientId);

    private LeaderFrontendState getFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState ret = findFrontend(clientId);

        // TODO: a dedicated exception would be better, but this is technically true, too
        throw new OutOfSequenceEnvelopeException(0);
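
    // Selects the first supported backend ABI version (iterated from newest to oldest, per the reversed
    // copy in the static initializer) that falls within the client's advertised [minVersion, maxVersion]
    // range, failing if the ranges do not intersect.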
    private static @NonNull ABIVersion selectVersion(final ConnectClientRequest message) {
        final Range<ABIVersion> clientRange = Range.closed(message.getMinVersion(), message.getMaxVersion());
        for (ABIVersion v : SUPPORTED_ABIVERSIONS) {
            if (clientRange.contains(v)) {

        throw new IllegalArgumentException(String.format(
                "No common version between backend versions %s and client versions %s", SUPPORTED_ABIVERSIONS,
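
    // Handles the ConnectClientRequest handshake: verifies we are the active leader, negotiates the ABI
    // version, creates or reuses the frontend state, and replies with a ConnectClientSuccess carrying the
    // backend actor, the local data tree and the CLIENT_MAX_MESSAGES limit, converted to the negotiated
    // version.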
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleConnectClient(final ConnectClientRequest message) {
            final ClientIdentifier clientId = message.getTarget();
            final LeaderFrontendState existing = findFrontend(clientId);
            if (existing != null) {

            if (!isLeader() || !isLeaderActive()) {
                LOG.info("{}: not currently leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                        + " isLeadershipTransferInProgress: {}.",
                        persistenceId(), message, isLeader(), isLeaderActive(), isLeadershipTransferInProgress());
                throw new NotLeaderException(getSelf());

            final ABIVersion selectedVersion = selectVersion(message);
            final LeaderFrontendState frontend;
            if (existing == null) {
                frontend = new LeaderFrontendState.Enabled(persistenceId(), clientId, store);
                knownFrontends.put(clientId.getFrontendId(), frontend);
                LOG.debug("{}: created state {} for client {}", persistenceId(), frontend, clientId);

            frontend.reconnect();
            message.getReplyTo().tell(new ConnectClientSuccess(message.getTarget(), message.getSequence(), getSelf(),
                    ImmutableList.of(), store.getDataTree(), CLIENT_MAX_MESSAGES).toVersion(selectedVersion),
                    ActorRef.noSender());
        } catch (RequestException | RuntimeException e) {
            message.getReplyTo().tell(new Failure(e), ActorRef.noSender());
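
    // Dispatches a tell-based Request from the envelope to the owning frontend state (transaction or
    // local history requests), failing fast with NotLeaderException when this shard is not the active,
    // unpaused leader.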
    private @Nullable RequestSuccess<?, ?> handleRequest(final RequestEnvelope envelope, final long now)
            throws RequestException {
        // We are not the leader, hence we want to fail-fast.
        if (!isLeader() || paused || !isLeaderActive()) {
            LOG.debug("{}: not currently active leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                    + " isLeadershipTransferInProgress: {}, paused: {}",
                    persistenceId(), envelope, isLeader(), isLeaderActive(), isLeadershipTransferInProgress(), paused);
            throw new NotLeaderException(getSelf());

        final Request<?, ?> request = envelope.getMessage();
        if (request instanceof TransactionRequest) {
            final TransactionRequest<?> txReq = (TransactionRequest<?>)request;
            final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
            return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
        } else if (request instanceof LocalHistoryRequest) {
            final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>)request;
            final ClientIdentifier clientId = lhReq.getTarget().getClientId();
            return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);

        LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
        throw new UnsupportedRequestException(request);
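
    // Replies to GetKnownClients with the identifiers of the known frontends, taken either from the live
    // leader frontend state or from the persisted frontend metadata.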
    private void handleGetKnownClients() {
        final ImmutableSet<ClientIdentifier> clients;
            clients = knownFrontends.values().stream()
                    .map(LeaderFrontendState::getIdentifier)
                    .collect(ImmutableSet.toImmutableSet());

            clients = frontendMetadata.getClients();

        sender().tell(new GetKnownClientsReply(clients), self());
    private boolean hasLeader() {
        return getLeaderId() != null;

    public int getPendingTxCommitQueueSize() {
        return store.getQueueSize();

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();

    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;

    String getShardName() {

    protected LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
            final short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    protected void onDatastoreContext(final DatastoreContext context) {
        datastoreContext = verifyNotNull(context);

        setTransactionCommitTimeout();

        setPersistence(datastoreContext.isPersistent());

        updateConfigParams(datastoreContext.getShardRaftConfig());
    // applyState() will be invoked once consensus is reached on the payload
    void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
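        // With no followers and no applicable persistence there is nothing to replicate or journal,
        // so the payload can be applied directly instead of going through persistData().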
        boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
        if (canSkipPayload) {
            applyState(self(), id, payload);
            // We are faking the sender
            persistData(self(), id, payload, batchHint);
    private void handleCommitTransaction(final CommitTransaction commit) {
        final TransactionIdentifier txId = commit.getTransactionId();
            askProtocolEncountered(txId);
            commitCoordinator.handleCommit(txId, getSender(), this);
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(), "Could not commit transaction " + txId);
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        final TransactionIdentifier txId = canCommit.getTransactionId();
        LOG.debug("{}: Processing CanCommitTransaction for {}", persistenceId(), txId);

            askProtocolEncountered(txId);
            commitCoordinator.handleCanCommit(txId, getSender(), this);
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + txId);
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
    @SuppressWarnings("checkstyle:IllegalCatch")
    protected void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
        askProtocolEncountered(batched.getTransactionId());

            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionId(), e);
            sender.tell(new Failure(e), getSelf());
    private void handleBatchedModifications(final BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not process BatchedModifications " + batched.getTransactionId());
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                        .createForwardedBatchedModifications(batched,
                                datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
    private boolean failIfIsolatedLeader(final ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all"
                    + " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionId(), e);
                getSender().tell(new Failure(e), getSelf());
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not process ready local transaction " + message.getTransactionId());
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
    private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            askProtocolEncountered(forwardedReady.getTransactionId());
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not process forwarded ready transaction " + forwardedReady.getTransactionId());
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit(),
                        forwardedReady.getParticipatingShardNames());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
    private void handleAbortTransaction(final AbortTransaction abort) {
        final TransactionIdentifier transactionId = abort.getTransactionId();
        askProtocolEncountered(transactionId);
        doAbortTransaction(transactionId, getSender());

    void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    private void handleCreateTransaction(final Object message) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
            getSender().tell(new Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
            final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
            askProtocolEncountered(id.getClientId());
            store.closeTransactionChain(id);
        } else if (getLeader() != null) {
            getLeader().forward(closeTransactionChain, getContext());
            LOG.warn("{}: Could not close transaction {}", persistenceId(), closeTransactionChain.getIdentifier());
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void createTransaction(final CreateTransaction createTransaction) {
        askProtocolEncountered(createTransaction.getTransactionId());

            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                    createTransaction.getTransactionId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new Failure(e), getSelf());
    private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
    // Called on leader only
    private void askProtocolEncountered(final TransactionIdentifier transactionId) {
        askProtocolEncountered(transactionId.getHistoryId().getClientId());

    // Called on leader only
    private void askProtocolEncountered(final ClientIdentifier clientId) {
        final FrontendIdentifier frontend = clientId.getFrontendId();
        final LeaderFrontendState state = knownFrontends.get(frontend);
        if (!(state instanceof LeaderFrontendState.Disabled)) {
            LOG.debug("{}: encountered ask-based client {}, disabling transaction tracking", persistenceId(), clientId);
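            // knownFrontends starts out as an immutable empty map; switch to a mutable one before
            // recording the Disabled state for this client.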
            if (knownFrontends.isEmpty()) {
                knownFrontends = new HashMap<>();
            knownFrontends.put(frontend, new LeaderFrontendState.Disabled(persistenceId(), clientId, getDataStore()));

            persistPayload(clientId, DisableTrackingPayload.create(clientId,
                    datastoreContext.getInitialPayloadSerializedBufferCapacity()), false);
    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getEffectiveModelContext());

    void updateSchemaContext(final @NonNull EffectiveModelContext schemaContext) {
        store.updateSchemaContext(schemaContext);

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;

    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        if (restoreFromSnapshot == null) {
            return ShardRecoveryCoordinator.create(store, persistenceId(), LOG);

        return ShardRecoveryCoordinator.forSnapshot(store, persistenceId(), LOG, restoreFromSnapshot.getSnapshot());
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        // notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof Payload) {
            if (data instanceof DisableTrackingPayload) {
                disableTracking((DisableTrackingPayload) data);

                store.applyReplicatedPayload(identifier, (Payload)data);
            } catch (DataValidationFailedException | IOException e) {
                LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);

            LOG.error("{}: Unknown state for {} received {}", persistenceId(), identifier, data);
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
            if (LOG.isDebugEnabled()) {
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());

            store.purgeLeaderState();

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
    protected void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        if (!knownFrontends.isEmpty()) {
            LOG.debug("{}: removing frontend state for {}", persistenceId(), knownFrontends.keySet());
            knownFrontends = ImmutableMap.of();

        requestMessageAssembler.close();

            // No leader anywhere, nothing else to do

            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<?> messagesToForward = convertPendingTransactionsToMessages();

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        LOG.debug("{}: Forwarding pending transaction message {}", persistenceId(), message);

                        leader.tell(message, self());

                commitCoordinator.abortPendingTransactions("The transaction was aborted due to in-flight leadership "
                        + "change and the leader address isn't available.", this);

            // We have become the leader, we need to reconstruct frontend state
            knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
            LOG.debug("{}: became leader with frontend state for {}", persistenceId(), knownFrontends.keySet());

        if (!isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
    /**
     * Clears all pending transactions and converts them to messages to be forwarded to a new leader.
     *
     * @return the converted messages
     */
    public Collection<?> convertPendingTransactionsToMessages() {
        return commitCoordinator.convertPendingTransactionsToMessages(
                datastoreContext.getShardBatchedModificationCount());
    protected void pauseLeader(final Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);

        // Tell-based protocol can replay transaction state, so it is safe to blow it up when we are paused.
        if (datastoreContext.isUseTellBasedProtocol()) {
            knownFrontends.values().forEach(LeaderFrontendState::retire);
            knownFrontends = ImmutableMap.of();

        store.setRunOnPendingTransactionsComplete(operation);
    protected void unpauseLeader() {
        LOG.debug("{}: In unpauseLeader", persistenceId());

        store.setRunOnPendingTransactionsComplete(null);

        // Restore tell-based protocol state as if we were becoming the leader
        knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));

    protected OnDemandRaftState.AbstractBuilder<?, ?> newOnDemandRaftStateBuilder() {
        return OnDemandShardState.newBuilder().treeChangeListenerActors(treeChangeSupport.getListenerActors())
                .commitCohortActors(store.getCohortActors());
    public String persistenceId() {

    public String journalPluginId() {
        // This method may be invoked from super constructor (wonderful), hence we also need to handle the case of
        // the field being uninitialized because our constructor is not finished.
        if (datastoreContext != null && !datastoreContext.isPersistent()) {
            return NON_PERSISTENT_JOURNAL_ID;

        return super.journalPluginId();
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;

    public ShardDataTree getDataStore() {

    ShardStats getShardMBean() {

    public static Builder builder() {
        return new Builder();
    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<? extends S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private EffectiveModelContextProvider schemaContextProvider;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private DataTree dataTree;

        private volatile boolean sealed;

        protected AbstractBuilder(final Class<? extends S> shardClass) {
            this.shardClass = shardClass;

        protected void checkSealed() {
            checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        @SuppressWarnings("unchecked")

        public T id(final ShardIdentifier newId) {

        public T peerAddresses(final Map<String, String> newPeerAddresses) {
            this.peerAddresses = newPeerAddresses;

        public T datastoreContext(final DatastoreContext newDatastoreContext) {
            this.datastoreContext = newDatastoreContext;

        public T schemaContextProvider(final EffectiveModelContextProvider newSchemaContextProvider) {
            this.schemaContextProvider = requireNonNull(newSchemaContextProvider);

        public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
            this.restoreFromSnapshot = newRestoreFromSnapshot;

        public T dataTree(final DataTree newDataTree) {
            this.dataTree = newDataTree;
        public ShardIdentifier getId() {

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;

        public EffectiveModelContext getSchemaContext() {
            return Verify.verifyNotNull(schemaContextProvider.getEffectiveModelContext());

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;

        public DataTree getDataTree() {

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                    return TreeType.CONFIGURATION;
                    return TreeType.OPERATIONAL;
                    throw new IllegalStateException("Unhandled logical store type "
                            + datastoreContext.getLogicalStoreType());
        protected void verify() {
            requireNonNull(id, "id should not be null");
            requireNonNull(peerAddresses, "peerAddresses should not be null");
            requireNonNull(datastoreContext, "dataStoreContext should not be null");
            requireNonNull(schemaContextProvider, "schemaContextProvider should not be null");

        public Props props() {
            return Props.create(shardClass, this);
    public static class Builder extends AbstractBuilder<Builder, Shard> {

        Builder(final Class<? extends Shard> shardClass) {

        return Ticker.systemTicker();

    void scheduleNextPendingTransaction() {
        self().tell(RESUME_NEXT_PENDING_TRANSACTION, ActorRef.noSender());