/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.actor.Status;
import akka.actor.Status.Failure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Ticker;
import com.google.common.base.Verify;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Range;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
import org.opendaylight.controller.cluster.access.commands.LocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.RetiredGenerationException;
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.PersistAbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.messaging.MessageSlicer;
import org.opendaylight.controller.cluster.messaging.SliceOptions;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TipProducingDataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 *
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to the InMemoryDataTree.
 */
public class Shard extends RaftActor {

    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    @VisibleForTesting
    static final Object RESUME_NEXT_PENDING_TRANSACTION = new Object() {
        @Override
        public String toString() {
            return "resumeNextPendingTransaction";
        }
    };

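    // These self-message singletons are matched by identity (Object.equals), so they can never
    // collide with application messages; the toString() overrides exist purely to keep log output
    // readable.
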
    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    private static final Collection<ABIVersion> SUPPORTED_ABIVERSIONS;

    static {
        final ABIVersion[] values = ABIVersion.values();
        final ABIVersion[] real = Arrays.copyOfRange(values, 1, values.length - 1);
        SUPPORTED_ABIVERSIONS = ImmutableList.copyOf(real).reverse();
    }

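    // Note: the copyOfRange() above drops the first and last entries of ABIVersion.values(),
    // which are presumed to be the test-only boundary versions, and reverse() stores the
    // remainder in descending order so that version negotiation in selectVersion() prefers the
    // newest version both sides support.
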
    // FIXME: make this a dynamic property based on mailbox size and maximum number of clients
    private static final int CLIENT_MAX_MESSAGES = 1000;

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    private final FrontendMetadata frontendMetadata;
    private Map<FrontendIdentifier, LeaderFrontendState> knownFrontends = ImmutableMap.of();
    private boolean paused;

    private final MessageSlicer responseMessageSlicer;
    private final Dispatchers dispatchers;

    protected Shard(final AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
        this.frontendMetadata = new FrontendMetadata(name);

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        ShardDataTreeChangeListenerPublisherActorProxy treeChangeListenerPublisher =
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher", name);
        ShardDataChangeListenerPublisherActorProxy dataChangeListenerPublisher =
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher", name);
        if (builder.getDataTree() != null) {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getDataTree(),
                    treeChangeListenerPublisher, dataChangeListenerPublisher, name, frontendMetadata);
        } else {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getTreeType(),
                    builder.getDatastoreContext().getStoreRoot(), treeChangeListenerPublisher,
                    dataChangeListenerPublisher, name, frontendMetadata);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType(), this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store, LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        dispatchers = new Dispatchers(context().system().dispatchers());
        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Transaction),
                self(), getContext(), shardMBean, builder.getId().getShardName());

        snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
                this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);

        responseMessageSlicer = MessageSlicer.builder().logContext(this.name)
                .messageSliceSize(datastoreContext.getMaximumMessageSliceSize())
                .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                .expireStateAfterInactivity(2, TimeUnit.MINUTES).build();
    }

    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

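    // Note: the effective commit timeout is half of the configured value, presumably to leave
    // headroom for the front-end to retry before its own timer expires; the periodic
    // TX_COMMIT_TIMEOUT_CHECK_MESSAGE fires at a third of this value (see onRecoveryComplete()).
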
    private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }

    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
                getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }

    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                        maybeError.get());
            }

            store.resetTransactionBatch();

            if (message instanceof RequestEnvelope) {
                handleRequestEnvelope((RequestEnvelope)message);
            } else if (message instanceof ConnectClientRequest) {
                handleConnectClient((ConnectClientRequest)message);
            } else if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                store.checkForExpiredTransactions(transactionCommitTimeout);
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                store.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else if (message instanceof PersistAbortTransactionPayload) {
                final TransactionIdentifier txId = ((PersistAbortTransactionPayload) message).getTransactionId();
                persistPayload(txId, AbortTransactionPayload.create(txId), true);
            } else if (message instanceof MakeLeaderLocal) {
                onMakeLeaderLocal();
            } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
                store.resumeNextPendingTransaction();
            } else if (!responseMessageSlicer.handleMessage(message)) {
                super.handleNonRaftCommand(message);
            }
        }
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleRequestEnvelope(final RequestEnvelope envelope) {
        final long now = ticker().read();
        try {
            final RequestSuccess<?, ?> success = handleRequest(envelope, now);
            if (success != null) {
                final long executionTimeNanos = ticker().read() - now;
                if (success instanceof SliceableMessage) {
                    dispatchers.getDispatcher(DispatcherType.Serialization).execute(() ->
                        responseMessageSlicer.slice(SliceOptions.builder().identifier(success.getTarget())
                            .message(envelope.newSuccessEnvelope(success, executionTimeNanos))
                            .sendTo(envelope.getMessage().getReplyTo()).replyTo(self())
                            .onFailureCallback(t -> {
                                LOG.warn("Error slicing response {}", success, t);
                            }).build()));
                } else {
                    envelope.sendSuccess(success, executionTimeNanos);
                }
            }
        } catch (RequestException e) {
            LOG.debug("{}: request {} failed", persistenceId(), envelope, e);
            envelope.sendFailure(e, ticker().read() - now);
        } catch (Exception e) {
            LOG.debug("{}: request {} caused failure", persistenceId(), envelope, e);
            envelope.sendFailure(new RuntimeRequestException("Request failed to process", e),
                    ticker().read() - now);
        }
    }

    private void onMakeLeaderLocal() {
        LOG.debug("{}: onMakeLeaderLocal received", persistenceId());
        if (isLeader()) {
            getSender().tell(new Status.Success(null), getSelf());
            return;
        }

        final ActorSelection leader = getLeader();

        if (leader == null) {
            // Leader is not present. The cluster is most likely trying to
            // elect a leader and we should let that run its normal course.

            // TODO we can wait for the election to complete and retry the
            // request. We can also let the caller retry by sending a flag
            // in the response indicating the request is "reTryable".
            getSender().tell(new Failure(
                    new LeadershipTransferFailedException("We cannot initiate leadership transfer to local node. "
                            + "Currently there is no leader for " + persistenceId())),
                    getSelf());
            return;
        }

        leader.tell(new RequestLeadership(getId(), getSender()), getSelf());
    }

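    // Note: RequestLeadership carries the original getSender() reference, which appears to let the
    // current leader report the outcome of the transfer directly back to the requester.
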
    // Acquire our frontend tracking handle and verify generation matches
    private LeaderFrontendState getFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState existing = knownFrontends.get(clientId.getFrontendId());
        if (existing != null) {
            final int cmp = Long.compareUnsigned(existing.getIdentifier().getGeneration(), clientId.getGeneration());
            if (cmp == 0) {
                return existing;
            }
            if (cmp > 0) {
                LOG.debug("{}: rejecting request from outdated client {}", persistenceId(), clientId);
                throw new RetiredGenerationException(existing.getIdentifier().getGeneration());
            }

            LOG.info("{}: retiring state {}, outdated by request from client {}", persistenceId(), existing, clientId);
            existing.retire();
            knownFrontends.remove(clientId.getFrontendId());
        } else {
            LOG.debug("{}: client {} is not yet known", persistenceId(), clientId);
        }

        final LeaderFrontendState ret = new LeaderFrontendState(persistenceId(), clientId, store);
        knownFrontends.put(clientId.getFrontendId(), ret);
        LOG.debug("{}: created state {} for client {}", persistenceId(), ret, clientId);
        return ret;
    }

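    // The unsigned generation comparison above yields three cases: an equal generation reuses the
    // existing state, a request from an older (retired) generation is rejected outright, and a
    // newer generation retires the existing state before a fresh LeaderFrontendState is created.
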
    private static @Nonnull ABIVersion selectVersion(final ConnectClientRequest message) {
        final Range<ABIVersion> clientRange = Range.closed(message.getMinVersion(), message.getMaxVersion());
        for (ABIVersion v : SUPPORTED_ABIVERSIONS) {
            if (clientRange.contains(v)) {
                return v;
            }
        }

        throw new IllegalArgumentException(String.format(
            "No common version between backend versions %s and client versions %s", SUPPORTED_ABIVERSIONS,
            clientRange));
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleConnectClient(final ConnectClientRequest message) {
        try {
            if (!isLeader() || !isLeaderActive()) {
                LOG.info("{}: not currently leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                        + " isLeadershipTransferInProgress: {}.",
                        persistenceId(), message, isLeader(), isLeaderActive(), isLeadershipTransferInProgress());
                throw new NotLeaderException(getSelf());
            }

            final ABIVersion selectedVersion = selectVersion(message);
            final LeaderFrontendState frontend = getFrontend(message.getTarget());
            frontend.reconnect();
            message.getReplyTo().tell(new ConnectClientSuccess(message.getTarget(), message.getSequence(), getSelf(),
                    ImmutableList.of(), store.getDataTree(), CLIENT_MAX_MESSAGES).toVersion(selectedVersion),
                    ActorRef.noSender());
        } catch (RequestException | RuntimeException e) {
            message.getReplyTo().tell(new Failure(e), ActorRef.noSender());
        }
    }

    private @Nullable RequestSuccess<?, ?> handleRequest(final RequestEnvelope envelope, final long now)
            throws RequestException {
        // We are not the leader, hence we want to fail-fast.
        if (!isLeader() || paused || !isLeaderActive()) {
            LOG.debug("{}: not currently active leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                    + " isLeadershipTransferInProgress: {}, paused: {}",
                    persistenceId(), envelope, isLeader(), isLeaderActive(), isLeadershipTransferInProgress(), paused);
            throw new NotLeaderException(getSelf());
        }

        final Request<?, ?> request = envelope.getMessage();
        if (request instanceof TransactionRequest) {
            final TransactionRequest<?> txReq = (TransactionRequest<?>)request;
            final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
            return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
        } else if (request instanceof LocalHistoryRequest) {
            final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>)request;
            final ClientIdentifier clientId = lhReq.getTarget().getClientId();
            return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);
        } else {
            LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
            throw new UnsupportedRequestException(request);
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return store.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
            final short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

    protected void onDatastoreContext(final DatastoreContext context) {
        datastoreContext = context;

        setTransactionCommitTimeout();

        setPersistence(datastoreContext.isPersistent());

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    // applyState() will be invoked once consensus is reached on the payload
    void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
        boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
        if (canSkipPayload) {
            applyState(self(), id, payload);
        } else {
            // We are faking the sender
            persistData(self(), id, payload, batchHint);
        }
    }

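    // When the shard has no followers and recovery persistence is disabled, there is no replication
    // or journal to wait on, so the payload can be applied immediately instead of going through
    // persistData().
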
    private void handleCommitTransaction(final CommitTransaction commit) {
        if (isLeader()) {
            commitCoordinator.handleCommit(commit.getTransactionId(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                        "Could not commit transaction " + commit.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Processing canCommit for transaction {}", persistenceId(), canCommit.getTransactionId());

        if (isLeader()) {
            commitCoordinator.handleCanCommit(canCommit.getTransactionId(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + canCommit.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    protected void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionId(), e);
            sender.tell(new Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(final BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionId());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                        .createForwardedBatchedModifications(batched,
                                datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

    private boolean failIfIsolatedLeader(final ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all"
                            + " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionId(), e);
                getSender().tell(new Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

    private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

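    // A ForwardedReadyTransaction wraps a local DataTreeModification, which cannot be sent to a
    // remote leader as-is; it is re-wrapped above as a ReadyLocalTransaction with the remote
    // version pinned to the leader's payload version, presumably so it serializes into a form the
    // leader understands.
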
    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionId(), getSender());
    }

    void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
        store.closeTransactionChain(id, null);
        store.purgeTransactionChain(id, null);
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void createTransaction(final CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                    createTransaction.getTransactionId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId);
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store,
            restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

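    // Checking every transactionCommitTimeout / 3 bounds the detection latency for an expired
    // transaction to at most one period past its deadline, at the cost of a few extra
    // self-messages.
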
    @Override
    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof Payload) {
            try {
                store.applyReplicatedPayload(identifier, (Payload)data);
            } catch (DataValidationFailedException | IOException e) {
                LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
            }
        } else {
            LOG.error("{}: Unknown state for {} received {}", persistenceId(), identifier, data);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            paused = false;
            store.purgeLeaderState();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.incrementLeadershipChangeCount();
        paused = false;

        if (!isLeader()) {
            if (!knownFrontends.isEmpty()) {
                LOG.debug("{}: removing frontend state for {}", persistenceId(), knownFrontends.keySet());
                knownFrontends = ImmutableMap.of();
            }

            if (!hasLeader()) {
                // No leader anywhere, nothing else to do
                return;
            }

            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<?> messagesToForward = convertPendingTransactionsToMessages();

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions("The transaction was aborted due to an in-flight "
                        + "leadership change and the leader address isn't available.", this);
            }
        } else {
            // We have become the leader, we need to reconstruct frontend state
            knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
            LOG.debug("{}: became leader with frontend state for {}", persistenceId(), knownFrontends.keySet());
        }

        if (!isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    /**
     * Clears all pending transactions and converts them to messages to be forwarded to a new leader.
     *
     * @return the converted messages
     */
    public Collection<?> convertPendingTransactionsToMessages() {
        return commitCoordinator.convertPendingTransactionsToMessages(
                datastoreContext.getShardBatchedModificationCount());
    }

    @Override
    protected void pauseLeader(final Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        paused = true;

        // Tell-based protocol can replay transaction state, so it is safe to blow it up when we are paused.
        knownFrontends.values().forEach(LeaderFrontendState::retire);
        knownFrontends = ImmutableMap.of();

        store.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    protected void unpauseLeader() {
        LOG.debug("{}: In unpauseLeader", persistenceId());
        paused = false;

        store.setRunOnPendingTransactionsComplete(null);

        // Restore tell-based protocol state as if we were becoming the leader
        knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
    }

    @Override
    protected OnDemandRaftState.AbstractBuilder<?> newOnDemandRaftStateBuilder() {
        return OnDemandShardState.newBuilder().treeChangeListenerActors(treeChangeSupport.getListenerActors())
                .dataChangeListenerActors(changeSupport.getListenerActors())
                .commitCohortActors(store.getCohortActors());
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

    public static Builder builder() {
        return new Builder();
    }

    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContextProvider schemaContextProvider;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private TipProducingDataTree dataTree;
        private volatile boolean sealed;

        protected AbstractBuilder(final Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(final ShardIdentifier newId) {
            checkSealed();
            this.id = newId;
            return self();
        }

        public T peerAddresses(final Map<String, String> newPeerAddresses) {
            checkSealed();
            this.peerAddresses = newPeerAddresses;
            return self();
        }

        public T datastoreContext(final DatastoreContext newDatastoreContext) {
            checkSealed();
            this.datastoreContext = newDatastoreContext;
            return self();
        }

        public T schemaContextProvider(final SchemaContextProvider schemaContextProvider) {
            checkSealed();
            this.schemaContextProvider = Preconditions.checkNotNull(schemaContextProvider);
            return self();
        }

        public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = newRestoreFromSnapshot;
            return self();
        }

        public T dataTree(final TipProducingDataTree newDataTree) {
            checkSealed();
            this.dataTree = newDataTree;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return Verify.verifyNotNull(schemaContextProvider.getSchemaContext());
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public TipProducingDataTree getDataTree() {
            return dataTree;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                case CONFIGURATION:
                    return TreeType.CONFIGURATION;
                case OPERATIONAL:
                    return TreeType.OPERATIONAL;
                default:
                    throw new IllegalStateException("Unhandled logical store type "
                            + datastoreContext.getLogicalStoreType());
            }
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
            Preconditions.checkNotNull(schemaContextProvider, "schemaContextProvider should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

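    // props() is the builder's terminal operation: it flips the volatile sealed flag before
    // validating, so any later setter call trips checkSealed().
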
    public static class Builder extends AbstractBuilder<Builder, Shard> {
        private Builder() {
            super(Shard.class);
        }
    }

    Ticker ticker() {
        return Ticker.systemTicker();
    }

    void scheduleNextPendingTransaction() {
        self().tell(RESUME_NEXT_PENDING_TRANSACTION, ActorRef.noSender());
    }
}