/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.ExtendedActorSystem;
import akka.actor.Props;
import akka.actor.Status;
import akka.actor.Status.Failure;
import akka.serialization.JavaSerializer;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Ticker;
import com.google.common.base.Verify;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Range;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
import org.opendaylight.controller.cluster.access.commands.LocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelopeException;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.RetiredGenerationException;
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.Dispatchers;
import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardDataTreeListenerInfoMXBeanImpl;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.PersistAbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.messaging.MessageAssembler;
import org.opendaylight.controller.cluster.messaging.MessageSlicer;
import org.opendaylight.controller.cluster.messaging.SliceOptions;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 *
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to the internal data tree.
 */
public class Shard extends RaftActor {

    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    static final Object RESUME_NEXT_PENDING_TRANSACTION = new Object() {
        @Override
        public String toString() {
            return "resumeNextPendingTransaction";
        }
    };

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    private static final Collection<ABIVersion> SUPPORTED_ABIVERSIONS;

    static {
        final ABIVersion[] values = ABIVersion.values();
        final ABIVersion[] real = Arrays.copyOfRange(values, 1, values.length - 1);
        SUPPORTED_ABIVERSIONS = ImmutableList.copyOf(real).reverse();
    }

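    // The copyOfRange() above is assumed to trim the test-only boundary versions that
    // ABIVersion.values() places at either end; reversing the remainder puts newer versions
    // first, so version negotiation (see selectVersion()) prefers the highest common version.
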
    // FIXME: make this a dynamic property based on mailbox size and maximum number of clients
    private static final int CLIENT_MAX_MESSAGES = 1000;

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final String shardName;

    private final ShardStats shardMBean;

    private final ShardDataTreeListenerInfoMXBeanImpl listenerInfoMXBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    private final FrontendMetadata frontendMetadata;
    private Map<FrontendIdentifier, LeaderFrontendState> knownFrontends = ImmutableMap.of();
    private boolean paused;

    private final MessageSlicer responseMessageSlicer;
    private final Dispatchers dispatchers;

    private final MessageAssembler requestMessageAssembler;

    protected Shard(final AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
            Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.shardName = builder.getId().getShardName();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
        this.frontendMetadata = new FrontendMetadata(name);

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        ShardDataTreeChangeListenerPublisherActorProxy treeChangeListenerPublisher =
            new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher", name);
        if (builder.getDataTree() != null) {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getDataTree(),
                treeChangeListenerPublisher, name, frontendMetadata);
        } else {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getTreeType(),
                builder.getDatastoreContext().getStoreRoot(), treeChangeListenerPublisher, name, frontendMetadata);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType(), this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store, LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
            getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        dispatchers = new Dispatchers(context().system().dispatchers());
        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
            dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Transaction),
            self(), getContext(), shardMBean, builder.getId().getShardName());

        snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
            this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);

        responseMessageSlicer = MessageSlicer.builder().logContext(this.name)
            .messageSliceSize(datastoreContext.getMaximumMessageSliceSize())
            .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
            .expireStateAfterInactivity(2, TimeUnit.MINUTES).build();

        requestMessageAssembler = MessageAssembler.builder().logContext(this.name)
            .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
            .assembledMessageCallback((message, sender) -> self().tell(message, sender))
            .expireStateAfterInactivity(datastoreContext.getRequestTimeout(), TimeUnit.NANOSECONDS).build();

        listenerInfoMXBean = new ShardDataTreeListenerInfoMXBeanImpl(name, datastoreContext.getDataStoreMXBeanType(),
            self());
        listenerInfoMXBean.register();
    }

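    // Stores half of the configured shard transaction commit timeout, in milliseconds. The
    // periodic TX_COMMIT_TIMEOUT_CHECK_MESSAGE, scheduled in onRecoveryComplete() at a third of
    // this value, compares the age of pending transactions against it in commitTimeoutCheck().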
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
            datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
            RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
        listenerInfoMXBean.unregister();
    }

    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
            getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }

    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    maybeError.get());
            }

            store.resetTransactionBatch();

            if (message instanceof RequestEnvelope) {
                handleRequestEnvelope((RequestEnvelope)message);
            } else if (MessageAssembler.isHandledMessage(message)) {
                handleRequestAssemblerMessage(message);
            } else if (message instanceof ConnectClientRequest) {
                handleConnectClient((ConnectClientRequest)message);
            } else if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitTimeoutCheck();
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                store.processCohortRegistryCommand(getSender(),
                    (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else if (message instanceof PersistAbortTransactionPayload) {
                final TransactionIdentifier txId = ((PersistAbortTransactionPayload) message).getTransactionId();
                persistPayload(txId, AbortTransactionPayload.create(txId), true);
            } else if (message instanceof MakeLeaderLocal) {
                onMakeLeaderLocal();
            } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
                store.resumeNextPendingTransaction();
            } else if (!responseMessageSlicer.handleMessage(message)) {
                super.handleNonRaftCommand(message);
            }
        }
    }

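    // Message de-slicing is done on the serialization dispatcher rather than the actor thread.
    // Akka's JavaSerializer resolves actor references via its thread-local current system, so it
    // must be pointed at our ExtendedActorSystem before the assembler deserializes any slices.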
    private void handleRequestAssemblerMessage(final Object message) {
        dispatchers.getDispatcher(DispatcherType.Serialization).execute(() -> {
            JavaSerializer.currentSystem().value_$eq((ExtendedActorSystem) context().system());
            requestMessageAssembler.handleMessage(message, self());
        });
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleRequestEnvelope(final RequestEnvelope envelope) {
        final long now = ticker().read();
        try {
            final RequestSuccess<?, ?> success = handleRequest(envelope, now);
            if (success != null) {
                final long executionTimeNanos = ticker().read() - now;
                if (success instanceof SliceableMessage) {
                    dispatchers.getDispatcher(DispatcherType.Serialization).execute(() ->
                        responseMessageSlicer.slice(SliceOptions.builder().identifier(success.getTarget())
                            .message(envelope.newSuccessEnvelope(success, executionTimeNanos))
                            .sendTo(envelope.getMessage().getReplyTo()).replyTo(self())
                            .onFailureCallback(t -> LOG.warn("Error slicing response {}", success, t)).build()));
                } else {
                    envelope.sendSuccess(success, executionTimeNanos);
                }
            }
        } catch (RequestException e) {
            LOG.debug("{}: request {} failed", persistenceId(), envelope, e);
            envelope.sendFailure(e, ticker().read() - now);
        } catch (Exception e) {
            LOG.debug("{}: request {} caused failure", persistenceId(), envelope, e);
            envelope.sendFailure(new RuntimeRequestException("Request failed to process", e),
                ticker().read() - now);
        }
    }

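    // Runs on every TX_COMMIT_TIMEOUT_CHECK_MESSAGE tick: expires transactions stuck in the
    // commit pipeline, along with stale sliced-message assembly state. updateAccess() reports
    // when the owning frontend was last heard from, so live clients are not timed out spuriously.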
    private void commitTimeoutCheck() {
        store.checkForExpiredTransactions(transactionCommitTimeout, this::updateAccess);
        commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
        requestMessageAssembler.checkExpiredAssembledMessageState();
    }

    private Optional<Long> updateAccess(final SimpleShardDataTreeCohort cohort) {
        final FrontendIdentifier frontend = cohort.getIdentifier().getHistoryId().getClientId().getFrontendId();
        final LeaderFrontendState state = knownFrontends.get(frontend);
        if (state == null) {
            // Not tell-based protocol, do nothing
            return Optional.absent();
        }

        if (isIsolatedLeader()) {
            // We are isolated and no new request can come through until we emerge from it. We are still updating
            // liveness of frontend when we see it attempting to communicate. Use the last access timer.
            return Optional.of(state.getLastSeenTicks());
        }

        // If this frontend has freshly connected, give it some time to catch up before killing its transactions.
        return Optional.of(state.getLastConnectTicks());
    }

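    // MakeLeaderLocal asks this shard to acquire leadership: if we already lead we simply
    // acknowledge, otherwise we ask the current remote leader to transfer leadership to us.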
    private void onMakeLeaderLocal() {
        LOG.debug("{}: onMakeLeaderLocal received", persistenceId());
        if (isLeader()) {
            getSender().tell(new Status.Success(null), getSelf());
            return;
        }

        final ActorSelection leader = getLeader();

        if (leader == null) {
            // Leader is not present. The cluster is most likely trying to
            // elect a leader and we should let that run its normal course

            // TODO we can wait for the election to complete and retry the
            // request. We can also let the caller retry by sending a flag
            // in the response indicating the request is "retryable".
            getSender().tell(new Failure(
                new LeadershipTransferFailedException("We cannot initiate leadership transfer to local node. "
                    + "Currently there is no leader for " + persistenceId())),
                getSelf());
            return;
        }

        leader.tell(new RequestLeadership(getId(), getSender()), getSelf());
    }

    // Acquire our frontend tracking handle and verify generation matches
    @Nullable
    private LeaderFrontendState findFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState existing = knownFrontends.get(clientId.getFrontendId());
        if (existing != null) {
            final int cmp = Long.compareUnsigned(existing.getIdentifier().getGeneration(), clientId.getGeneration());
            if (cmp == 0) {
                existing.touch();
                return existing;
            }
            if (cmp > 0) {
                LOG.debug("{}: rejecting request from outdated client {}", persistenceId(), clientId);
                throw new RetiredGenerationException(clientId.getGeneration(),
                    existing.getIdentifier().getGeneration());
            }

            LOG.info("{}: retiring state {}, outdated by request from client {}", persistenceId(), existing, clientId);
            existing.retire();
            knownFrontends.remove(clientId.getFrontendId());
        } else {
            LOG.debug("{}: client {} is not yet known", persistenceId(), clientId);
        }

        return null;
    }

    private LeaderFrontendState getFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState ret = findFrontend(clientId);
        if (ret != null) {
            return ret;
        }

        // TODO: a dedicated exception would be better, but this is technically true, too
        throw new OutOfSequenceEnvelopeException(0);
    }

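    // Since SUPPORTED_ABIVERSIONS is ordered newest-first, the first version falling within the
    // client's advertised [minVersion, maxVersion] range is the highest mutually supported one.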
    private static ABIVersion selectVersion(final ConnectClientRequest message) {
        final Range<ABIVersion> clientRange = Range.closed(message.getMinVersion(), message.getMaxVersion());
        for (ABIVersion v : SUPPORTED_ABIVERSIONS) {
            if (clientRange.contains(v)) {
                return v;
            }
        }

        throw new IllegalArgumentException(String.format(
            "No common version between backend versions %s and client versions %s", SUPPORTED_ABIVERSIONS,
            clientRange));
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleConnectClient(final ConnectClientRequest message) {
        try {
            final ClientIdentifier clientId = message.getTarget();
            final LeaderFrontendState existing = findFrontend(clientId);
            if (existing != null) {
                existing.touch();
            }

            if (!isLeader() || !isLeaderActive()) {
                LOG.info("{}: not currently leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                    + " isLeadershipTransferInProgress: {}.",
                    persistenceId(), message, isLeader(), isLeaderActive(), isLeadershipTransferInProgress());
                throw new NotLeaderException(getSelf());
            }

            final ABIVersion selectedVersion = selectVersion(message);
            final LeaderFrontendState frontend;
            if (existing == null) {
                frontend = new LeaderFrontendState(persistenceId(), clientId, store);
                knownFrontends.put(clientId.getFrontendId(), frontend);
                LOG.debug("{}: created state {} for client {}", persistenceId(), frontend, clientId);
            } else {
                frontend = existing;
            }

            frontend.reconnect();
            message.getReplyTo().tell(new ConnectClientSuccess(message.getTarget(), message.getSequence(), getSelf(),
                ImmutableList.of(), store.getDataTree(), CLIENT_MAX_MESSAGES).toVersion(selectedVersion),
                ActorRef.noSender());
        } catch (RequestException | RuntimeException e) {
            message.getReplyTo().tell(new Failure(e), ActorRef.noSender());
        }
    }

    @Nullable
    private RequestSuccess<?, ?> handleRequest(final RequestEnvelope envelope, final long now)
            throws RequestException {
        // We are not the leader, hence we want to fail-fast.
        if (!isLeader() || paused || !isLeaderActive()) {
            LOG.debug("{}: not currently active leader, rejecting request {}. isLeader: {}, isLeaderActive: {},"
                + " isLeadershipTransferInProgress: {}, paused: {}",
                persistenceId(), envelope, isLeader(), isLeaderActive(), isLeadershipTransferInProgress(), paused);
            throw new NotLeaderException(getSelf());
        }

        final Request<?, ?> request = envelope.getMessage();
        if (request instanceof TransactionRequest) {
            final TransactionRequest<?> txReq = (TransactionRequest<?>)request;
            final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
            return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
        } else if (request instanceof LocalHistoryRequest) {
            final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>)request;
            final ClientIdentifier clientId = lhReq.getTarget().getClientId();
            return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);
        } else {
            LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
            throw new UnsupportedRequestException(request);
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return store.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    String getShardName() {
        return shardName;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
            final short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

    protected void onDatastoreContext(final DatastoreContext context) {
        datastoreContext = context;

        setTransactionCommitTimeout();

        setPersistence(datastoreContext.isPersistent());

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

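    // When there are no followers and persistence is disabled there is neither replication nor a
    // journal to wait on, so consensus is immediate and the payload can be applied directly.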
    // applyState() will be invoked once consensus is reached on the payload
    void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
        boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
        if (canSkipPayload) {
            applyState(self(), id, payload);
        } else {
            // We are faking the sender
            persistData(self(), id, payload, batchHint);
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if (isLeader()) {
            commitCoordinator.handleCommit(commit.getTransactionId(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                    "Could not commit transaction " + commit.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionId());

        if (isLeader()) {
            commitCoordinator.handleCanCommit(canCommit.getTransactionId(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                    "Could not canCommit transaction " + canCommit.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    protected void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                batched.getTransactionId(), e);
            sender.tell(new Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(final BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                    "Could not process BatchedModifications " + batched.getTransactionId());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                    .createForwardedBatchedModifications(batched,
                        datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                    newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

    private boolean failIfIsolatedLeader(final ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new Failure(new NoShardLeaderException(String.format(
                "Shard %s was the leader but has lost contact with all of its followers. Either all"
                    + " other follower nodes are down or this node is isolated by a network partition.",
                persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                    message.getTransactionId(), e);
                getSender().tell(new Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                    "Could not process ready local transaction " + message.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

    private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                    "Could not process forwarded ready transaction " + forwardedReady.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                    forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit(),
                    forwardedReady.getParticipatingShardNames());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionId(), getSender());
    }

    void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new Failure(new NoShardLeaderException(
                "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        if (isLeader()) {
            final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
            // FIXME: CONTROLLER-1628: stage purge once no transactions are present
            store.closeTransactionChain(id, null);
            store.purgeTransactionChain(id, null);
        } else if (getLeader() != null) {
            getLeader().forward(closeTransactionChain, getContext());
        } else {
            LOG.warn("{}: Could not close transaction {}", persistenceId(), closeTransactionChain.getIdentifier());
        }
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    private void createTransaction(final CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
            transactionId);
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    @VisibleForTesting
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        if (restoreFromSnapshot == null) {
            return ShardRecoveryCoordinator.create(store, persistenceId(), LOG);
        }

        return ShardRecoveryCoordinator.forSnapshot(store, persistenceId(), LOG, restoreFromSnapshot.getSnapshot());
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                period, period, getSelf(),
                TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

    @Override
    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof Payload) {
            try {
                store.applyReplicatedPayload(identifier, (Payload)data);
            } catch (DataValidationFailedException | IOException e) {
                LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
            }
        } else {
            LOG.error("{}: Unknown state for {} received {}", persistenceId(), identifier, data);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            paused = false;
            store.purgeLeaderState();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.incrementLeadershipChangeCount();
        paused = false;

        if (!isLeader()) {
            if (!knownFrontends.isEmpty()) {
                LOG.debug("{}: removing frontend state for {}", persistenceId(), knownFrontends.keySet());
                knownFrontends = ImmutableMap.of();
            }

            requestMessageAssembler.close();

            if (!hasLeader()) {
                // No leader anywhere, nothing else to do
                return;
            }

            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<?> messagesToForward = convertPendingTransactionsToMessages();

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                        messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        LOG.debug("{}: Forwarding pending transaction message {}", persistenceId(), message);

                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions("The transaction was aborted due to inflight leadership "
                    + "change and the leader address isn't available.", this);
            }
        } else {
            // We have become the leader, we need to reconstruct frontend state
            knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
            LOG.debug("{}: became leader with frontend state for {}", persistenceId(), knownFrontends.keySet());
        }

        if (!isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    /**
     * Clears all pending transactions and converts them to messages to be forwarded to a new leader.
     *
     * @return the converted messages
     */
    public Collection<?> convertPendingTransactionsToMessages() {
        return commitCoordinator.convertPendingTransactionsToMessages(
            datastoreContext.getShardBatchedModificationCount());
    }

    @Override
    protected void pauseLeader(final Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        paused = true;

        // Tell-based protocol can replay transaction state, so it is safe to blow it up when we are paused.
        knownFrontends.values().forEach(LeaderFrontendState::retire);
        knownFrontends = ImmutableMap.of();

        store.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    protected void unpauseLeader() {
        LOG.debug("{}: In unpauseLeader", persistenceId());
        paused = false;

        store.setRunOnPendingTransactionsComplete(null);

        // Restore tell-based protocol state as if we were becoming the leader
        knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
    }

    @Override
    protected OnDemandRaftState.AbstractBuilder<?, ?> newOnDemandRaftStateBuilder() {
        return OnDemandShardState.newBuilder().treeChangeListenerActors(treeChangeSupport.getListenerActors())
            .commitCohortActors(store.getCohortActors());
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

    public static Builder builder() {
        return new Builder();
    }

    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContextProvider schemaContextProvider;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private DataTree dataTree;
        private volatile boolean sealed;

        protected AbstractBuilder(final Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(final ShardIdentifier newId) {
            checkSealed();
            this.id = newId;
            return self();
        }

        public T peerAddresses(final Map<String, String> newPeerAddresses) {
            checkSealed();
            this.peerAddresses = newPeerAddresses;
            return self();
        }

        public T datastoreContext(final DatastoreContext newDatastoreContext) {
            checkSealed();
            this.datastoreContext = newDatastoreContext;
            return self();
        }

        public T schemaContextProvider(final SchemaContextProvider newSchemaContextProvider) {
            checkSealed();
            this.schemaContextProvider = Preconditions.checkNotNull(newSchemaContextProvider);
            return self();
        }

        public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = newRestoreFromSnapshot;
            return self();
        }

        public T dataTree(final DataTree newDataTree) {
            checkSealed();
            this.dataTree = newDataTree;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return Verify.verifyNotNull(schemaContextProvider.getSchemaContext());
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public DataTree getDataTree() {
            return dataTree;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                case CONFIGURATION:
                    return TreeType.CONFIGURATION;
                case OPERATIONAL:
                    return TreeType.OPERATIONAL;
                default:
                    throw new IllegalStateException("Unhandled logical store type "
                        + datastoreContext.getLogicalStoreType());
            }
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
            Preconditions.checkNotNull(schemaContextProvider, "schemaContextProvider should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        Builder() {
            super(Shard.class);
        }
    }

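    // Package-private and non-final, presumably so tests can substitute a deterministic Ticker;
    // request execution timing in handleRequestEnvelope() is measured through this hook.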
    Ticker ticker() {
        return Ticker.systemTicker();
    }

    void scheduleNextPendingTransaction() {
        self().tell(RESUME_NEXT_PENDING_TRANSACTION, ActorRef.noSender());
    }
}