/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.actor.Status.Failure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Ticker;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Range;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.access.ABIVersion;
import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
import org.opendaylight.controller.cluster.access.commands.LocalHistoryRequest;
import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
import org.opendaylight.controller.cluster.access.concepts.Request;
import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
import org.opendaylight.controller.cluster.access.concepts.RequestException;
import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
import org.opendaylight.controller.cluster.access.concepts.RetiredGenerationException;
import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TipProducingDataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
/**
 * A Shard represents a portion of the logical data tree.
 *
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests
 * it receives to the InMemoryDataTree.
 */
public class Shard extends RaftActor {
    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };
    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };
    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";
    private static final Collection<ABIVersion> SUPPORTED_ABIVERSIONS;

    static {
        // Trim the first and last ABIVersion constants, which are placeholder boundary versions
        // rather than supported protocol versions, and advertise the rest newest-first.
        final ABIVersion[] values = ABIVersion.values();
        final ABIVersion[] real = Arrays.copyOfRange(values, 1, values.length - 1);
        SUPPORTED_ABIVERSIONS = ImmutableList.copyOf(real).reverse();
    }
    // FIXME: make this a dynamic property based on mailbox size and maximum number of clients
    private static final int CLIENT_MAX_MESSAGES = 1000;
    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    private final FrontendMetadata frontendMetadata = new FrontendMetadata();
    private final Map<FrontendIdentifier, LeaderFrontendState> knownFrontends = new HashMap<>();
    protected Shard(final AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        ShardDataTreeChangeListenerPublisherActorProxy treeChangeListenerPublisher =
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher");
        ShardDataChangeListenerPublisherActorProxy dataChangeListenerPublisher =
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher");
        if (builder.getDataTree() != null) {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getDataTree(),
                    treeChangeListenerPublisher, dataChangeListenerPublisher, name);
        } else {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getTreeType(),
                    builder.getDatastoreContext().getStoreRoot(), treeChangeListenerPublisher,
                    dataChangeListenerPublisher, name);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType(), this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store, LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for this shard
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction),
                self(), getContext(), shardMBean, builder.getId().getShardName());

        snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
                this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }
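
    // Note: the effective commit timeout below is half of the configured value; presumably this
    // leaves the front-end headroom to retry or time out cleanly before the back-end gives up.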
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }
    private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }
    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }
    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
            getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }
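
    /*
     * All non-RAFT messages land here. The MessageTracker context around the dispatch watches the
     * arrival interval of AppendEntriesReply messages, so unusually long gaps (for example caused
     * by a busy or blocked actor) can be detected and traced along with the messages processed
     * in between.
     */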
    @SuppressWarnings("checkstyle:IllegalCatch")
    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    maybeError.get());
            }

            if (message instanceof RequestEnvelope) {
                final long now = ticker().read();
                final RequestEnvelope envelope = (RequestEnvelope)message;

                try {
                    final RequestSuccess<?, ?> success = handleRequest(envelope, now);
                    if (success != null) {
                        envelope.sendSuccess(success, ticker().read() - now);
                    }
                } catch (RequestException e) {
                    LOG.debug("{}: request {} failed", persistenceId(), envelope, e);
                    envelope.sendFailure(e, ticker().read() - now);
                } catch (Exception e) {
                    LOG.debug("{}: request {} caused failure", persistenceId(), envelope, e);
                    envelope.sendFailure(new RuntimeRequestException("Request failed to process", e),
                        ticker().read() - now);
                }
            } else if (message instanceof ConnectClientRequest) {
                handleConnectClient((ConnectClientRequest)message);
            } else if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                store.checkForExpiredTransactions(transactionCommitTimeout);
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                store.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else {
                super.handleNonRaftCommand(message);
            }
        }
    }
    // Acquire our frontend tracking handle and verify generation matches
    private LeaderFrontendState getFrontend(final ClientIdentifier clientId) throws RequestException {
        final LeaderFrontendState existing = knownFrontends.get(clientId.getFrontendId());
        if (existing != null) {
            final int cmp = Long.compareUnsigned(existing.getIdentifier().getGeneration(), clientId.getGeneration());
            if (cmp == 0) {
                return existing;
            }
            if (cmp > 0) {
                LOG.debug("{}: rejecting request from outdated client {}", persistenceId(), clientId);
                throw new RetiredGenerationException(existing.getIdentifier().getGeneration());
            }

            LOG.info("{}: retiring state {}, outdated by request from client {}", persistenceId(), existing, clientId);
            knownFrontends.remove(clientId.getFrontendId());
        } else {
            LOG.debug("{}: client {} is not yet known", persistenceId(), clientId);
        }

        final LeaderFrontendState ret = new LeaderFrontendState(persistenceId(), clientId, store);
        knownFrontends.put(clientId.getFrontendId(), ret);
        LOG.debug("{}: created state {} for client {}", persistenceId(), ret, clientId);
        return ret;
    }
    private static @Nonnull ABIVersion selectVersion(final ConnectClientRequest message) {
        final Range<ABIVersion> clientRange = Range.closed(message.getMinVersion(), message.getMaxVersion());
        for (ABIVersion v : SUPPORTED_ABIVERSIONS) {
            if (clientRange.contains(v)) {
                return v;
            }
        }

        throw new IllegalArgumentException(String.format(
            "No common version between backend versions %s and client versions %s", SUPPORTED_ABIVERSIONS,
            clientRange));
    }
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleConnectClient(final ConnectClientRequest message) {
        try {
            if (!isLeader() || !isLeaderActive()) {
                LOG.debug("{}: not currently leader, rejecting request {}", persistenceId(), message);
                throw new NotLeaderException(getSelf());
            }

            final ABIVersion selectedVersion = selectVersion(message);
            final LeaderFrontendState frontend = getFrontend(message.getTarget());
            frontend.reconnect();
            message.getReplyTo().tell(new ConnectClientSuccess(message.getTarget(), message.getSequence(), getSelf(),
                ImmutableList.of(), store.getDataTree(), CLIENT_MAX_MESSAGES).toVersion(selectedVersion),
                ActorRef.noSender());
        } catch (RequestException | RuntimeException e) {
            message.getReplyTo().tell(new Failure(e), ActorRef.noSender());
        }
    }
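
    /*
     * Dispatch an incoming RequestEnvelope to the matching per-client frontend state. Returns the
     * success to send to the caller, or null when the request will be completed asynchronously.
     */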
    private @Nullable RequestSuccess<?, ?> handleRequest(final RequestEnvelope envelope, final long now)
            throws RequestException {
        // We are not the leader, hence we want to fail-fast.
        if (!isLeader() || !isLeaderActive()) {
            LOG.debug("{}: not currently leader, rejecting request {}", persistenceId(), envelope);
            throw new NotLeaderException(getSelf());
        }

        final Request<?, ?> request = envelope.getMessage();
        if (request instanceof TransactionRequest) {
            final TransactionRequest<?> txReq = (TransactionRequest<?>)request;
            final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
            return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
        } else if (request instanceof LocalHistoryRequest) {
            final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>)request;
            final ClientIdentifier clientId = lhReq.getTarget().getClientId();
            return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);
        } else {
            LOG.debug("{}: rejecting unsupported request {}", persistenceId(), request);
            throw new UnsupportedRequestException(request);
        }
    }
    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return store.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }
    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }
    @Override
    protected LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
            final short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }
    protected void onDatastoreContext(final DatastoreContext context) {
        datastoreContext = context;

        setTransactionCommitTimeout();

        setPersistence(datastoreContext.isPersistent());

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }
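
    /*
     * Fast path: when this shard has no followers and persistence does not apply, there is nothing
     * to replicate or journal, so the payload can be applied to the local state immediately instead
     * of going through persistData().
     */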
    // applyState() will be invoked once consensus is reached on the payload
    void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
        boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
        if (canSkipPayload) {
            applyState(self(), id, payload);
        } else {
            // We are faking the sender
            persistData(self(), id, payload, batchHint);
        }
    }
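
    /*
     * The handlers below implement the shard side of the three-phase commit protocol. Each one
     * runs locally when this shard is the active leader and otherwise forwards to the current
     * leader, queueing the message for retry when no leader is known.
     */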
    private void handleCommitTransaction(final CommitTransaction commit) {
        if (isLeader()) {
            commitCoordinator.handleCommit(commit.getTransactionId(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                        "Could not commit transaction " + commit.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }
    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionId());

        if (isLeader()) {
            commitCoordinator.handleCanCommit(canCommit.getTransactionId(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + canCommit.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }
    @SuppressWarnings("checkstyle:IllegalCatch")
    protected void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionId(), e);
            sender.tell(new Failure(e), getSelf());
        }
    }
    private void handleBatchedModifications(final BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionId());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                        .createForwardedBatchedModifications(batched,
                                datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }
    private boolean failIfIsolatedLeader(final ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all"
                    + " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionId(), e);
                getSender().tell(new Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }
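
    /*
     * A ForwardedReadyTransaction references actor-local transaction state, so when it has to be
     * sent on to a remote leader it is repackaged as an equivalent ReadyLocalTransaction tagged
     * with the leader's payload version.
     */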
    private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionId());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }
    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionId(), getSender());
    }

    void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }
    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }
    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
        store.closeTransactionChain(id, null);
        store.purgeTransactionChain(id, null);
    }
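
    // Read-only transactions are still serviced by an isolated leader since they do not require
    // consensus; all other transaction types are failed fast via failIfIsolatedLeader().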
    @SuppressWarnings("checkstyle:IllegalCatch")
    private void createTransaction(final CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new Failure(e), getSelf());
        }
    }
    private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId);
    }
    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }
    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }
    @Override
    @VisibleForTesting
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }
    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store,
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }
    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
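
    /*
     * Invoked by the RAFT layer once consensus has been reached on a payload: apply it to the
     * ShardDataTree. Any non-Payload state is unexpected and is logged as an error.
     */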
    @Override
    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof Payload) {
            try {
                store.applyReplicatedPayload(identifier, (Payload)data);
            } catch (DataValidationFailedException | IOException e) {
                LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
            }
        } else {
            LOG.error("{}: Unknown state for {} received {}", persistenceId(), identifier, data);
        }
    }
    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }
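
    /*
     * On a leadership change, a former leader tries to hand off its pending (not yet committed)
     * transactions by converting them back into messages and sending them to the new leader; when
     * no leader address is available, those transactions are aborted instead.
     */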
    @Override
    protected void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        boolean hasLeader = hasLeader();
        if (hasLeader && !isLeader()) {
            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<?> messagesToForward = convertPendingTransactionsToMessages();

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions("The transaction was aborted due to inflight leadership "
                        + "change and the leader address isn't available.", this);
            }
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }
    /**
     * Clears all pending transactions and converts them to messages to be forwarded to a new leader.
     *
     * @return the converted messages
     */
    public Collection<?> convertPendingTransactionsToMessages() {
        return commitCoordinator.convertPendingTransactionsToMessages(
                datastoreContext.getShardBatchedModificationCount());
    }
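
    // pauseLeader() defers the supplied RAFT operation until all of the shard's pending
    // transactions have completed.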
    @Override
    protected void pauseLeader(final Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        store.setRunOnPendingTransactionsComplete(operation);
    }
    @Override
    protected OnDemandRaftState.AbstractBuilder<?> newOnDemandRaftStateBuilder() {
        return OnDemandShardState.newBuilder().treeChangeListenerActors(treeChangeSupport.getListenerActors())
                .dataChangeListenerActors(changeSupport.getListenerActors())
                .commitCohortActors(store.getCohortActors());
    }
    @Override
    public String persistenceId() {
        return this.name;
    }
    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }
    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }
    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
    public static Builder builder() {
        return new Builder();
    }
    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private TipProducingDataTree dataTree;
        private volatile boolean sealed;
        protected AbstractBuilder(final Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }
        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(final ShardIdentifier newId) {
            checkSealed();
            this.id = newId;
            return self();
        }

        public T peerAddresses(final Map<String, String> newPeerAddresses) {
            checkSealed();
            this.peerAddresses = newPeerAddresses;
            return self();
        }

        public T datastoreContext(final DatastoreContext newDatastoreContext) {
            checkSealed();
            this.datastoreContext = newDatastoreContext;
            return self();
        }

        public T schemaContext(final SchemaContext newSchemaContext) {
            checkSealed();
            this.schemaContext = newSchemaContext;
            return self();
        }

        public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = newRestoreFromSnapshot;
            return self();
        }

        public T dataTree(final TipProducingDataTree newDataTree) {
            checkSealed();
            this.dataTree = newDataTree;
            return self();
        }
        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public TipProducingDataTree getDataTree() {
            return dataTree;
        }
        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                case CONFIGURATION:
                    return TreeType.CONFIGURATION;
                case OPERATIONAL:
                    return TreeType.OPERATIONAL;
                default:
                    throw new IllegalStateException("Unhandled logical store type "
                            + datastoreContext.getLogicalStoreType());
            }
        }
        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }
        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }
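
    /*
     * A concrete Shard is normally instantiated through the builder; a minimal sketch, assuming
     * suitable identifier, context and schema objects are already available:
     *
     *   Props props = Shard.builder()
     *           .id(shardId)
     *           .peerAddresses(peerAddresses)
     *           .datastoreContext(datastoreContext)
     *           .schemaContext(schemaContext)
     *           .props();
     *   ActorRef shard = actorSystem.actorOf(props, shardId.toString());
     */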
    public static class Builder extends AbstractBuilder<Builder, Shard> {
        private Builder() {
            super(Shard.class);
        }
    }

    Ticker ticker() {
        return Ticker.systemTicker();
    }
}