/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateSupplier;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to it.
 * </p>
 */
public class Shard extends RaftActor {
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };
    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;
    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);
        snapshotCohort = new ShardSnapshotCohort(builder.getId().getMemberName(), transactionActorFactory, store,
                LOG, this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }
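    // Note: the effective commit timeout computed below is deliberately half of the configured
    // shard-transaction commit timeout, converted to milliseconds.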
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }
    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }
    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }
    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
                getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }
    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (final MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                        maybeError.get());
            }

            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications) message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction) message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext) message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                commitCoordinator.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else {
                super.handleNonRaftCommand(message);
            }
        }
    }
    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }
    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if (datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if (!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }
    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }
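    // Commit flow: continueCommit() either applies the candidate directly (no followers and
    // persistence disabled, or an empty commit) or hands it to persistData() for replication.
    // Once consensus is reached, applyState() is invoked, which ultimately calls finishCommit()
    // to apply the candidate and reply to the caller.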
    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();
        final TransactionIdentifier transactionId = cohortEntry.getTransactionID();

        // If we do not have any followers and we are not using persistence,
        // or if the cohortEntry has no modifications,
        // we can apply the modification to the state immediately.
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), transactionId, candidate);
            return;
        }

        final Payload payload;
        try {
            payload = CommitTransactionPayload.create(transactionId, candidate);
        } catch (IOException e) {
            LOG.error("{}: failed to encode transaction {} candidate {}", persistenceId(), transactionId, candidate,
                    e);
            // TODO: do we need to do something smarter here?
            throw Throwables.propagate(e);
        }

        persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), payload);
    }
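    // The commit-protocol handlers below act locally only when this shard is the active leader.
    // Otherwise the message is forwarded to the known leader, or queued with messageRetrySupport
    // when no leader is currently available.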
    private void handleCommitTransaction(final CommitTransaction commit) {
        if (isLeaderActive()) {
            if (!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
                shardMBean.incrementFailedTransactionsCount();
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                        "Could not commit transaction " + commit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final Identifier transactionID,
            @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            try {
                cohortEntry.commit();
            } catch (ExecutionException e) {
                // We may get a "store tree and candidate base differ" IllegalStateException from commit under
                // certain edge case scenarios so we'll try to re-apply the candidate from scratch as a last
                // resort. Eg, we're a follower and a tx payload is replicated but the leader goes down before
                // applying it to the state. We then become the leader and a second tx is pre-committed and
                // replicated. When consensus occurs, this will cause the first tx to be applied as a foreign
                // candidate via applyState prior to the second tx. Since the second tx has already been
                // pre-committed, when it gets here to commit it will get an IllegalStateException.
                //
                // FIXME - this is not an ideal way to handle this scenario. This is temporary - a cleaner
                // solution will be forthcoming.
                if (e.getCause() instanceof IllegalStateException) {
                    LOG.debug("{}: commit failed for transaction {} - retrying as foreign candidate", persistenceId(),
                            transactionID, e);
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } else {
                    throw e;
                }
            }

            sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final Identifier transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated.
            // However we can't use the original cohort because it was already pre-committed and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if (cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
                        getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }
    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());

        if (isLeaderActive()) {
            commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + canCommit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }
    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this, store.getSchemaContext());
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }
    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.
        //
        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However, with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                        .createForwardedBatchedModifications(batched,
                                datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }
    private boolean failIfIsolatedLeader(ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all"
                    + " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());

            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this, store.getSchemaContext());
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this,
                    store.getSchemaContext());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }
    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final TransactionIdentifier transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }
    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }
    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getIdentifier());
    }
    private void createTransaction(CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                    createTransaction.getTransactionId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }
    private ActorRef createTransaction(int transactionType, TransactionIdentifier transactionId) {
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId);
    }
    private void commitWithNewTransaction(final BatchedModifications modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.getTransactionID());
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }
    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }
    @Override
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }
    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        // Notify the shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
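    // applyState() is the RaftActor callback invoked after an entry has been persisted and replicated.
    // A null clientActor means the entry is replicated state coming from the leader; a non-null
    // clientActor means consensus was reached for a commit this shard initiated and a reply is owed.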
    @Override
    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof DataTreeCandidateSupplier) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyForeignCandidate(identifier,
                            ((DataTreeCandidateSupplier) data).getCandidate().getValue());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
            }
        } else {
            LOG.error("{}: Unknown state received {} ClassLoader {}", persistenceId(), data,
                    data.getClass().getClassLoader());
        }
    }
    private void applyModificationToState(ActorRef clientActor, Identifier identifier, Object modification) {
        if (modification == null) {
            LOG.error(
                    "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                    persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
        } else if (clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            //
            // The only implementation we know of is BatchedModifications, which also carries a transaction
            // identifier -- which we really need here.
            Preconditions.checkArgument(modification instanceof BatchedModifications);
            commitWithNewTransaction((BatchedModifications) modification);
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }
    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader, close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                        "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                        persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }
    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        boolean hasLeader = hasLeader();
        if (hasLeader && !isLeader()) {
            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<Object> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
                        datastoreContext.getShardBatchedModificationCount());

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions("The transaction was aborted due to inflight leadership "
                        + "change and the leader address isn't available.", this);
            }
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }
    @Override
    protected void pauseLeader(Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        commitCoordinator.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
    public static Builder builder() {
        return new Builder();
    }
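    /*
     * Illustrative builder usage (a sketch only - the actual values are supplied by the shard
     * manager at runtime; "shardId", "peerAddresses", "datastoreContext" and "schemaContext"
     * are placeholders):
     *
     *   Props props = Shard.builder()
     *           .id(shardId)
     *           .peerAddresses(peerAddresses)
     *           .datastoreContext(datastoreContext)
     *           .schemaContext(schemaContext)
     *           .props();
     */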
    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }
        public T id(ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = restoreFromSnapshot;
            return self();
        }
        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }
        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                case CONFIGURATION:
                    return TreeType.CONFIGURATION;
                case OPERATIONAL:
                    return TreeType.OPERATIONAL;
            }

            throw new IllegalStateException("Unhandled logical store type "
                    + datastoreContext.getLogicalStoreType());
        }
        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }
        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        protected Builder() {
            super(Shard.class);
        }
    }
}