/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.util.StringIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 *
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests
 * it receives to the data tree.
 */
public class Shard extends RaftActor {

    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);

        setTransactionCommitTimeout();

        // Create a notifier actor for each cluster member.
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }

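    /**
     * Sets the internal commit timeout to half the configured shard transaction commit timeout,
     * presumably so that a stuck in-progress transaction is expired here before the front-end's
     * own timeout fires.
     */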
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

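    /**
     * Cleans up on actor shutdown: closes message retry support, cancels the periodic commit
     * timeout check, aborts any pending transactions and unregisters the shard MBean.
     */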
    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }

    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
                getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }

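    /**
     * Dispatches all non-Raft messages: transaction lifecycle messages (create, ready, canCommit,
     * commit, abort), listener registrations, context and schema updates, and housekeeping messages.
     * Anything unrecognized is delegated to the superclass.
     */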
    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (final MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                        maybeError.get());
            }

            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications) message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction) message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext) message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                commitCoordinator.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else {
                super.handleNonRaftCommand(message);
            }
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if (datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if (!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }

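    /**
     * Continues committing a prepared transaction. When there is nothing to replicate (no followers
     * and persistence disabled) or the candidate is empty, the modification is applied to the state
     * immediately; otherwise the candidate payload is persisted and replicated through Raft first.
     */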
    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence,
        // or if the cohortEntry has no modifications, we can apply the
        // modification to the state immediately.
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                    DataTreeCandidatePayload.create(candidate));
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if (isLeader()) {
            if (!commitCoordinator.handleCommit(new StringIdentifier(commit.getTransactionID()), getSender(), this)) {
                shardMBean.incrementFailedTransactionsCount();
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                        "Could not commit transaction " + commit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }

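    /**
     * Applies the transaction's candidate to the data tree and replies to the requester. A commit
     * failure whose cause is an IllegalStateException is retried once by re-applying the candidate
     * as a foreign candidate; see the in-line comment below for the edge case that makes this
     * necessary.
     */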
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final Identifier transactionID,
            @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            try {
                cohortEntry.commit();
            } catch (ExecutionException e) {
                // We may get a "store tree and candidate base differ" IllegalStateException from commit under
                // certain edge case scenarios so we'll try to re-apply the candidate from scratch as a last
                // resort. Eg, we're a follower and a tx payload is replicated but the leader goes down before
                // applying it to the state. We then become the leader and a second tx is pre-committed and
                // replicated. When consensus occurs, this will cause the first tx to be applied as a foreign
                // candidate via applyState prior to the second tx. Since the second tx has already been
                // pre-committed, when it gets here to commit it will get an IllegalStateException.
                //
                // FIXME - this is not an ideal way to handle this scenario. This is temporary - a cleaner
                // solution will be forthcoming.
                if (e.getCause() instanceof IllegalStateException) {
                    LOG.debug("{}: commit failed for transaction {} - retrying as foreign candidate", persistenceId(),
                            transactionID);
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } else {
                    throw e;
                }
            }

            sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

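    /**
     * Invoked via applyState once a persisted commit has been replicated to a majority of followers.
     * Looks up the corresponding cohort entry and completes the commit, falling back to re-applying
     * the candidate directly when the entry is no longer the current commit.
     */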
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final Identifier transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted earlier, most likely due to a timeout in the front-end. We still need to
            // finish committing the transaction since it was successfully persisted and replicated.
            // However, we can't use the original cohort because it was already pre-committed and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if (cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
                        getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: handleCanCommitTransaction for {}", persistenceId(), canCommit.getTransactionID());

        if (isLeader()) {
            commitCoordinator.handleCanCommit(new StringIdentifier(canCommit.getTransactionID()), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + canCommit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }

    @VisibleForTesting
    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this, store.getSchemaContext());
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.
        //
        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                        .createForwardedBatchedModifications(batched,
                                datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

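    /**
     * If this shard is currently an isolated leader, replies to the sender with a
     * NoShardLeaderException failure and returns true; otherwise returns false.
     */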
    private boolean failIfIsolatedLeader(ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all" +
                    " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this, store.getSchemaContext());
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

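    /**
     * Handles a transaction readied via a ShardTransaction actor and forwarded here. If this shard
     * is not the active leader, the message is converted to an equivalent ReadyLocalTransaction and
     * forwarded on to the leader.
     */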
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this,
                    store.getSchemaContext());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(new StringIdentifier(transactionID), sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                    createTransaction.getTransactionId(), createTransaction.getTransactionChainId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId, String transactionChainId) {
        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId);
    }

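    /**
     * Applies a replicated modification by creating a new read-write transaction and committing it
     * synchronously, updating the commit statistics accordingly.
     */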
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        // Notify the shard manager.
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

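    /**
     * Callback from the Raft layer once consensus is reached on a log entry. A null clientActor
     * means the payload is replicated state from the leader and is applied directly; otherwise this
     * is the signal to finish the locally coordinated commit.
     */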
    @Override
    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader.
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload) data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit.
                finishCommit(clientActor, identifier);
            }
        } else {
            LOG.error("{}: Unknown state received {} ClassLoader {}", persistenceId(), data,
                    data.getClass().getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, Identifier identifier, Object modification) {
        if (modification == null) {
            LOG.error(
                    "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                    persistenceId(), clientActor != null ? clientActor.path().toString() : null, identifier);
        } else if (clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

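    /**
     * Reacts to Raft role changes: notifies the listener support classes, closes all transaction
     * chains when leadership has been lost, and retries buffered messages once a leader is available.
     */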
    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader, close all the transaction chains.
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                        "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                        persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

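    /**
     * Invoked when the shard leader changes. If another member became the leader, any pending
     * transactions are converted to messages and forwarded to the new leader, or aborted when the
     * leader's address is not yet known.
     */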
    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        boolean hasLeader = hasLeader();
        if (hasLeader && !isLeader()) {
            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<Object> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
                        datastoreContext.getShardBatchedModificationCount());

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions(
                        "The transaction was aborted due to inflight leadership change and the leader address isn't available.",
                        this);
            }
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void pauseLeader(Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        commitCoordinator.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

    public static Builder builder() {
        return new Builder();
    }

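    /**
     * Fluent builder for Shard props. The builder is sealed by props(); subsequent mutation
     * attempts fail via checkSealed().
     */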
    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = restoreFromSnapshot;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
            case CONFIGURATION:
                return TreeType.CONFIGURATION;
            case OPERATIONAL:
                return TreeType.OPERATIONAL;
            default:
                throw new IllegalStateException("Unhandled logical store type "
                        + datastoreContext.getLogicalStoreType());
            }
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        private Builder() {
            super(Shard.class);
        }
    }
}