/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.util.StringIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree <br/>
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to the InMemoryDataTree.
 */
public class Shard extends RaftActor {

    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }

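    // Expire pending transaction commits after half the configured shard transaction commit
    // timeout, converted to milliseconds.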
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }

    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
                getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }

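    // Dispatches messages that are not part of the Raft protocol: transaction life-cycle messages,
    // listener registrations, configuration updates and internal housekeeping messages.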
    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (final MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                        maybeError.get());
            }

            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                        resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if(message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if(message instanceof RegisterRoleChangeListener){
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if(GET_SHARD_MBEAN_MESSAGE.equals(message)){
                sender().tell(getShardMBean(), self());
            } else if(message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if(message instanceof ServerRemoved){
                context().parent().forward(message, context());
            } else if(ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                commitCoordinator.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else {
                super.handleNonRaftCommand(message);
            }
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

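    // Applies an updated DatastoreContext at runtime: adjusts the commit queue capacity, the
    // transaction commit timeout, the persistence mode and the underlying Raft configuration.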
    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }

    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence,
        // or if the cohortEntry has no modifications,
        // we can apply the modification to the state immediately.
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                    DataTreeCandidatePayload.create(candidate));
        }
    }

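    // CommitTransaction is handed to the commit coordinator when this shard is the leader;
    // otherwise it is queued for retry (no leader known) or forwarded to the current leader.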
    private void handleCommitTransaction(final CommitTransaction commit) {
        if(isLeader()) {
            if(!commitCoordinator.handleCommit(new StringIdentifier(commit.getTransactionID()), getSender(), this)) {
                shardMBean.incrementFailedTransactionsCount();
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                        "Could not commit transaction " + commit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }

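    // Performs the final commit of an already prepared cohort entry, replies to the original
    // sender and updates the commit statistics.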
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final Identifier transactionID,
            @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            try {
                cohortEntry.commit();
            } catch(ExecutionException e) {
                // We may get a "store tree and candidate base differ" IllegalStateException from commit under
                // certain edge case scenarios so we'll try to re-apply the candidate from scratch as a last
                // resort. E.g., we're a follower and a tx payload is replicated but the leader goes down before
                // applying it to the state. We then become the leader and a second tx is pre-committed and
                // replicated. When consensus occurs, this will cause the first tx to be applied as a foreign
                // candidate via applyState prior to the second tx. Since the second tx has already been
                // pre-committed, when it gets here to commit it will get an IllegalStateException.
                //
                // FIXME - this is not an ideal way to handle this scenario. This is temporary - a cleaner
                // solution will be forthcoming.
                if(e.getCause() instanceof IllegalStateException) {
                    LOG.debug("{}: commit failed for transaction {} - retrying as foreign candidate", persistenceId(),
                            transactionID);
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } else {
                    throw e;
                }
            }

            sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull Identifier transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted earlier, most likely due to a timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated.
            // However, we can't use the original cohort because it was already preCommitted and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
                        getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: handleCanCommitTransaction for {}", persistenceId(), canCommit.getTransactionID());

        if (isLeader()) {
            commitCoordinator.handleCanCommit(new StringIdentifier(canCommit.getTransactionID()), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + canCommit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }

    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this, store.getSchemaContext());
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator.createForwardedBatchedModifications(
                        batched, datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for(BatchedModifications bm: newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

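    // Replies with a NoShardLeaderException when this shard is a leader that has lost contact
    // with its followers; returns true if such a failure reply was sent to the caller.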
    private boolean failIfIsolatedLeader(ActorRef sender) {
        if(isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                "Shard %s was the leader but has lost contact with all of its followers. Either all" +
                " other follower nodes are down or this node is isolated by a network partition.",
                persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this, store.getSchemaContext());
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this,
                    store.getSchemaContext());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(new StringIdentifier(transactionID), sender, this);
    }

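    // CreateTransaction is serviced locally when this shard is the leader; otherwise it is
    // forwarded to the leader, or failed with a NoShardLeaderException when no leader is known.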
    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId) {

        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId);
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            if(TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
                    failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
                transactionChainId);

        return transactionActor;
    }

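    // Applies a replicated Modification by creating a fresh read-write transaction, committing it
    // synchronously through the snapshot cohort and updating the commit statistics.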
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

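    // Invoked by the RaftActor once a log entry has been committed by consensus; applies the
    // replicated payload to the local data tree or, when the commit originated locally, completes
    // the pending commit.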
    @Override
    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload ){
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, Identifier identifier, Object modification) {
        if(modification == null) {
            LOG.error(
                "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if(LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }

        if(hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        boolean hasLeader = hasLeader();
        if(hasLeader && !isLeader()) {
            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if(leader != null) {
                Collection<Object> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
                        datastoreContext.getShardBatchedModificationCount());

                if(!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for(Object message: messagesToForward) {
                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions(
778 "The transacton was aborted due to inflight leadership change and the leader address isn't available.",
        if(hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void pauseLeader(Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        commitCoordinator.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    public ShardDataTree getDataStore() {
        return store;
    }

    ShardStats getShardMBean() {
        return shardMBean;
    }

    public static Builder builder() {
        return new Builder();
    }

    public static abstract class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = restoreFromSnapshot;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

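        // Maps the configured logical store type to the corresponding data tree type.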
        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
            case CONFIGURATION:
                return TreeType.CONFIGURATION;
            case OPERATIONAL:
                return TreeType.OPERATIONAL;
            }

            throw new IllegalStateException("Unhandled logical store type " + datastoreContext.getLogicalStoreType());
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {