/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to the InMemoryDataTree.
 */
public class Shard extends RaftActor {

    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }
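
    // Note: the commit timeout used by the periodic TX_COMMIT_TIMEOUT_CHECK_MESSAGE check (scheduled in
    // onRecoveryComplete) is half the configured shard transaction commit timeout, so the shard-side
    // expiry of a stuck in-progress transaction fires well before the full configured timeout elapses.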
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }

    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
                getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }
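
    /**
     * Handles messages that are not part of the Raft protocol: transaction creation, the three-phase
     * commit sequence, listener registrations, configuration updates and various housekeeping requests.
     * Anything unrecognized is delegated to RaftActor via super.handleNonRaftCommand.
     */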
    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (final MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                        maybeError.get());
            }

            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications) message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction) message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext) message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                commitCoordinator.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else {
                super.handleNonRaftCommand(message);
            }
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if (datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if (!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }
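
    /**
     * Continues a commit that has passed canCommit. If the shard has no followers and persistence is
     * not applicable, or the candidate contains no modifications, the candidate is applied to the state
     * immediately; otherwise its payload is persisted and replicated through the Raft layer, and
     * applyState/finishCommit complete the commit once consensus is reached.
     */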
    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence,
        // or if the cohortEntry has no modifications,
        // we can apply the modification to the state immediately.
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                    DataTreeCandidatePayload.create(candidate));
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if (isLeader()) {
            if (!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
                shardMBean.incrementFailedTransactionsCount();
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                        "Could not commit transaction " + commit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }
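
    /**
     * Final commit step: applies the prepared candidate to the data tree via the cohort, replies to the
     * caller with a CommitTransactionReply (or a Failure on error), updates the JMX counters and, in all
     * cases, informs the coordinator that the current transaction is complete.
     */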
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID,
            @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            try {
                cohortEntry.commit();
            } catch (ExecutionException e) {
                // We may get a "store tree and candidate base differ" IllegalStateException from commit under
                // certain edge case scenarios so we'll try to re-apply the candidate from scratch as a last
                // resort. Eg, we're a follower and a tx payload is replicated but the leader goes down before
                // applying it to the state. We then become the leader and a second tx is pre-committed and
                // replicated. When consensus occurs, this will cause the first tx to be applied as a foreign
                // candidate via applyState prior to the second tx. Since the second tx has already been
                // pre-committed, when it gets here to commit it will get an IllegalStateException.

                // FIXME - this is not an ideal way to handle this scenario. This is temporary - a cleaner
                // solution will be forthcoming.
                if (e.getCause() instanceof IllegalStateException) {
                    LOG.debug("{}: commit failed for transaction {} - retrying as foreign candidate", persistenceId(),
                            transactionID);
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } else {
                    throw e;
                }
            }

            sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated.
            // However we can't use the original cohort because it was already pre-committed and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if (cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
                        getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Processing canCommit for transaction {}", persistenceId(), canCommit.getTransactionID());

        if (isLeader()) {
            commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + canCommit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }

    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this, store.getSchemaContext());
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.
        //
        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However, with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator
                        .createForwardedBatchedModifications(batched,
                                datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

    private boolean failIfIsolatedLeader(ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all"
                    + " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }
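
    // A ReadyLocalTransaction carries an already-built DataTreeModification from a front-end with local
    // access to this shard's data tree, bypassing the ShardTransaction actor and the BatchedModifications
    // streaming path.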
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this, store.getSchemaContext());
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }
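
    // A ForwardedReadyTransaction is sent by a ShardTransaction actor when the front-end readies its
    // transaction. If this shard is no longer the active leader, the code below converts it to an
    // equivalent ReadyLocalTransaction and forwards that to the new leader.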
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this,
                    store.getSchemaContext());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId) {
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId);
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                    createTransaction.getTransactionId(), createTransaction.getTransactionChainId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId) {
        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if (LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        return createTypedTransactionActor(transactionType, transactionId, transactionChainId);
    }

    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    @VisibleForTesting
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        // Notify the shard manager.
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
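
    /**
     * Invoked by the Raft layer after a log entry has reached consensus. For a DataTreeCandidatePayload,
     * a null clientActor means we are applying a replicated entry from the leader, while a non-null
     * clientActor means this leader originated the entry and can now finish the commit. The
     * CompositeModification payloads are legacy replication formats retained for backwards compatibility.
     */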
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader.
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload) data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit.
                finishCommit(clientActor, identifier);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if (data instanceof CompositeModificationByteStringPayload) {
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if (modification == null) {
            // Note: arguments ordered to match the message placeholders (clientActor, then identifier).
            LOG.error(
                    "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                    persistenceId(), clientActor != null ? clientActor.path().toString() : null, identifier);
        } else if (clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader, close all the transaction chains.
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                        "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                        persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        boolean hasLeader = hasLeader();
        if (hasLeader && !isLeader()) {
            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<Object> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
                        datastoreContext.getShardBatchedModificationCount());

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions(
                        "The transaction was aborted due to an in-flight leadership change and the leader "
                        + "address isn't available.", this);
            }
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void pauseLeader(Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        commitCoordinator.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

    public static Builder builder() {
        return new Builder();
    }
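
    /**
     * Fluent builder for Shard {@link Props}. The self-referencing type parameter T lets Shard
     * subclasses extend the builder with additional setters while preserving method chaining.
     * A typical (illustrative) usage:
     *
     * <pre>
     * Props props = Shard.builder()
     *         .id(shardId)
     *         .peerAddresses(peerAddresses)
     *         .datastoreContext(datastoreContext)
     *         .schemaContext(schemaContext)
     *         .props();
     * </pre>
     *
     * Calling props() seals the builder and verifies the required fields.
     */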
    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = restoreFromSnapshot;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                case CONFIGURATION:
                    return TreeType.CONFIGURATION;
                case OPERATIONAL:
                    return TreeType.OPERATIONAL;
                default:
                    throw new IllegalStateException("Unhandled logical store type "
                            + datastoreContext.getLogicalStoreType());
            }
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        private Builder() {
            super(Shard.class);
        }
    }
}