/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to the data tree.
 */
public class Shard extends RaftActor {

    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);

        setTransactionCommitTimeout();

        // Create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }
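
    // The commit timeout below is half the configured value, presumably so the periodic shard-side
    // expiry check (TX_COMMIT_TIMEOUT_CHECK_MESSAGE) fires well before the front-end's own
    // transaction timeout elapses.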

    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }

    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
            getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }
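
    // Every non-RAFT message is handled inside a MessageTracker context so that unusually long
    // gaps between expected AppendEntriesReply messages can be detected and traced.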

    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (final MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    maybeError.get());
            }

            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications) message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction) message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext) message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                commitCoordinator.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else {
                super.handleNonRaftCommand(message);
            }
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    @VisibleForTesting
    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }

    @VisibleForTesting
    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }
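
    // Only the leader attaches its data tree to the leader-state change notification; a follower
    // sends the variant carrying just the member/leader ids and the leader's payload version.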

    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if (datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if (!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }
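
    // An empty commit is one whose candidate leaves the root node UNMODIFIED; such a commit has no
    // effect on the data tree and therefore does not need to be persisted or replicated.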

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }

    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence,
        // or if the cohortEntry has no modifications,
        // we can apply the modification to the state immediately.
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                    DataTreeCandidatePayload.create(candidate));
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if (isLeader()) {
            if (!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
                shardMBean.incrementFailedTransactionsCount();
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                        "Could not commit transaction " + commit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID,
            @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            try {
                cohortEntry.commit();
            } catch (ExecutionException e) {
                // We may get a "store tree and candidate base differ" IllegalStateException from commit under
                // certain edge case scenarios so we'll try to re-apply the candidate from scratch as a last
                // resort. Eg, we're a follower and a tx payload is replicated but the leader goes down before
                // applying it to the state. We then become the leader and a second tx is pre-committed and
                // replicated. When consensus occurs, this will cause the first tx to be applied as a foreign
                // candidate via applyState prior to the second tx. Since the second tx has already been
                // pre-committed, when it gets here to commit it will get an IllegalStateException.

                // FIXME - this is not an ideal way to handle this scenario. This is temporary - a cleaner
                // solution will be forthcoming.
                if (e.getCause() instanceof IllegalStateException) {
                    LOG.debug("{}: commit failed for transaction {} - retrying as foreign candidate", persistenceId(),
                            transactionID);
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } else {
                    throw e;
                }
            }

            sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated
            // however we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if (cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
                        getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Processing canCommit for transaction {}", persistenceId(), canCommit.getTransactionID());

        if (isLeader()) {
            commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + canCommit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }

    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this, store.getSchemaContext());
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications =
                        commitCoordinator.createForwardedBatchedModifications(
                                batched, datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

    private boolean failIfIsolatedLeader(ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all"
                    + " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }
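
    // IsolatedLeader is the RAFT state a leader enters when it has lost contact with its followers,
    // for example during a network partition.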

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this, store.getSchemaContext());
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this,
                    store.getSchemaContext());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }
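
    // Transaction creation is serviced only by the leader. A non-leader forwards the request to the
    // current leader if one is known, otherwise it fails the request with NoShardLeaderException.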

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId) {

        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId);
    }
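
    // A read-only transaction can still be serviced by an isolated leader since it does not require
    // consensus; all other transaction types are failed fast via failIfIsolatedLeader().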

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                    && failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                    createTransaction.getTransactionId(), createTransaction.getTransactionChainId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if (LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
                transactionChainId);

        return transactionActor;
    }

    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    @VisibleForTesting
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        // Notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
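
    // applyState is invoked by the RAFT layer once an entry has been committed, i.e. persisted and
    // replicated to a majority, or when a follower applies replicated state from the leader.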

    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload) data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if (data instanceof CompositeModificationByteStringPayload) {
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if (modification == null) {
            LOG.error(
                "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                persistenceId(), clientActor != null ? clientActor.path().toString() : null, identifier);
        } else if (clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader, close all the transaction chains.
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        boolean hasLeader = hasLeader();
        if (hasLeader && !isLeader()) {
            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<Object> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
                        datastoreContext.getShardBatchedModificationCount());

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions(
                        "The transaction was aborted due to inflight leadership change and the leader address "
                        + "isn't available.", this);
            }
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }
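
    // Invoked by the RAFT layer before this shard relinquishes leadership; the supplied operation
    // is deferred until all pending transactions have completed.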

    @Override
    protected void pauseLeader(Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        commitCoordinator.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

    public static Builder builder() {
        return new Builder();
    }
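
    // Shard actors are constructed through this staged builder: the setters may only be called
    // before props() is invoked, since props() seals the builder, verifies the mandatory fields and
    // produces the Akka Props used to instantiate the shard.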

    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = restoreFromSnapshot;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                case CONFIGURATION:
                    return TreeType.CONFIGURATION;
                case OPERATIONAL:
                    return TreeType.OPERATIONAL;
                default:
                    throw new IllegalStateException("Unhandled logical store type "
                            + datastoreContext.getLogicalStoreType());
            }
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        Builder() {
            super(Shard.class);
        }
    }
}