 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
 * A Shard represents a portion of the logical data tree <br/>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
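 *
 * <p>Illustrative construction via the {@link Builder} defined at the bottom of this class (a sketch
 * only; {@code shardId}, {@code peerAddresses} and the two contexts stand for caller-supplied values):
 * <pre>
 *     Props props = Shard.builder()
 *             .id(shardId)
 *             .peerAddresses(peerAddresses)
 *             .datastoreContext(datastoreContext)
 *             .schemaContext(schemaContext)
 *             .props();
 * </pre>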
public class Shard extends RaftActor {

    protected static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";

    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    /// The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType());

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
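
    // Re-derives the transaction commit timeout (in milliseconds, half of the configured shard
    // transaction commit timeout); called from the constructor and again whenever a new
    // DatastoreContext is applied in onDatastoreContext().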
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);

    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        messageRetrySupport.close();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();

    public void onReceiveRecover(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
                    message.getClass().toString(), getSender());

        if (message instanceof RecoveryFailure){
            LOG.error("{}: Recovery failed because of this cause",
                    persistenceId(), ((RecoveryFailure) message).cause());

            // Even though recovery failed, we still need to finish our recovery, e.g. send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
            super.onReceiveRecover(message);
            if(LOG.isTraceEnabled()) {
                appendEntriesReplyTracker.begin();
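
    // Dispatches the data store protocol messages (transaction lifecycle, listener registration,
    // configuration updates); anything not handled here falls through to RaftActor via
    // super.onReceiveCommand().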
    public void onReceiveCommand(final Object message) throws Exception {
        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if(context.error().isPresent()){
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),

        if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
            handleCreateTransaction(message);
        } else if (BatchedModifications.class.isInstance(message)) {
            handleBatchedModifications((BatchedModifications)message);
        } else if (message instanceof ForwardedReadyTransaction) {
            handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
        } else if (message instanceof ReadyLocalTransaction) {
            handleReadyLocalTransaction((ReadyLocalTransaction)message);
        } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
            handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
        } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
            handleCommitTransaction(CommitTransaction.fromSerializable(message));
        } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
            handleAbortTransaction(AbortTransaction.fromSerializable(message));
        } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
            closeTransactionChain(CloseTransactionChain.fromSerializable(message));
        } else if (message instanceof RegisterChangeListener) {
            changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
        } else if (message instanceof RegisterDataTreeChangeListener) {
            treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
        } else if (message instanceof UpdateSchemaContext) {
            updateSchemaContext((UpdateSchemaContext) message);
        } else if (message instanceof PeerAddressResolved) {
            PeerAddressResolved resolved = (PeerAddressResolved) message;
            setPeerAddress(resolved.getPeerId().toString(),
                    resolved.getPeerAddress());
        } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
            commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
        } else if(message instanceof DatastoreContext) {
            onDatastoreContext((DatastoreContext)message);
        } else if(message instanceof RegisterRoleChangeListener){
            roleChangeNotifier.get().forward(message, context());
        } else if (message instanceof FollowerInitialSyncUpStatus) {
            shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
            context().parent().tell(message, self());
        } else if(GET_SHARD_MBEAN_MESSAGE.equals(message)){
            sender().tell(getShardMBean(), self());
        } else if(message instanceof GetShardDataTree) {
            sender().tell(store.getDataTree(), self());
        } else if(message instanceof ServerRemoved){
            context().parent().forward(message, context());
        } else if(ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
            messageRetrySupport.onTimerMessage(message);
            super.onReceiveCommand(message);

    private boolean hasLeader() {
        return getLeaderId() != null;

    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();

    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;

    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return new ShardLeaderStateChanged(memberId, leaderId,
                isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent(),
                leaderPayloadVersion);

    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);

        updateConfigParams(datastoreContext.getShardRaftConfig());

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());

    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence
        // or if cohortEntry has no modifications
        // we can apply modification to the state immediately
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
            Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                    DataTreeCandidatePayload.create(candidate));

    private void handleCommitTransaction(final CommitTransaction commit) {
        if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
            shardMBean.incrementFailedTransactionsCount();

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID, @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

            cohortEntry.commit();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
            shardMBean.incrementFailedTransactionsCount();

            commitCoordinator.currentTransactionComplete(transactionID, true);

    private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated
            // however we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted so we commit with a new
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);

                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());

            finishCommit(sender, transactionID, cohortEntry);

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Handling canCommit for transaction {}", persistenceId(), canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);

    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
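
        // Illustrative front-end message sequence for the coordinated commit (a sketch; all of these
        // message types are handled in onReceiveCommand above):
        //   1. BatchedModifications (one or more; the last carries the ready flag) - the reply carries
        //      the cohort actor path used for the remaining phases
        //   2. CanCommitTransaction - phase one of the three-phase commit
        //   3. CommitTransaction to complete the commit, or AbortTransaction to cancel it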
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
                // TODO: what if this is not the first batch and leadership changed in between batched messages?
                // We could check if the commitCoordinator already has a cached entry and forward all the previous
                // batched modifications.
                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
                leader.forward(batched, getContext());

    private boolean failIfIsolatedLeader(ActorRef sender) {
        if(isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all" +
                    " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());

    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());

    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);

    private void handleCreateTransaction(final Object message) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion) {
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId, clientVersion);

    private void createTransaction(CreateTransaction createTransaction) {
            if(TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
                    failIfIsolatedLeader(getSender())) {

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                    createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                    createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {
        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
                transactionChainId, clientVersion);

        return transactionActor;
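
    // Applies a modification received via replication (see applyModificationToState below) by creating
    // a fresh read-write transaction, applying the modification to it and committing it synchronously
    // through the snapshot cohort.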
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());

    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();

    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;

    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);

    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        // notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
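
    // Invoked by the RAFT layer after a log entry has been persisted and replicated to a majority of
    // followers. Applies the payload to the in-memory store and, when a client actor is present,
    // completes the commit by replying to it.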
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();
            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload) {
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();
            applyModificationToState(clientActor, identifier, modification);
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if(modification == null) {
                "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);

    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if(LOG.isDebugEnabled()) {
            "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
            persistenceId(), getId());

        store.closeAllTransactionChains();

        commitCoordinator.abortPendingTransactions(
                "The transaction was aborted due to in-flight leadership change.", this);
        if(hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();

    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        if(hasLeader() && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();

    protected void pauseLeader(Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        commitCoordinator.setRunOnPendingTransactionsComplete(operation);

    public String persistenceId() {

    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;

    public ShardDataTree getDataStore() {

    ShardStats getShardMBean() {

    public static Builder builder() {
        return new Builder();

    public static abstract class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        @SuppressWarnings("unchecked")

        public T id(ShardIdentifier id) {

        public T peerAddresses(Map<String, String> peerAddresses) {
            this.peerAddresses = peerAddresses;

        public T datastoreContext(DatastoreContext datastoreContext) {
            this.datastoreContext = datastoreContext;

        public T schemaContext(SchemaContext schemaContext) {
            this.schemaContext = schemaContext;

        public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            this.restoreFromSnapshot = restoreFromSnapshot;

        public ShardIdentifier getId() {

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;

        public SchemaContext getSchemaContext() {
            return schemaContext;

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
                return TreeType.CONFIGURATION;
                return TreeType.OPERATIONAL;

            throw new IllegalStateException("Unhandled logical store type " + datastoreContext.getLogicalStoreType());

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");

        public Props props() {
            return Props.create(shardClass, this);

    public static class Builder extends AbstractBuilder<Builder, Shard> {