/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to the InMemoryDataTree.
 */
public class Shard extends RaftActor {

    protected static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";

    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext());

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name);

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
    }

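    // The commit timeout used internally is half the configured shard transaction commit
    // timeout, converted to milliseconds.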
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        shardMBean.unregisterMBean();
    }

    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        if (LOG.isDebugEnabled()) {
            LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
                    message.getClass().toString(), getSender());
        }

        if (message instanceof RecoveryFailure) {
            LOG.error("{}: Recovery failed because of this cause",
                    persistenceId(), ((RecoveryFailure) message).cause());

            // Even though recovery failed, we still need to finish our recovery, eg send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
            if (LOG.isTraceEnabled()) {
                appendEntriesReplyTracker.begin();
            }
        }
    }

    @Override
    public void onReceiveCommand(final Object message) throws Exception {

        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if (context.error().isPresent()) {
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    context.error());
        }

        try {
            if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCreateTransaction(message);
            } else if (BatchedModifications.class.isInstance(message)) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                commitCoordinator.handleForwardedReadyTransaction((ForwardedReadyTransaction) message,
                        getSender(), this);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                        resolved.getPeerAddress());
            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
                handleTransactionCommitTimeoutCheck();
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else {
                super.onReceiveCommand(message);
            }
        } finally {
            context.done();
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

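    // Only the leader includes its DataTree in the ShardLeaderStateChanged notification;
    // followers report an absent tree.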
    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return new ShardLeaderStateChanged(memberId, leaderId,
                isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent(),
                leaderPayloadVersion);
    }

    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if (datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if (!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

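    // Invoked on each TX_COMMIT_TIMEOUT_CHECK_MESSAGE tick: aborts the currently committing
    // transaction if it has exceeded transactionCommitTimeout and expires stale queued entries.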
    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if (cohortEntry != null) {
            if (cohortEntry.isExpired(transactionCommitTimeout)) {
                LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
                        persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }

        commitCoordinator.cleanupExpiredCohortEntries();
    }

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }

    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence,
        // or if the cohortEntry has no modifications,
        // we can apply the modification to the state immediately.
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                    DataTreeCandidatePayload.create(candidate));
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if (!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
            shardMBean.incrementFailedTransactionsCount();
        }
    }

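    // Commits the prepared cohort entry against the data tree, replies to the original sender
    // and, regardless of the outcome, marks the current transaction complete in the coordinator.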
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID,
            @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            cohortEntry.commit();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated;
            // however we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if (cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
    }

    private void noLeaderError(String errMessage, Object message) {
        // TODO: rather than throwing an immediate exception, we could schedule a timer to try again to make
        // it more resilient in case we're in the process of electing a new leader.
        getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(errMessage, persistenceId())),
                getSelf());
    }

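    // Hands the batched modifications to the commit coordinator on this shard, replying to the
    // sender with a Failure if the coordinator throws.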
    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        if (isLeader()) {
            failIfIsolatedLeader(getSender());

            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (leader != null) {
                // TODO: what if this is not the first batch and leadership changed in between batched messages?
                // We could check if the commitCoordinator already has a cached entry and forward all the previous
                // batched modifications.
                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
                leader.forward(batched, getContext());
            } else {
                noLeaderError("Could not commit transaction " + batched.getTransactionID(), batched);
            }
        }
    }

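    // Replies to the sender with a NoShardLeaderException and returns true if this shard is an
    // isolated leader, i.e. it has lost contact with all of its followers; returns false otherwise.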
    private boolean failIfIsolatedLeader(ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all" +
                    " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());

            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        if (isLeader()) {
            failIfIsolatedLeader(getSender());

            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader != null) {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            } else {
                noLeaderError("Could not commit transaction " + message.getTransactionID(), message);
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion) {

        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId, clientVersion);
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
                    failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                    createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                    createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if (LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        return createTypedTransactionActor(transactionType, transactionId,
                transactionChainId, clientVersion);
    }

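    // Applies a replicated modification by creating a new read-write transaction on the local
    // data tree and committing it synchronously.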
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(), persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        // notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

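    // Called after a log entry has been committed by the RAFT layer; dispatches on the payload
    // type to apply the replicated state locally or to finish an in-progress commit.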
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
            }
        } else if (data instanceof ModificationPayload) {
            try {
                applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
            } catch (ClassNotFoundException | IOException e) {
                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if (data instanceof CompositeModificationByteStringPayload) {
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if (modification == null) {
            LOG.error(
                    "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                    persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
        } else if (clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }
    }

    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

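    /**
     * Returns a new Builder for creating Shard Props. Illustrative usage (argument values are
     * supplied by the caller):
     *
     * <pre>
     *     Props props = Shard.builder()
     *             .id(shardId)
     *             .peerAddresses(peerAddresses)
     *             .datastoreContext(datastoreContext)
     *             .schemaContext(schemaContext)
     *             .props();
     * </pre>
     */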
    public static Builder builder() {
        return new Builder();
    }

    public static abstract class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        Builder() {
            super(Shard.class);
        }
    }
}