/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to that backing data tree.
 */
public class Shard extends RaftActor {

    private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";

    @VisibleForTesting
    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
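
    /**
     * Constructs a Shard. Instantiated via {@link ShardCreator} so that Akka can recreate the
     * actor from Props.
     *
     * @param name identifier of this shard
     * @param peerAddresses map of raft peer ids to addresses, used for replication
     * @param datastoreContext configuration of this data store
     * @param schemaContext the SchemaContext backing the shard's data tree
     */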
    protected Shard(final ShardIdentifier name, final Map<String, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        super(name.toString(), new HashMap<>(peerAddresses), Optional.of(datastoreContext.getShardRaftConfig()),
                DataStoreVersions.CURRENT_VERSION);

        this.name = name.toString();
        this.datastoreContext = datastoreContext;

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(schemaContext);

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShardActor(getSelf());

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
    }
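
    /**
     * Sets transactionCommitTimeout to half the configured shard transaction commit timeout,
     * converted to milliseconds. (The halving appears intended to expire a stuck transaction
     * well before the front-end's own timeout fires.)
     */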
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }
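
    /**
     * Creates the Props for instantiating a Shard actor. All arguments are required; they are
     * captured by a ShardCreator so Akka can construct the actor on demand.
     */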
    public static Props props(final ShardIdentifier name,
            final Map<String, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        Preconditions.checkNotNull(name, "name should not be null");
        Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
        Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
        Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");

        return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        shardMBean.unregisterMBean();
    }
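
    /**
     * Handles messages replayed during Akka persistence recovery. A RecoveryFailure is logged and
     * recovery is completed anyway (see onRecoveryComplete); all other messages are delegated to
     * RaftActor.
     */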
    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        if (LOG.isDebugEnabled()) {
            LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
                message.getClass().toString(), getSender());
        }

        if (message instanceof RecoveryFailure) {
            LOG.error("{}: Recovery failed because of this cause",
                persistenceId(), ((RecoveryFailure) message).cause());

            // Even though recovery failed, we still need to finish our recovery, eg send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
            if (LOG.isTraceEnabled()) {
                appendEntriesReplyTracker.begin();
            }
        }
    }
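
    /**
     * Main message dispatcher. Each message is first run through the appendEntriesReplyTracker so
     * delayed AppendEntriesReply messages can be detected; transaction lifecycle messages are then
     * routed to the commit coordinator or their specific handlers, and anything unrecognized falls
     * through to RaftActor.
     */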
    @Override
    public void onReceiveCommand(final Object message) throws Exception {

        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if (context.error().isPresent()) {
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    context.error());
        }

        try {
            if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCreateTransaction(message);
            } else if (BatchedModifications.class.isInstance(message)) {
                handleBatchedModifications((BatchedModifications) message);
            } else if (message instanceof ForwardedReadyTransaction) {
                commitCoordinator.handleForwardedReadyTransaction((ForwardedReadyTransaction) message,
                        getSender(), this);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction) message);
            } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
                handleTransactionCommitTimeoutCheck();
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext) message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else {
                super.onReceiveCommand(message);
            }
        } finally {
            context.done();
        }
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return new ShardLeaderStateChanged(memberId, leaderId,
                isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent(),
                leaderPayloadVersion);
    }

    private void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if (datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if (!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }
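
    /**
     * Periodic check (driven by TX_COMMIT_TIMEOUT_CHECK_MESSAGE) that aborts the current
     * in-progress transaction if it has exceeded transactionCommitTimeout and cleans up expired
     * entries from the commit queue.
     */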
    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if (cohortEntry != null) {
            if (cohortEntry.isExpired(transactionCommitTimeout)) {
                LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
                        persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }

        commitCoordinator.cleanupExpiredCohortEntries();
    }

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }
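
    /**
     * Continues a commit that has passed canCommit: if there are no followers and no persistence,
     * or the candidate is an empty commit, the candidate is applied to the state immediately;
     * otherwise it is persisted and replicated via raft, and the commit is finished in applyState
     * once consensus is reached.
     */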
    void continueCommit(final CohortEntry cohortEntry) throws Exception {
        final DataTreeCandidate candidate = cohortEntry.getCohort().getCandidate();

        // If we do not have any followers and we are not using persistence,
        // or if the cohortEntry has no modifications,
        // we can apply the modification to the state immediately.
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                DataTreeCandidatePayload.create(candidate));
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if (!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
            shardMBean.incrementFailedTransactionsCount();
        }
    }
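
    /**
     * Final phase of the three-phase commit: commits the prepared cohort synchronously, replies to
     * the caller and updates the MBean statistics, notifying the commit coordinator when done.
     */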
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID,
            @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().commit().get();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to a timeout in the front-end. We still need to
            // finish committing the transaction since it was successfully persisted and replicated.
            // However, we can't use the original cohort because it was already pre-committed and
            // may conflict with the current commit or may have been aborted, so we commit with a
            // new transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if (cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCohort().getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Processing canCommit for transaction {}", persistenceId(), canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
    }

    private void noLeaderError(Object message) {
        // TODO: rather than throwing an immediate exception, we could schedule a timer to try again to make
        // it more resilient in case we're in the process of electing a new leader.
        getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
            "Could not find the leader for shard %s. This typically happens" +
            " when the system is coming up or recovering and a leader is being elected. Try again" +
            " later.", persistenceId()))), getSelf());
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.

        if (isLeader()) {
            try {
                commitCoordinator.handleBatchedModifications(batched, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                        batched.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader != null) {
                // TODO: what if this is not the first batch and leadership changed in between batched messages?
                // We could check if the commitCoordinator already has a cached entry and forward all the previous
                // batched modifications.
                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
                leader.forward(batched, getContext());
            } else {
                noLeaderError(batched);
            }
        }
    }

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        if (isLeader()) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader != null) {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                leader.forward(message, getContext());
            } else {
                noLeaderError(message);
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }
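
    /**
     * Aborts the given transaction if it is the current in-progress entry. The sender may be null
     * (e.g. for timeout-driven aborts from handleTransactionCommitTimeoutCheck), in which case no
     * reply is sent.
     */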
    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry != null) {
            LOG.debug("{}: Aborting transaction {}", persistenceId(), transactionID);

            // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
            // aborted during replication in which case we may still commit locally if replication
            // succeeds.
            commitCoordinator.currentTransactionComplete(transactionID, false);

            final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
            final ActorRef self = getSelf();

            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onSuccess(final Void v) {
                    shardMBean.incrementAbortTransactionsCount();

                    if (sender != null) {
                        sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), self);
                    }
                }

                @Override
                public void onFailure(final Throwable t) {
                    LOG.error("{}: An exception happened during abort", persistenceId(), t);

                    if (sender != null) {
                        sender.tell(new akka.actor.Status.Failure(t), self);
                    }
                }
            });
        }
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                "Could not find leader for shard %s so transaction cannot be created. This typically happens" +
                " when the system is coming up or recovering and a leader is being elected. Try again" +
                " later.", persistenceId()))), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion) {

        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId, clientVersion);
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if (LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
                transactionChainId, clientVersion);

        return transactionActor;
    }
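
    /**
     * Applies a modification received via raft replication by replaying it through a fresh
     * read-write transaction and committing synchronously. Failures are recorded on the MBean but
     * not propagated.
     */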
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        store.recoveryDone();
        // Notify the shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
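
    /**
     * Invoked by RaftActor once a log entry has reached replication consensus. With a null
     * clientActor the payload is a replica from the leader and is applied directly to the store;
     * otherwise it signals that the current commit can be finished.
     */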
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload) data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
            }
        } else if (data instanceof ModificationPayload) {
            try {
                applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
            } catch (ClassNotFoundException | IOException e) {
                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if (data instanceof CompositeModificationByteStringPayload) {
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if (modification == null) {
            LOG.error(
                "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
        } else if (clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        changeSupport.onLeadershipChange(isLeader);
        treeChangeSupport.onLeadershipChange(isLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }
    }

    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    private static class ShardCreator implements Creator<Shard> {

        private static final long serialVersionUID = 1L;

        final ShardIdentifier name;
        final Map<String, String> peerAddresses;
        final DatastoreContext datastoreContext;
        final SchemaContext schemaContext;

        ShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
                final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
            this.name = name;
            this.peerAddresses = peerAddresses;
            this.datastoreContext = datastoreContext;
            this.schemaContext = schemaContext;
        }

        @Override
        public Shard create() throws Exception {
            return new Shard(name, peerAddresses, datastoreContext, schemaContext);
        }
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
}