/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDOMDataStore as its internal representation and delegates all requests
 * it receives to the InMemoryDOMDataStore.
 * </p>
 */
public class Shard extends RaftActor {

    private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    @VisibleForTesting
    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final InMemoryDOMDataStore store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ReadyTransactionReply READY_TRANSACTION_REPLY = new ReadyTransactionReply(
            Serialization.serializedActorPath(getSelf()));

    private final DOMTransactionFactory domTransactionFactory;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    protected Shard(final ShardIdentifier name, final Map<String, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        super(name.toString(), new HashMap<>(peerAddresses), Optional.of(datastoreContext.getShardRaftConfig()));

        this.name = name.toString();
        this.datastoreContext = datastoreContext;

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
                datastoreContext.getDataStoreProperties());

        if (schemaContext != null) {
            store.onGlobalContextUpdated(schemaContext);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());
        shardMBean.setShardActor(getSelf());

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        domTransactionFactory = new DOMTransactionFactory(store, shardMBean, LOG, this.name);

        commitCoordinator = new ShardCommitCoordinator(domTransactionFactory,
                TimeUnit.SECONDS.convert(5, TimeUnit.MINUTES),
                datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(domTransactionFactory, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
    }

    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);
    }

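    /**
     * Creates the Props for a Shard actor. A minimal usage sketch, assuming an enclosing
     * actor context and already-built arguments (in practice the shard manager typically
     * creates shard actors):
     *
     * <pre>{@code
     * ActorRef shard = context.actorOf(
     *         Shard.props(shardId, peerAddresses, datastoreContext, schemaContext), shardId.toString());
     * }</pre>
     */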
    public static Props props(final ShardIdentifier name,
            final Map<String, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        Preconditions.checkNotNull(name, "name should not be null");
        Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
        Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
        Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");

        return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
    }

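    /**
     * Creates a RoleChangeNotifier child actor that relays this shard's raft role changes to
     * registered listeners (see the RegisterRoleChangeListener handling in onReceiveCommand).
     */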
    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        shardMBean.unregisterMBean();
    }

    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
                message.getClass().toString(), getSender());
        }

        if (message instanceof RecoveryFailure){
            LOG.error("{}: Recovery failed because of this cause",
                persistenceId(), ((RecoveryFailure) message).cause());

            // Even though recovery failed, we still need to finish our recovery, e.g. send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
            if(LOG.isTraceEnabled()) {
                appendEntriesReplyTracker.begin();
            }
        }
    }

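    /**
     * Main message dispatch for a running Shard. Transaction, commit, and listener
     * registration messages are handled here; anything unrecognized is passed on to
     * RaftActor.onReceiveCommand.
     */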
    @Override
    public void onReceiveCommand(final Object message) throws Exception {

        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if(context.error().isPresent()){
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    context.error());
        }

        try {
            if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCreateTransaction(message);
            } else if (BatchedModifications.class.isInstance(message)) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                        resolved.getPeerAddress());
            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
                handleTransactionCommitTimeoutCheck();
            } else if(message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if(message instanceof RegisterRoleChangeListener){
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus){
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else {
                super.onReceiveCommand(message);
            }
        } finally {
            context.done();
        }
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

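    /**
     * Applies a DatastoreContext update received at runtime: adjusts the commit queue
     * capacity and transaction commit timeout, toggles persistence if that setting changed,
     * and refreshes the shard's raft configuration.
     */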
    private void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

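    /**
     * Invoked for the periodic TX_COMMIT_TIMEOUT_CHECK_MESSAGE scheduled in
     * onRecoveryComplete. Aborts the current in-progress transaction if it has been idle
     * longer than the configured commit timeout, so a stalled front-end cannot block the
     * commit pipeline indefinitely.
     */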
    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if(cohortEntry != null) {
            long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
            if(elapsed > transactionCommitTimeout) {
                LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
                        persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }
    }

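    /**
     * Handles the final commit message of the three phase commit. preCommit and commit are
     * performed together here as an optimization (see the comments below); the modification
     * is either applied to the state immediately or persisted and replicated first, in which
     * case finishCommit is invoked later via applyState.
     */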
    private void handleCommitTransaction(final CommitTransaction commit) {
        final String transactionID = commit.getTransactionID();

        LOG.debug("{}: Committing transaction {}", persistenceId(), transactionID);

        // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
        // this transaction.
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // We're not the current Tx - the Tx was likely expired b/c it took too long in
            // between the canCommit and commit messages.
            IllegalStateException ex = new IllegalStateException(
                    String.format("%s: Cannot commit transaction %s - it is not the current transaction",
                            persistenceId(), transactionID));
            LOG.error(ex.getMessage());
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
            return;
        }

        // We perform the preCommit phase here atomically with the commit phase. This is an
        // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
        // coordination of preCommit across shards in case of failure but preCommit should not
        // normally fail since we ensure only one concurrent 3-phase commit.

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().preCommit().get();

            // If we do not have any followers and we are not using persistence, or if the
            // cohortEntry has no modifications, we can apply the modification to the state
            // immediately.
            if((!hasFollowers() && !persistence().isRecoveryApplicable()) || (!cohortEntry.hasModifications())){
                applyModificationToState(getSender(), transactionID, cohortEntry.getModification());
            } else {
                Shard.this.persistData(getSender(), transactionID,
                        new ModificationPayload(cohortEntry.getModification()));
            }
        } catch (Exception e) {
            LOG.error("{}: An exception occurred while preCommitting transaction {}",
                    persistenceId(), cohortEntry.getTransactionID(), e);
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }

        cohortEntry.updateLastAccessTime();
    }

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated.
            // However we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                commitWithNewTransaction(cohortEntry.getModification());
                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }

            return;
        }

        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().commit().get();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

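    /**
     * First phase of the three phase commit: delegates to the ShardCommitCoordinator, which
     * ensures only one three phase commit is in progress at a time and replies to the sender.
     */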
    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Processing canCommit for transaction {}", persistenceId(), canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit, getSender(), self());
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.
        //
        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        if(isLeader()) {
            try {
                boolean ready = commitCoordinator.handleTransactionModifications(batched);
                if(ready) {
                    sender().tell(READY_TRANSACTION_REPLY, self());
                } else {
                    sender().tell(new BatchedModificationsReply(batched.getModifications().size()), self());
                }
            } catch (Exception e) {
                LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                        batched.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if(leader != null) {
                // TODO: what if this is not the first batch and leadership changed in between batched messages?
                // We could check if the commitCoordinator already has a cached entry and forward all the previous
                // batched modifications.
                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
                leader.forward(batched, getContext());
            } else {
                // TODO: rather than throwing an immediate exception, we could schedule a timer to try again to make
                // it more resilient in case we're in the process of electing a new leader.
                getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Could not find the leader for shard %s. This typically happens" +
                    " when the system is coming up or recovering and a leader is being elected. Try again" +
                    " later.", persistenceId()))), getSelf());
            }
        }
    }

    private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
        LOG.debug("{}: Readying transaction {}, client version {}", persistenceId(),
                ready.getTransactionID(), ready.getTxnClientVersion());

        // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
        // commitCoordinator in preparation for the subsequent three phase commit initiated by
        // the front-end.
        commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
                (MutableCompositeModification) ready.getModification());

        // Return our actor path as we'll handle the three phase commit, except if the Tx client
        // version < 1 (Helium-1 version). This means the Tx was initiated by a base Helium version
        // node. In that case, the subsequent 3-phase commit messages won't contain the
        // transactionId so, to maintain backwards compatibility, we create a separate cohort actor
        // to provide the compatible behavior.
        if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
            ActorRef replyActorPath = getSelf();
            if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
                LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
                replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
                        ready.getTransactionID()));
            }

            ReadyTransactionReply readyTransactionReply =
                    new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
                            ready.getTxnClientVersion());
            getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
                    readyTransactionReply, getSelf());
        } else {
            getSender().tell(READY_TRANSACTION_REPLY, getSelf());
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

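    /**
     * Aborts the given transaction if it is the current in-progress one.
     *
     * @param sender the actor to reply to; may be null when invoked from the commit timeout
     *               check, in which case no reply is sent
     */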
    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry != null) {
            LOG.debug("{}: Aborting transaction {}", persistenceId(), transactionID);

            // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
            // aborted during replication, in which case we may still commit locally if replication
            // succeeds.
            commitCoordinator.currentTransactionComplete(transactionID, false);

            final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
            final ActorRef self = getSelf();

            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onSuccess(final Void v) {
                    shardMBean.incrementAbortTransactionsCount();

                    if(sender != null) {
                        sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), self);
                    }
                }

                @Override
                public void onFailure(final Throwable t) {
                    LOG.error("{}: An exception happened during abort", persistenceId(), t);

                    if(sender != null) {
                        sender.tell(new akka.actor.Status.Failure(t), self);
                    }
                }
            });
        }
    }

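    /**
     * Creates a ShardTransaction actor if this shard is the leader; otherwise forwards the
     * request to the known leader, or fails with a NoShardLeaderException if no leader is
     * currently known.
     */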
    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                "Could not find leader for shard %s so transaction cannot be created. This typically happens" +
                " when the system is coming up or recovering and a leader is being elected. Try again" +
                " later.", persistenceId()))), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        domTransactionFactory.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion) {

        return transactionActorFactory.newShardTransaction(TransactionProxy.TransactionType.fromInt(transactionType),
                transactionId, transactionChainId, clientVersion);
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
                transactionChainId, clientVersion);

        return transactionActor;
    }

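    /**
     * Applies a modification through a fresh write-only transaction and commits it
     * synchronously. Used when applying replicated state from the leader and when finishing
     * a commit whose original cohort can no longer be used.
     */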
    private void commitWithNewTransaction(final Modification modification) {
        DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
        modification.apply(tx);
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (InterruptedException | ExecutionException e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.onGlobalContextUpdated(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        // Notify the shard manager.
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

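    /**
     * RaftActor callback invoked once a log entry has been persisted and replicated to a
     * majority of followers. Extracts the modification from the (possibly legacy) payload
     * and applies it to the local store.
     */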
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {

        if(data instanceof ModificationPayload) {
            try {
                applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
            } catch (ClassNotFoundException | IOException e) {
                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload) {
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if(modification == null) {
            LOG.error(
                "{}: modification is null - this is very unexpected, identifier = {}, clientActor = {}",
                persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        changeSupport.onLeadershipChange(isLeader);
        treeChangeSupport.onLeadershipChange(isLeader);

        // If this actor is no longer the leader, close all the transaction chains.
        if(!isLeader) {
            if(LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            domTransactionFactory.closeAllTransactionChains();
        }
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

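    /**
     * Serializable Creator used by props() so that Akka can (re)instantiate the Shard actor
     * with its construction-time arguments.
     */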
    private static class ShardCreator implements Creator<Shard> {

        private static final long serialVersionUID = 1L;

        final ShardIdentifier name;
        final Map<String, String> peerAddresses;
        final DatastoreContext datastoreContext;
        final SchemaContext schemaContext;

        ShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
                final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
            this.name = name;
            this.peerAddresses = peerAddresses;
            this.datastoreContext = datastoreContext;
            this.schemaContext = schemaContext;
        }

        @Override
        public Shard create() throws Exception {
            return new Shard(name, peerAddresses, datastoreContext, schemaContext);
        }
    }

    @VisibleForTesting
    public InMemoryDOMDataStore getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
}