/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses the InMemoryDOMDataStore as its internal representation and delegates all
 * requests it receives to the InMemoryDOMDataStore.
 * </p>
 */
public class Shard extends RaftActor {
    private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();

    private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final InMemoryDOMDataStore store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private SchemaContext schemaContext;

    private int createSnapshotTransactionCounter;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ReadyTransactionReply READY_TRANSACTION_REPLY = new ReadyTransactionReply(
            Serialization.serializedActorPath(getSelf()));

    private final DOMTransactionFactory transactionFactory;

    private final String txnDispatcherPath;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
    protected Shard(final ShardIdentifier name, final Map<String, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        super(name.toString(), new HashMap<>(peerAddresses), Optional.of(datastoreContext.getShardRaftConfig()));

        this.name = name.toString();
        this.datastoreContext = datastoreContext;
        this.schemaContext = schemaContext;
        this.txnDispatcherPath = new Dispatchers(context().system().dispatchers())
                .getDispatcherPath(Dispatchers.DispatcherType.Transaction);

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
                datastoreContext.getDataStoreProperties());

        if (schemaContext != null) {
            store.onGlobalContextUpdated(schemaContext);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());
        shardMBean.setShardActor(getSelf());

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        transactionFactory = new DOMTransactionFactory(store, shardMBean, LOG, this.name);

        commitCoordinator = new ShardCommitCoordinator(transactionFactory,
                TimeUnit.SECONDS.convert(5, TimeUnit.MINUTES),
                datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
    }
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);
    }
    public static Props props(final ShardIdentifier name,
            final Map<String, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        Preconditions.checkNotNull(name, "name should not be null");
        Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
        Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
        Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");

        return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
    }
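
    // Illustrative only: a parent actor (the ShardManager in this design) is assumed to
    // create the shard from the Props returned above, along the lines of:
    //
    //   ActorRef shard = getContext().actorOf(
    //       Shard.props(shardId, peerAddresses, datastoreContext, schemaContext),
    //       shardId.toString());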
    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }
    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        shardMBean.unregisterMBean();
    }
    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
                message.getClass().toString(), getSender());
        }

        if (message instanceof RecoveryFailure){
            LOG.error("{}: Recovery failed because of this cause",
                persistenceId(), ((RecoveryFailure) message).cause());

            // Even though recovery failed, we still need to finish our recovery, eg send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
            if(LOG.isTraceEnabled()) {
                appendEntriesReplyTracker.begin();
            }
        }
    }
    @Override
    public void onReceiveCommand(final Object message) throws Exception {

        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if(context.error().isPresent()){
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                context.error());
        }

        try {
            if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCreateTransaction(message);
            } else if (BatchedModifications.class.isInstance(message)) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                    resolved.getPeerAddress());
            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
                handleTransactionCommitTimeoutCheck();
            } else if(message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if(message instanceof RegisterRoleChangeListener){
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus){
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else {
                super.onReceiveCommand(message);
            }
        } finally {
            context.done();
        }
    }
    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }
    private void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }
    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if(cohortEntry != null) {
            long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
            if(elapsed > transactionCommitTimeout) {
                LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
                    persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }
    }
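
    // Note on the check above: TX_COMMIT_TIMEOUT_CHECK_MESSAGE is scheduled in
    // onRecoveryComplete() at one third of the commit timeout, so a cohort stuck between
    // canCommit and commit is detected and aborted within roughly 4/3 of
    // transactionCommitTimeout. doAbortTransaction is passed a null sender because the
    // abort is internally initiated and there is no caller to reply to.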
    private void handleCommitTransaction(final CommitTransaction commit) {
        final String transactionID = commit.getTransactionID();

        LOG.debug("{}: Committing transaction {}", persistenceId(), transactionID);

        // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
        // this transaction.
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // We're not the current Tx - the Tx was likely expired b/c it took too long in
            // between the canCommit and commit messages.
            IllegalStateException ex = new IllegalStateException(
                String.format("%s: Cannot commit transaction %s - it is not the current transaction",
                    persistenceId(), transactionID));
            LOG.error(ex.getMessage());
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
            return;
        }

        // We perform the preCommit phase here atomically with the commit phase. This is an
        // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
        // coordination of preCommit across shards in case of failure but preCommit should not
        // normally fail since we ensure only one concurrent 3-phase commit.

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().preCommit().get();

            // If we do not have any followers and we are not using persistence,
            // or if the cohortEntry has no modifications,
            // we can apply the modification to the state immediately.
            if((!hasFollowers() && !persistence().isRecoveryApplicable()) || (!cohortEntry.hasModifications())){
                applyModificationToState(getSender(), transactionID, cohortEntry.getModification());
            } else {
                Shard.this.persistData(getSender(), transactionID,
                    new ModificationPayload(cohortEntry.getModification()));
            }
        } catch (Exception e) {
            LOG.error("{}: An exception occurred while preCommitting transaction {}",
                persistenceId(), cohortEntry.getTransactionID(), e);
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }

        cohortEntry.updateLastAccessTime();
    }
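
    // Commit flow summary for the method above: the front-end sends canCommit and then
    // commit; preCommit is folded into the commit message on this shard as the comments
    // describe. On the replicated path, persistData hands the serialized modification to
    // the Raft layer, and finishCommit below is invoked via applyState once the entry has
    // been replicated to a majority of followers.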
    private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated
            // however we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                commitWithNewTransaction(cohortEntry.getModification());
                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                    String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                        persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }

            return;
        }

        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().commit().get();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }
    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Processing canCommit for transaction {}", persistenceId(), canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit, getSender(), self());
    }
    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        if(isLeader()) {
            try {
                boolean ready = commitCoordinator.handleTransactionModifications(batched);
                if(ready) {
                    sender().tell(READY_TRANSACTION_REPLY, self());
                } else {
                    sender().tell(new BatchedModificationsReply(batched.getModifications().size()), self());
                }
            } catch (Exception e) {
                LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if(leader != null) {
                // TODO: what if this is not the first batch and leadership changed in between batched messages?
                // We could check if the commitCoordinator already has a cached entry and forward all the previous
                // batched modifications.
                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
                leader.forward(batched, getContext());
            } else {
                // TODO: rather than throwing an immediate exception, we could schedule a timer to try again to make
                // it more resilient in case we're in the process of electing a new leader.
                getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Could not find the leader for shard %s. This typically happens" +
                    " when the system is coming up or recovering and a leader is being elected. Try again" +
                    " later.", persistenceId()))), getSelf());
            }
        }
    }
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
        LOG.debug("{}: Readying transaction {}, client version {}", persistenceId(),
            ready.getTransactionID(), ready.getTxnClientVersion());

        // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
        // commitCoordinator in preparation for the subsequent three phase commit initiated by
        // the front-end.
        commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
            (MutableCompositeModification) ready.getModification());

        // Return our actor path as we'll handle the three phase commit, except if the Tx client
        // version < 1 (Helium-1 version). This means the Tx was initiated by a base Helium version
        // node. In that case, the subsequent 3-phase commit messages won't contain the
        // transactionId so to maintain backwards compatibility, we create a separate cohort actor
        // to provide the compatible behavior.
        if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
            ActorRef replyActorPath = getSelf();
            if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
                LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
                replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
                    ready.getTransactionID()));
            }

            ReadyTransactionReply readyTransactionReply =
                new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
                    ready.getTxnClientVersion());
            getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
                readyTransactionReply, getSelf());
        } else {
            getSender().tell(READY_TRANSACTION_REPLY, getSelf());
        }
    }
    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }
    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry != null) {
            LOG.debug("{}: Aborting transaction {}", persistenceId(), transactionID);

            // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
            // aborted during replication in which case we may still commit locally if replication
            // succeeds.
            commitCoordinator.currentTransactionComplete(transactionID, false);

            final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
            final ActorRef self = getSelf();

            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onSuccess(final Void v) {
                    shardMBean.incrementAbortTransactionsCount();

                    if(sender != null) {
                        sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), self);
                    }
                }

                @Override
                public void onFailure(final Throwable t) {
                    LOG.error("{}: An exception happened during abort", persistenceId(), t);

                    if(sender != null) {
                        sender.tell(new akka.actor.Status.Failure(t), self);
                    }
                }
            });
        }
    }
    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                "Could not find leader for shard %s so transaction cannot be created. This typically happens" +
                " when the system is coming up or recovering and a leader is being elected. Try again" +
                " later.", persistenceId()))), getSelf());
        }
    }
    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        transactionFactory.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }
    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion) {

        DOMStoreTransaction transaction = transactionFactory.newTransaction(
            TransactionProxy.TransactionType.fromInt(transactionType), transactionId.toString(),
            transactionChainId);

        return createShardTransaction(transaction, transactionId, clientVersion);
    }
    private ActorRef createShardTransaction(DOMStoreTransaction transaction, ShardTransactionIdentifier transactionId,
            short clientVersion){
        return getContext().actorOf(
            ShardTransaction.props(transaction, getSelf(),
                schemaContext, datastoreContext, shardMBean,
                transactionId.getRemoteTransactionId(), clientVersion)
                .withDispatcher(txnDispatcherPath),
            transactionId.toString());
    }
    private void createTransaction(CreateTransaction createTransaction) {
        try {
            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }
    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
            transactionChainId, clientVersion);

        return transactionActor;
    }
    private void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
            throws ExecutionException, InterruptedException {
        DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
        commitCohort.preCommit().get();
        commitCohort.commit().get();
    }
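
    // The blocking get() calls above are intentional: as noted in handleCommitTransaction,
    // the in-memory data store runs its commit phases on a same-thread executor, so waiting
    // here does not park another dispatcher thread or touch actor state off-dispatcher.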
    private void commitWithNewTransaction(final Modification modification) {
        DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
        modification.apply(tx);
        try {
            syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (InterruptedException | ExecutionException e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }
    private void updateSchemaContext(final UpdateSchemaContext message) {
        this.schemaContext = message.getSchemaContext();
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.onGlobalContextUpdated(schemaContext);
    }
    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }
    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, persistenceId(), LOG);
    }
    @Override
    protected void onRecoveryComplete() {
        // Notify the shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                period, period, getSelf(),
                TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
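
    // For example, with a 30-second commit timeout (the assumed default for
    // shardTransactionCommitTimeoutInSeconds), the check message above fires every
    // 10 seconds, bounding how long a stalled cohort can hold the single commit slot.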
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {

        if(data instanceof ModificationPayload) {
            try {
                applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
            } catch (ClassNotFoundException | IOException e) {
                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
            }
        }
        else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload ){
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                persistenceId(), data, data.getClass().getClassLoader(),
                CompositeModificationPayload.class.getClassLoader());
        }
    }
    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if(modification == null) {
            LOG.error(
                "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                persistenceId(), clientActor != null ? clientActor.path().toString() : null, identifier);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }
    @Override
    protected void createSnapshot() {
        // Create a transaction actor. We are really going to treat the transaction as a worker
        // so that this actor does not get blocked building the snapshot. The transaction actor
        // stops itself after processing the CreateSnapshot message.

        ActorRef createSnapshotTransaction = createTransaction(
            TransactionProxy.TransactionType.READ_ONLY.ordinal(),
            "createSnapshot" + ++createSnapshotTransactionCounter, "",
            DataStoreVersions.CURRENT_VERSION);

        createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, self());
    }
    @Override
    protected void applySnapshot(final byte[] snapshotBytes) {
        // Since this will be done only on Recovery or when this actor is a Follower,
        // we can safely commit everything in here. We do not need to worry about event
        // notifications as they would have already been disabled on the follower.

        LOG.info("{}: Applying snapshot", persistenceId());
        try {
            DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();

            NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);

            // delete everything first
            transaction.delete(DATASTORE_ROOT);

            // Add everything from the remote node back
            transaction.write(DATASTORE_ROOT, node);
            syncCommitTransaction(transaction);
        } catch (InterruptedException | ExecutionException e) {
            LOG.error("{}: An exception occurred when applying snapshot", persistenceId(), e);
        } finally {
            LOG.info("{}: Done applying snapshot", persistenceId());
        }
    }
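
    // DATASTORE_ROOT is the empty YangInstanceIdentifier, i.e. the root of the data tree,
    // so the delete-then-write sequence above replaces the shard's entire contents with
    // the leader's snapshot within a single write transaction.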
    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        changeSupport.onLeadershipChange(isLeader);
        treeChangeSupport.onLeadershipChange(isLeader);

        // If this actor is no longer the leader close all the transaction chains
        if(!isLeader) {
            if(LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            transactionFactory.closeAllTransactionChains();
        }
    }
    @Override
    public String persistenceId() {
        return this.name;
    }
    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }
    private static class ShardCreator implements Creator<Shard> {

        private static final long serialVersionUID = 1L;

        final ShardIdentifier name;
        final Map<String, String> peerAddresses;
        final DatastoreContext datastoreContext;
        final SchemaContext schemaContext;

        ShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
                final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
            this.name = name;
            this.peerAddresses = peerAddresses;
            this.datastoreContext = datastoreContext;
            this.schemaContext = schemaContext;
        }

        @Override
        public Shard create() throws Exception {
            return new Shard(name, peerAddresses, datastoreContext, schemaContext);
        }
    }
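
    // Akka's Creator interface extends Serializable, which is why ShardCreator declares a
    // serialVersionUID: Props built from an explicit Creator can be validated and shipped
    // across the cluster, rather than capturing non-serializable enclosing state in a closure.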
    @VisibleForTesting
    public InMemoryDOMDataStore getDataStore() {
        return store;
    }
    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
}