/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests
 * it receives to the InMemoryDataTree.
 */
public class Shard extends RaftActor {
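    // Internal message sent to self on a schedule to check whether the current
    // in-progress transaction has exceeded the configured commit timeout.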
    private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    @VisibleForTesting
    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    protected Shard(final ShardIdentifier name, final Map<String, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        super(name.toString(), new HashMap<>(peerAddresses), Optional.of(datastoreContext.getShardRaftConfig()));

        this.name = name.toString();
        this.datastoreContext = datastoreContext;

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(schemaContext);

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShardActor(getSelf());

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                TimeUnit.SECONDS.convert(5, TimeUnit.MINUTES),
                datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);

        setTransactionCommitTimeout();

        // Create a role change notifier actor for this shard.
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
    }
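
    /**
     * Caches the configured transaction commit timeout, converted to milliseconds so it can
     * be compared against elapsed wall-clock time in handleTransactionCommitTimeoutCheck.
     */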
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);
    }
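
    /**
     * Creates the Props for a Shard actor, validating that all required construction
     * parameters are non-null.
     */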
    public static Props props(final ShardIdentifier name,
            final Map<String, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        Preconditions.checkNotNull(name, "name should not be null");
        Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
        Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
        Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");

        return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        shardMBean.unregisterMBean();
    }

    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
                message.getClass().toString(), getSender());
        }

        if (message instanceof RecoveryFailure) {
            LOG.error("{}: Recovery failed because of this cause",
                persistenceId(), ((RecoveryFailure) message).cause());

            // Even though recovery failed, we still need to finish our recovery, e.g. send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
            if(LOG.isTraceEnabled()) {
                appendEntriesReplyTracker.begin();
            }
        }
    }

    @Override
    public void onReceiveCommand(final Object message) throws Exception {

        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if(context.error().isPresent()) {
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                context.error());
        }

        if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
            handleCreateTransaction(message);
        } else if (BatchedModifications.class.isInstance(message)) {
            handleBatchedModifications((BatchedModifications)message);
        } else if (message instanceof ForwardedReadyTransaction) {
            commitCoordinator.handleForwardedReadyTransaction((ForwardedReadyTransaction) message,
                getSender(), this);
        } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
            handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
        } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
            handleCommitTransaction(CommitTransaction.fromSerializable(message));
        } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
            handleAbortTransaction(AbortTransaction.fromSerializable(message));
        } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
            closeTransactionChain(CloseTransactionChain.fromSerializable(message));
        } else if (message instanceof RegisterChangeListener) {
            changeSupport.onMessage((RegisterChangeListener) message, isLeader());
        } else if (message instanceof RegisterDataTreeChangeListener) {
            treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
        } else if (message instanceof UpdateSchemaContext) {
            updateSchemaContext((UpdateSchemaContext) message);
        } else if (message instanceof PeerAddressResolved) {
            PeerAddressResolved resolved = (PeerAddressResolved) message;
            setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
        } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
            handleTransactionCommitTimeoutCheck();
        } else if(message instanceof DatastoreContext) {
            onDatastoreContext((DatastoreContext)message);
        } else if(message instanceof RegisterRoleChangeListener) {
            roleChangeNotifier.get().forward(message, context());
        } else if (message instanceof FollowerInitialSyncUpStatus) {
            shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
            context().parent().tell(message, self());
        } else {
            super.onReceiveCommand(message);
        }
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    private void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }
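
    /**
     * Invoked on each TX_COMMIT_TIMEOUT_CHECK_MESSAGE tick: aborts the current in-progress
     * transaction if it has not been accessed within the configured commit timeout.
     */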
    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if(cohortEntry != null) {
            long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
            if(elapsed > transactionCommitTimeout) {
                LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
                    persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }
    }
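
    /**
     * Continues the commit for the given cohort entry: the modifications are either applied
     * to the local state immediately or handed to the RAFT layer for persistence and
     * replication, after which applyState is invoked.
     */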
    void continueCommit(final CohortEntry cohortEntry) throws Exception {
        // If we do not have any followers and we are not using persistence, or if the
        // cohortEntry has no modifications, we can apply the modification to the state
        // immediately.
        if((!hasFollowers() && !persistence().isRecoveryApplicable()) || (!cohortEntry.hasModifications())) {
            applyModificationToState(getSender(), cohortEntry.getTransactionID(), cohortEntry.getModification());
        } else {
            Shard.this.persistData(getSender(), cohortEntry.getTransactionID(),
                new ModificationPayload(cohortEntry.getModification()));
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
            shardMBean.incrementFailedTransactionsCount();
        }
    }
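
    /**
     * Final phase of the three-phase commit: commits the cohort for the given transaction
     * and notifies the original sender of the outcome.
     */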
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted earlier, most likely due to a timeout in the front-end. We still need to
            // finish committing the transaction since it was successfully persisted and replicated.
            // However, we can't use the original cohort because it was already pre-committed and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                commitWithNewTransaction(cohortEntry.getModification());
                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }

            return;
        }

        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same-thread executor anyway.
            cohortEntry.getCohort().commit().get();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Processing canCommit for transaction {}", persistenceId(), canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However, with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        if(isLeader()) {
            try {
                commitCoordinator.handleBatchedModifications(batched, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if(leader != null) {
                // TODO: what if this is not the first batch and leadership changed in between batched messages?
                // We could check if the commitCoordinator already has a cached entry and forward all the previous
                // batched modifications.
                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
                leader.forward(batched, getContext());
            } else {
                // TODO: rather than throwing an immediate exception, we could schedule a timer to try again to make
                // it more resilient in case we're in the process of electing a new leader.
                getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Could not find the leader for shard %s. This typically happens" +
                    " when the system is coming up or recovering and a leader is being elected. Try again" +
                    " later.", persistenceId()))), getSelf());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }
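
    /**
     * Aborts the given transaction if it is the current in-progress entry. The sender may
     * be null when invoked from the commit timeout check, in which case no reply is sent.
     */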
    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry != null) {
            LOG.debug("{}: Aborting transaction {}", persistenceId(), transactionID);

            // We don't remove the cached cohort entry here (i.e. pass false) in case the Tx was
            // aborted during replication, in which case we may still commit locally if replication
            // succeeds.
            commitCoordinator.currentTransactionComplete(transactionID, false);

            final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
            final ActorRef self = getSelf();

            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onSuccess(final Void v) {
                    shardMBean.incrementAbortTransactionsCount();

                    if(sender != null) {
                        sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), self);
                    }
                }

                @Override
                public void onFailure(final Throwable t) {
                    LOG.error("{}: An exception happened during abort", persistenceId(), t);

                    if(sender != null) {
                        sender.tell(new akka.actor.Status.Failure(t), self);
                    }
                }
            });
        }
    }
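
    /**
     * Creates transactions only on the leader; otherwise forwards the request to the leader,
     * failing fast when no leader is known.
     */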
    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                "Could not find leader for shard %s so transaction cannot be created. This typically happens" +
                " when the system is coming up or recovering and a leader is being elected. Try again" +
                " later.", persistenceId()))), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion) {

        return transactionActorFactory.newShardTransaction(TransactionProxy.TransactionType.fromInt(transactionType),
            transactionId, transactionChainId, clientVersion);
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
            transactionChainId, clientVersion);

        return transactionActor;
    }
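
    /**
     * Applies a modification using a fresh read-write transaction on the data tree. Used when
     * there is no usable cohort, e.g. when applying replicated state as a follower or when the
     * original cohort was already pre-committed or aborted.
     */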
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        // Notify the shard manager.
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                period, period, getSelf(),
                TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
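
    /**
     * Applies a replicated log entry to the data store. The payload is normally a
     * ModificationPayload; the CompositeModification payload variants are also handled,
     * presumably for compatibility with entries persisted by earlier versions.
     */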
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {

        if(data instanceof ModificationPayload) {
            try {
                applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
            } catch (ClassNotFoundException | IOException e) {
                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload) {
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                persistenceId(), data, data.getClass().getClassLoader(),
                CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if(modification == null) {
            LOG.error(
                "{}: modification is null - this is very unexpected, identifier = {}, clientActor = {}",
                persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }
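
    /**
     * Invoked by the RAFT actor whenever this shard's behavior (leader/follower) changes,
     * allowing listener registrations and transaction chains to react to leadership moves.
     */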
    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        changeSupport.onLeadershipChange(isLeader);
        treeChangeSupport.onLeadershipChange(isLeader);

        // If this actor is no longer the leader, close all the transaction chains.
        if(!isLeader) {
            if(LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }
    }

    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }
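
    /**
     * Serializable Creator used with Akka Props to instantiate Shard actors with their
     * construction parameters.
     */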
    private static class ShardCreator implements Creator<Shard> {

        private static final long serialVersionUID = 1L;

        final ShardIdentifier name;
        final Map<String, String> peerAddresses;
        final DatastoreContext datastoreContext;
        final SchemaContext schemaContext;

        ShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
                final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
            this.name = name;
            this.peerAddresses = peerAddresses;
            this.datastoreContext = datastoreContext;
            this.schemaContext = schemaContext;
        }

        @Override
        public Shard create() throws Exception {
            return new Shard(name, peerAddresses, datastoreContext, schemaContext);
        }
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
}