Bug 4823: Offload generation of DCNs from Shard
[controller.git] / opendaylight / md-sal / sal-distributed-datastore / src / main / java / org / opendaylight / controller / cluster / datastore / Shard.java
1 /*
2  * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6  * and is available at http://www.eclipse.org/legal/epl-v10.html
7  */
8
9 package org.opendaylight.controller.cluster.datastore;
10
11 import akka.actor.ActorRef;
12 import akka.actor.ActorSelection;
13 import akka.actor.Cancellable;
14 import akka.actor.Props;
15 import akka.serialization.Serialization;
16 import com.google.common.annotations.VisibleForTesting;
17 import com.google.common.base.Optional;
18 import com.google.common.base.Preconditions;
19 import java.io.IOException;
20 import java.util.Collections;
21 import java.util.Map;
22 import java.util.concurrent.TimeUnit;
23 import javax.annotation.Nonnull;
24 import org.opendaylight.controller.cluster.common.actor.CommonConfig;
25 import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
26 import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
27 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
28 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
29 import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
30 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
31 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
32 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
33 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
34 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
35 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
36 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
37 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
38 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
39 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
40 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
41 import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
42 import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
43 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
44 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
45 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
46 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
47 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
48 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
49 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
50 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
51 import org.opendaylight.controller.cluster.datastore.modification.Modification;
52 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
53 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
54 import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
55 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
56 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
57 import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
58 import org.opendaylight.controller.cluster.raft.RaftActor;
59 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
60 import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
61 import org.opendaylight.controller.cluster.raft.RaftState;
62 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
63 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
64 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
65 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
66 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
67 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
68 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
69 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
70 import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
71 import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
72 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
73 import scala.concurrent.duration.Duration;
74 import scala.concurrent.duration.FiniteDuration;
75
76 /**
77  * A Shard represents a portion of the logical data tree <br/>
78  * <p>
79  * Our Shard uses InMemoryDataTree as it's internal representation and delegates all requests it
80  * </p>
81  */
82 public class Shard extends RaftActor {
83
84     protected static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";
85
86     @VisibleForTesting
87     static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";
88
89     @VisibleForTesting
90     static final String DEFAULT_NAME = "default";
91
92     // The state of this Shard
93     private final ShardDataTree store;
94
95     /// The name of this shard
96     private final String name;
97
98     private final ShardStats shardMBean;
99
100     private DatastoreContext datastoreContext;
101
102     private final ShardCommitCoordinator commitCoordinator;
103
104     private long transactionCommitTimeout;
105
106     private Cancellable txCommitTimeoutCheckSchedule;
107
108     private final Optional<ActorRef> roleChangeNotifier;
109
110     private final MessageTracker appendEntriesReplyTracker;
111
112     private final ShardTransactionActorFactory transactionActorFactory;
113
114     private final ShardSnapshotCohort snapshotCohort;
115
116     private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
117     private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
118
119
120     private ShardSnapshot restoreFromSnapshot;
121
122     private final ShardTransactionMessageRetrySupport messageRetrySupport;
123
124     protected Shard(AbstractBuilder<?, ?> builder) {
125         super(builder.getId().toString(), builder.getPeerAddresses(),
126                 Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);
127
128         this.name = builder.getId().toString();
129         this.datastoreContext = builder.getDatastoreContext();
130         this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
131
132         setPersistence(datastoreContext.isPersistent());
133
134         LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
135
136         store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
137                 new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
138                 new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);
139
140         shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
141                 datastoreContext.getDataStoreMXBeanType());
142         shardMBean.setShard(this);
143
144         if (isMetricsCaptureEnabled()) {
145             getContext().become(new MeteringBehavior(this));
146         }
147
148         commitCoordinator = new ShardCommitCoordinator(store,
149                 datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
150                 datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);
151
152         setTransactionCommitTimeout();
153
154         // create a notifier actor for each cluster member
155         roleChangeNotifier = createRoleChangeNotifier(name.toString());
156
157         appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
158                 getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
159
160         transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
161                 new Dispatchers(context().system().dispatchers()).getDispatcherPath(
162                         Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);
163
164         snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
165
166         messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
167     }
168
169     private void setTransactionCommitTimeout() {
170         transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
171                 datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
172     }
173
174     private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
175         ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
176             RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
177         return Optional.of(shardRoleChangeNotifier);
178     }
179
180     @Override
181     public void postStop() {
182         LOG.info("Stopping Shard {}", persistenceId());
183
184         super.postStop();
185
186         messageRetrySupport.close();
187
188         if(txCommitTimeoutCheckSchedule != null) {
189             txCommitTimeoutCheckSchedule.cancel();
190         }
191
192         commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);
193
194         shardMBean.unregisterMBean();
195     }
196
197     @Override
198     public void onReceiveRecover(final Object message) throws Exception {
199         LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
200             getSender());
201
202         super.onReceiveRecover(message);
203         if (LOG.isTraceEnabled()) {
204             appendEntriesReplyTracker.begin();
205         }
206     }
207
208     @Override
209     public void onReceiveCommand(final Object message) throws Exception {
210
211         MessageTracker.Context context = appendEntriesReplyTracker.received(message);
212
213         if(context.error().isPresent()){
214             LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
215                 context.error());
216         }
217
218         try {
219             if (CreateTransaction.isSerializedType(message)) {
220                 handleCreateTransaction(message);
221             } else if (BatchedModifications.class.isInstance(message)) {
222                 handleBatchedModifications((BatchedModifications)message);
223             } else if (message instanceof ForwardedReadyTransaction) {
224                 handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
225             } else if (message instanceof ReadyLocalTransaction) {
226                 handleReadyLocalTransaction((ReadyLocalTransaction)message);
227             } else if (CanCommitTransaction.isSerializedType(message)) {
228                 handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
229             } else if (CommitTransaction.isSerializedType(message)) {
230                 handleCommitTransaction(CommitTransaction.fromSerializable(message));
231             } else if (AbortTransaction.isSerializedType(message)) {
232                 handleAbortTransaction(AbortTransaction.fromSerializable(message));
233             } else if (CloseTransactionChain.isSerializedType(message)) {
234                 closeTransactionChain(CloseTransactionChain.fromSerializable(message));
235             } else if (message instanceof RegisterChangeListener) {
236                 changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
237             } else if (message instanceof RegisterDataTreeChangeListener) {
238                 treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
239             } else if (message instanceof UpdateSchemaContext) {
240                 updateSchemaContext((UpdateSchemaContext) message);
241             } else if (message instanceof PeerAddressResolved) {
242                 PeerAddressResolved resolved = (PeerAddressResolved) message;
243                 setPeerAddress(resolved.getPeerId().toString(),
244                         resolved.getPeerAddress());
245             } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
246                 commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
247             } else if(message instanceof DatastoreContext) {
248                 onDatastoreContext((DatastoreContext)message);
249             } else if(message instanceof RegisterRoleChangeListener){
250                 roleChangeNotifier.get().forward(message, context());
251             } else if (message instanceof FollowerInitialSyncUpStatus) {
252                 shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
253                 context().parent().tell(message, self());
254             } else if(GET_SHARD_MBEAN_MESSAGE.equals(message)){
255                 sender().tell(getShardMBean(), self());
256             } else if(message instanceof GetShardDataTree) {
257                 sender().tell(store.getDataTree(), self());
258             } else if(message instanceof ServerRemoved){
259                 context().parent().forward(message, context());
260             } else if(ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
261                 messageRetrySupport.onTimerMessage(message);
262             } else {
263                 super.onReceiveCommand(message);
264             }
265         } finally {
266             context.done();
267         }
268     }
269
270     private boolean hasLeader() {
271         return getLeaderId() != null;
272     }
273
274     public int getPendingTxCommitQueueSize() {
275         return commitCoordinator.getQueueSize();
276     }
277
278     public int getCohortCacheSize() {
279         return commitCoordinator.getCohortCacheSize();
280     }
281
282     @Override
283     protected Optional<ActorRef> getRoleChangeNotifier() {
284         return roleChangeNotifier;
285     }
286
287     @Override
288     protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
289         return new ShardLeaderStateChanged(memberId, leaderId,
290                 isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent(),
291                 leaderPayloadVersion);
292     }
293
294     protected void onDatastoreContext(DatastoreContext context) {
295         datastoreContext = context;
296
297         commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());
298
299         setTransactionCommitTimeout();
300
301         if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
302             setPersistence(true);
303         } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
304             setPersistence(false);
305         }
306
307         updateConfigParams(datastoreContext.getShardRaftConfig());
308     }
309
310     private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
311         return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
312     }
313
314     void continueCommit(final CohortEntry cohortEntry) {
315         final DataTreeCandidate candidate = cohortEntry.getCandidate();
316
317         // If we do not have any followers and we are not using persistence
318         // or if cohortEntry has no modifications
319         // we can apply modification to the state immediately
320         if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
321             applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
322         } else {
323             Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
324                     DataTreeCandidatePayload.create(candidate));
325         }
326     }
327
328     private void handleCommitTransaction(final CommitTransaction commit) {
329         if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
330             shardMBean.incrementFailedTransactionsCount();
331         }
332     }
333
334     private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID, @Nonnull final CohortEntry cohortEntry) {
335         LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
336
337         try {
338             cohortEntry.commit();
339
340             sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());
341
342             shardMBean.incrementCommittedTransactionCount();
343             shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
344
345         } catch (Exception e) {
346             sender.tell(new akka.actor.Status.Failure(e), getSelf());
347
348             LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
349                     transactionID, e);
350             shardMBean.incrementFailedTransactionsCount();
351         } finally {
352             commitCoordinator.currentTransactionComplete(transactionID, true);
353         }
354     }
355
356     private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
357         // With persistence enabled, this method is called via applyState by the leader strategy
358         // after the commit has been replicated to a majority of the followers.
359
360         CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
361         if (cohortEntry == null) {
362             // The transaction is no longer the current commit. This can happen if the transaction
363             // was aborted prior, most likely due to timeout in the front-end. We need to finish
364             // committing the transaction though since it was successfully persisted and replicated
365             // however we can't use the original cohort b/c it was already preCommitted and may
366             // conflict with the current commit or may have been aborted so we commit with a new
367             // transaction.
368             cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
369             if(cohortEntry != null) {
370                 try {
371                     store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
372                 } catch (DataValidationFailedException e) {
373                     shardMBean.incrementFailedTransactionsCount();
374                     LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
375                 }
376
377                 sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
378                         getSelf());
379             } else {
380                 // This really shouldn't happen - it likely means that persistence or replication
381                 // took so long to complete such that the cohort entry was expired from the cache.
382                 IllegalStateException ex = new IllegalStateException(
383                         String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
384                                 persistenceId(), transactionID));
385                 LOG.error(ex.getMessage());
386                 sender.tell(new akka.actor.Status.Failure(ex), getSelf());
387             }
388         } else {
389             finishCommit(sender, transactionID, cohortEntry);
390         }
391     }
392
393     private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
394         LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
395         commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
396     }
397
398     protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
399         try {
400             commitCoordinator.handleBatchedModifications(batched, sender, this);
401         } catch (Exception e) {
402             LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
403                     batched.getTransactionID(), e);
404             sender.tell(new akka.actor.Status.Failure(e), getSelf());
405         }
406     }
407
408     private void handleBatchedModifications(BatchedModifications batched) {
409         // This message is sent to prepare the modifications transaction directly on the Shard as an
410         // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
411         // BatchedModifications message, the caller sets the ready flag in the message indicating
412         // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
413         // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
414         // ReadyTransaction message.
415
416         // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
417         // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
418         // the primary/leader shard. However with timing and caching on the front-end, there's a small
419         // window where it could have a stale leader during leadership transitions.
420         //
421         boolean isLeaderActive = isLeaderActive();
422         if (isLeader() && isLeaderActive) {
423             handleBatchedModificationsLocal(batched, getSender());
424         } else {
425             ActorSelection leader = getLeader();
426             if (!isLeaderActive || leader == null) {
427                 messageRetrySupport.addMessageToRetry(batched, getSender(),
428                         "Could not commit transaction " + batched.getTransactionID());
429             } else {
430                 // TODO: what if this is not the first batch and leadership changed in between batched messages?
431                 // We could check if the commitCoordinator already has a cached entry and forward all the previous
432                 // batched modifications.
433                 LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
434                 leader.forward(batched, getContext());
435             }
436         }
437     }
438
439     private boolean failIfIsolatedLeader(ActorRef sender) {
440         if(isIsolatedLeader()) {
441             sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
442                     "Shard %s was the leader but has lost contact with all of its followers. Either all" +
443                     " other follower nodes are down or this node is isolated by a network partition.",
444                     persistenceId()))), getSelf());
445             return true;
446         }
447
448         return false;
449     }
450
451     protected boolean isIsolatedLeader() {
452         return getRaftState() == RaftState.IsolatedLeader;
453     }
454
455     private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
456         LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());
457
458         boolean isLeaderActive = isLeaderActive();
459         if (isLeader() && isLeaderActive) {
460             try {
461                 commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
462             } catch (Exception e) {
463                 LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
464                         message.getTransactionID(), e);
465                 getSender().tell(new akka.actor.Status.Failure(e), getSelf());
466             }
467         } else {
468             ActorSelection leader = getLeader();
469             if (!isLeaderActive || leader == null) {
470                 messageRetrySupport.addMessageToRetry(message, getSender(),
471                         "Could not commit transaction " + message.getTransactionID());
472             } else {
473                 LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
474                 message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
475                 leader.forward(message, getContext());
476             }
477         }
478     }
479
480     private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
481         LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());
482
483         boolean isLeaderActive = isLeaderActive();
484         if (isLeader() && isLeaderActive) {
485             commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
486         } else {
487             ActorSelection leader = getLeader();
488             if (!isLeaderActive || leader == null) {
489                 messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
490                         "Could not commit transaction " + forwardedReady.getTransactionID());
491             } else {
492                 LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);
493
494                 ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
495                         forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
496                 readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
497                 leader.forward(readyLocal, getContext());
498             }
499         }
500     }
501
502     private void handleAbortTransaction(final AbortTransaction abort) {
503         doAbortTransaction(abort.getTransactionID(), getSender());
504     }
505
506     void doAbortTransaction(final String transactionID, final ActorRef sender) {
507         commitCoordinator.handleAbort(transactionID, sender, this);
508     }
509
510     private void handleCreateTransaction(final Object message) {
511         if (isLeader()) {
512             createTransaction(CreateTransaction.fromSerializable(message));
513         } else if (getLeader() != null) {
514             getLeader().forward(message, getContext());
515         } else {
516             getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
517                     "Could not create a shard transaction", persistenceId())), getSelf());
518         }
519     }
520
521     private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
522         store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
523     }
524
525     private ActorRef createTypedTransactionActor(int transactionType,
526             ShardTransactionIdentifier transactionId, String transactionChainId) {
527
528         return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
529                 transactionId, transactionChainId);
530     }
531
532     private void createTransaction(CreateTransaction createTransaction) {
533         try {
534             if(TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
535                     failIfIsolatedLeader(getSender())) {
536                 return;
537             }
538
539             ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
540                 createTransaction.getTransactionId(), createTransaction.getTransactionChainId());
541
542             getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
543                     createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
544         } catch (Exception e) {
545             getSender().tell(new akka.actor.Status.Failure(e), getSelf());
546         }
547     }
548
549     private ActorRef createTransaction(int transactionType, String remoteTransactionId,
550             String transactionChainId) {
551
552
553         ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);
554
555         if(LOG.isDebugEnabled()) {
556             LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
557         }
558
559         ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
560                 transactionChainId);
561
562         return transactionActor;
563     }
564
565     private void commitWithNewTransaction(final Modification modification) {
566         ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
567         modification.apply(tx.getSnapshot());
568         try {
569             snapshotCohort.syncCommitTransaction(tx);
570             shardMBean.incrementCommittedTransactionCount();
571             shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
572         } catch (Exception e) {
573             shardMBean.incrementFailedTransactionsCount();
574             LOG.error("{}: Failed to commit", persistenceId(), e);
575         }
576     }
577
578     private void updateSchemaContext(final UpdateSchemaContext message) {
579         updateSchemaContext(message.getSchemaContext());
580     }
581
582     @VisibleForTesting
583     void updateSchemaContext(final SchemaContext schemaContext) {
584         store.updateSchemaContext(schemaContext);
585     }
586
587     private boolean isMetricsCaptureEnabled() {
588         CommonConfig config = new CommonConfig(getContext().system().settings().config());
589         return config.isMetricCaptureEnabled();
590     }
591
592     @Override
593     @VisibleForTesting
594     public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
595         return snapshotCohort;
596     }
597
598     @Override
599     @Nonnull
600     protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
601         return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
602                 restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
603     }
604
605     @Override
606     protected void onRecoveryComplete() {
607         restoreFromSnapshot = null;
608
609         //notify shard manager
610         getContext().parent().tell(new ActorInitialized(), getSelf());
611
612         // Being paranoid here - this method should only be called once but just in case...
613         if(txCommitTimeoutCheckSchedule == null) {
614             // Schedule a message to be periodically sent to check if the current in-progress
615             // transaction should be expired and aborted.
616             FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
617             txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
618                     period, period, getSelf(),
619                     TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
620         }
621     }
622
623     @Override
624     protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
625         if (data instanceof DataTreeCandidatePayload) {
626             if (clientActor == null) {
627                 // No clientActor indicates a replica coming from the leader
628                 try {
629                     store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
630                 } catch (DataValidationFailedException | IOException e) {
631                     LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
632                 }
633             } else {
634                 // Replication consensus reached, proceed to commit
635                 finishCommit(clientActor, identifier);
636             }
637         } else if (data instanceof CompositeModificationPayload) {
638             Object modification = ((CompositeModificationPayload) data).getModification();
639
640             applyModificationToState(clientActor, identifier, modification);
641         } else if(data instanceof CompositeModificationByteStringPayload ){
642             Object modification = ((CompositeModificationByteStringPayload) data).getModification();
643
644             applyModificationToState(clientActor, identifier, modification);
645         } else {
646             LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
647                     persistenceId(), data, data.getClass().getClassLoader(),
648                     CompositeModificationPayload.class.getClassLoader());
649         }
650     }
651
652     private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
653         if(modification == null) {
654             LOG.error(
655                     "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
656                     persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
657         } else if(clientActor == null) {
658             // There's no clientActor to which to send a commit reply so we must be applying
659             // replicated state from the leader.
660             commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
661         } else {
662             // This must be the OK to commit after replication consensus.
663             finishCommit(clientActor, identifier);
664         }
665     }
666
667     @Override
668     protected void onStateChanged() {
669         boolean isLeader = isLeader();
670         boolean hasLeader = hasLeader();
671         changeSupport.onLeadershipChange(isLeader, hasLeader);
672         treeChangeSupport.onLeadershipChange(isLeader, hasLeader);
673
674         // If this actor is no longer the leader close all the transaction chains
675         if (!isLeader) {
676             if(LOG.isDebugEnabled()) {
677                 LOG.debug(
678                     "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
679                     persistenceId(), getId());
680             }
681
682             store.closeAllTransactionChains();
683
684             commitCoordinator.abortPendingTransactions(
685                     "The transacton was aborted due to inflight leadership change.", this);
686         }
687
688         if(hasLeader && !isIsolatedLeader()) {
689             messageRetrySupport.retryMessages();
690         }
691     }
692
693     @Override
694     protected void onLeaderChanged(String oldLeader, String newLeader) {
695         shardMBean.incrementLeadershipChangeCount();
696
697         if(hasLeader() && !isIsolatedLeader()) {
698             messageRetrySupport.retryMessages();
699         }
700     }
701
702     @Override
703     protected void pauseLeader(Runnable operation) {
704         LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
705         commitCoordinator.setRunOnPendingTransactionsComplete(operation);
706     }
707
708     @Override
709     public String persistenceId() {
710         return this.name;
711     }
712
713     @VisibleForTesting
714     ShardCommitCoordinator getCommitCoordinator() {
715         return commitCoordinator;
716     }
717
718     public DatastoreContext getDatastoreContext() {
719         return datastoreContext;
720     }
721
722     @VisibleForTesting
723     public ShardDataTree getDataStore() {
724         return store;
725     }
726
727     @VisibleForTesting
728     ShardStats getShardMBean() {
729         return shardMBean;
730     }
731
732     public static Builder builder() {
733         return new Builder();
734     }
735
736     public static abstract class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
737         private final Class<S> shardClass;
738         private ShardIdentifier id;
739         private Map<String, String> peerAddresses = Collections.emptyMap();
740         private DatastoreContext datastoreContext;
741         private SchemaContext schemaContext;
742         private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
743         private volatile boolean sealed;
744
745         protected AbstractBuilder(Class<S> shardClass) {
746             this.shardClass = shardClass;
747         }
748
749         protected void checkSealed() {
750             Preconditions.checkState(!sealed, "Builder isalready sealed - further modifications are not allowed");
751         }
752
753         @SuppressWarnings("unchecked")
754         private T self() {
755             return (T) this;
756         }
757
758         public T id(ShardIdentifier id) {
759             checkSealed();
760             this.id = id;
761             return self();
762         }
763
764         public T peerAddresses(Map<String, String> peerAddresses) {
765             checkSealed();
766             this.peerAddresses = peerAddresses;
767             return self();
768         }
769
770         public T datastoreContext(DatastoreContext datastoreContext) {
771             checkSealed();
772             this.datastoreContext = datastoreContext;
773             return self();
774         }
775
776         public T schemaContext(SchemaContext schemaContext) {
777             checkSealed();
778             this.schemaContext = schemaContext;
779             return self();
780         }
781
782         public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
783             checkSealed();
784             this.restoreFromSnapshot = restoreFromSnapshot;
785             return self();
786         }
787
788         public ShardIdentifier getId() {
789             return id;
790         }
791
792         public Map<String, String> getPeerAddresses() {
793             return peerAddresses;
794         }
795
796         public DatastoreContext getDatastoreContext() {
797             return datastoreContext;
798         }
799
800         public SchemaContext getSchemaContext() {
801             return schemaContext;
802         }
803
804         public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
805             return restoreFromSnapshot;
806         }
807
808         public TreeType getTreeType() {
809             switch (datastoreContext.getLogicalStoreType()) {
810             case CONFIGURATION:
811                 return TreeType.CONFIGURATION;
812             case OPERATIONAL:
813                 return TreeType.OPERATIONAL;
814             }
815
816             throw new IllegalStateException("Unhandled logical store type " + datastoreContext.getLogicalStoreType());
817         }
818
819         protected void verify() {
820             Preconditions.checkNotNull(id, "id should not be null");
821             Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
822             Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
823             Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
824         }
825
826         public Props props() {
827             sealed = true;
828             verify();
829             return Props.create(shardClass, this);
830         }
831     }
832
833     public static class Builder extends AbstractBuilder<Builder, Shard> {
834         private Builder() {
835             super(Shard.class);
836         }
837     }
838 }