Move MessageTracker to sal-clustering-commons
[controller.git] / opendaylight / md-sal / sal-distributed-datastore / src / main / java / org / opendaylight / controller / cluster / datastore / Shard.java
1 /*
2  * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6  * and is available at http://www.eclipse.org/legal/epl-v10.html
7  */
8
9 package org.opendaylight.controller.cluster.datastore;
10
11 import akka.actor.ActorRef;
12 import akka.actor.ActorSelection;
13 import akka.actor.Cancellable;
14 import akka.actor.Props;
15 import akka.serialization.Serialization;
16 import com.google.common.annotations.VisibleForTesting;
17 import com.google.common.base.Optional;
18 import com.google.common.base.Preconditions;
19 import java.io.IOException;
20 import java.util.Collection;
21 import java.util.Collections;
22 import java.util.Map;
23 import java.util.concurrent.ExecutionException;
24 import java.util.concurrent.TimeUnit;
25 import javax.annotation.Nonnull;
26 import org.opendaylight.controller.cluster.common.actor.CommonConfig;
27 import org.opendaylight.controller.cluster.common.actor.MessageTracker;
28 import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
29 import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
30 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
31 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
32 import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
33 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
34 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
35 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
36 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
37 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
38 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
39 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
40 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
41 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
42 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
43 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
44 import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
45 import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
46 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
47 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
48 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
49 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
50 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
51 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
52 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
53 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
54 import org.opendaylight.controller.cluster.datastore.modification.Modification;
55 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
56 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
57 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
58 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
59 import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
60 import org.opendaylight.controller.cluster.raft.RaftActor;
61 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
62 import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
63 import org.opendaylight.controller.cluster.raft.RaftState;
64 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
65 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
66 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
67 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
68 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
69 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
70 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
71 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
72 import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
73 import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
74 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
75 import scala.concurrent.duration.Duration;
76 import scala.concurrent.duration.FiniteDuration;
77
78 /**
79  * A Shard represents a portion of the logical data tree <br/>
80  * <p>
81  * Our Shard uses InMemoryDataTree as it's internal representation and delegates all requests it
82  * </p>
83  */
84 public class Shard extends RaftActor {
85
    /** Self-message scheduled periodically to expire timed-out commit transactions. */
    protected static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    /** Test-only message requesting this shard's stats MBean. */
    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    // The in-memory data tree state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    // JMX bean exposing shard statistics
    private final ShardStats shardMBean;

    // Mutable configuration; replaced wholesale when a DatastoreContext message arrives
    private DatastoreContext datastoreContext;

    // Coordinates the 3-phase commit handling for this shard's transactions
    private final ShardCommitCoordinator commitCoordinator;

    // Commit timeout in milliseconds - half the configured value (see setTransactionCommitTimeout)
    private long transactionCommitTimeout;

    // Handle for the periodic TX_COMMIT_TIMEOUT_CHECK_MESSAGE schedule, if started
    private Cancellable txCommitTimeoutCheckSchedule;

    // Notifier actor for raft role changes; always present after construction
    private final Optional<ActorRef> roleChangeNotifier;

    // Tracks AppendEntriesReply arrivals against the isolated-check interval (see constructor)
    private final MessageTracker appendEntriesReplyTracker;

    // Factory for ShardTransaction actors backed by this shard's data tree
    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    // Snapshot to restore from, if supplied by the builder
    private ShardSnapshot restoreFromSnapshot;

    // Queues and retries messages that arrive while no leader is known
    private final ShardTransactionMessageRetrySupport messageRetrySupport;
126     protected Shard(AbstractBuilder<?, ?> builder) {
127         super(builder.getId().toString(), builder.getPeerAddresses(),
128                 Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);
129
130         this.name = builder.getId().toString();
131         this.datastoreContext = builder.getDatastoreContext();
132         this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
133
134         setPersistence(datastoreContext.isPersistent());
135
136         LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
137
138         store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
139                 new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
140                 new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);
141
142         shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
143                 datastoreContext.getDataStoreMXBeanType());
144         shardMBean.setShard(this);
145
146         if (isMetricsCaptureEnabled()) {
147             getContext().become(new MeteringBehavior(this));
148         }
149
150         commitCoordinator = new ShardCommitCoordinator(store,
151                 datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
152                 datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);
153
154         setTransactionCommitTimeout();
155
156         // create a notifier actor for each cluster member
157         roleChangeNotifier = createRoleChangeNotifier(name.toString());
158
159         appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
160                 getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
161
162         transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
163                 new Dispatchers(context().system().dispatchers()).getDispatcherPath(
164                         Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);
165
166         snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
167
168         messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
169     }
170
171     private void setTransactionCommitTimeout() {
172         transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
173                 datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
174     }
175
176     private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
177         ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
178             RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
179         return Optional.of(shardRoleChangeNotifier);
180     }
181
    @Override
    public void postStop() {
        // Actor teardown: let the raft base class stop first, then release shard-local resources.
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        // Stop retrying messages that were queued while no leader was known.
        messageRetrySupport.close();

        // Cancel the periodic commit-timeout check, if it was ever scheduled.
        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        // Fail any transactions still queued for commit so front-ends are not left hanging.
        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }
198
199     @Override
200     protected void handleRecover(final Object message) {
201         LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
202             getSender());
203
204         super.handleRecover(message);
205         if (LOG.isTraceEnabled()) {
206             appendEntriesReplyTracker.begin();
207         }
208     }
209
    /**
     * Main command dispatch for the Shard actor. Each known datastore message
     * type is routed to its handler; anything unrecognized falls through to the
     * RaftActor base implementation. All handling runs inside the
     * appendEntriesReplyTracker context so time spent on other messages can be
     * correlated with late AppendEntriesReply arrivals.
     */
    @Override
    protected void handleCommand(final Object message) {

        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if(context.error().isPresent()){
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                context.error());
        }

        try {
            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                        resolved.getPeerAddress());
            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
                // Periodic self-message - expire any commits stuck past the timeout.
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if(message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if(message instanceof RegisterRoleChangeListener){
                // Role-change subscriptions are handled by the dedicated notifier child actor.
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if(GET_SHARD_MBEAN_MESSAGE.equals(message)){
                sender().tell(getShardMBean(), self());
            } else if(message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if(message instanceof ServerRemoved){
                context().parent().forward(message, context());
            } else if(ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else {
                super.handleCommand(message);
            }
        } finally {
            // Always close the tracker context, even when a handler throws.
            context.done();
        }
    }
271
272     private boolean hasLeader() {
273         return getLeaderId() != null;
274     }
275
    /** Returns the number of transactions currently queued in the commit coordinator. */
    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }
279
    /** Returns the number of entries in the commit coordinator's cohort cache. */
    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }
283
    /**
     * Returns the role-change notifier actor created in the constructor; always
     * present for a Shard.
     */
    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }
288
289     @Override
290     protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
291         return new ShardLeaderStateChanged(memberId, leaderId,
292                 isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent(),
293                 leaderPayloadVersion);
294     }
295
296     protected void onDatastoreContext(DatastoreContext context) {
297         datastoreContext = context;
298
299         commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());
300
301         setTransactionCommitTimeout();
302
303         if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
304             setPersistence(true);
305         } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
306             setPersistence(false);
307         }
308
309         updateConfigParams(datastoreContext.getShardRaftConfig());
310     }
311
312     private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
313         return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
314     }
315
316     void continueCommit(final CohortEntry cohortEntry) {
317         final DataTreeCandidate candidate = cohortEntry.getCandidate();
318
319         // If we do not have any followers and we are not using persistence
320         // or if cohortEntry has no modifications
321         // we can apply modification to the state immediately
322         if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
323             applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
324         } else {
325             Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
326                     DataTreeCandidatePayload.create(candidate));
327         }
328     }
329
330     private void handleCommitTransaction(final CommitTransaction commit) {
331         if (isLeader()) {
332             if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
333                 shardMBean.incrementFailedTransactionsCount();
334             }
335         } else {
336             ActorSelection leader = getLeader();
337             if (leader == null) {
338                 messageRetrySupport.addMessageToRetry(commit, getSender(),
339                         "Could not commit transaction " + commit.getTransactionID());
340             } else {
341                 LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
342                 leader.forward(commit, getContext());
343             }
344         }
345     }
346
    /**
     * Performs the final commit of the given cohort entry, replying to the
     * sender with CommitTransactionReply on success or Status.Failure on error,
     * and updating the shard MBean counters either way. Always marks the
     * transaction complete in the commit coordinator.
     *
     * @param sender actor to receive the commit reply
     * @param transactionID the transaction being committed
     * @param cohortEntry the cohort entry holding the prepared candidate
     */
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID, @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            try {
                cohortEntry.commit();
            } catch(ExecutionException e) {
                // We may get a "store tree and candidate base differ" IllegalStateException from commit under
                // certain edge case scenarios so we'll try to re-apply the candidate from scratch as a last
                // resort. Eg, we're a follower and a tx payload is replicated but the leader goes down before
                // applying it to the state. We then become the leader and a second tx is pre-committed and
                // replicated. When consensus occurs, this will cause the first tx to be applied as a foreign
                // candidate via applyState prior to the second tx. Since the second tx has already been
                // pre-committed, when it gets here to commit it will get an IllegalStateException.

                // FIXME - this is not an ideal way to handle this scenario. This is temporary - a cleaner
                // solution will be forthcoming.
                if(e.getCause() instanceof IllegalStateException) {
                    LOG.debug("{}: commit failed for transaction {} - retrying as foreign candidate", persistenceId(),
                            transactionID, e);
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } else {
                    // Anything else is a genuine failure - propagate to the outer catch.
                    throw e;
                }
            }

            sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            // Regardless of outcome, this transaction is no longer the current commit.
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }
388
389     private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
390         // With persistence enabled, this method is called via applyState by the leader strategy
391         // after the commit has been replicated to a majority of the followers.
392
393         CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
394         if (cohortEntry == null) {
395             // The transaction is no longer the current commit. This can happen if the transaction
396             // was aborted prior, most likely due to timeout in the front-end. We need to finish
397             // committing the transaction though since it was successfully persisted and replicated
398             // however we can't use the original cohort b/c it was already preCommitted and may
399             // conflict with the current commit or may have been aborted so we commit with a new
400             // transaction.
401             cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
402             if(cohortEntry != null) {
403                 try {
404                     store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
405                 } catch (DataValidationFailedException e) {
406                     shardMBean.incrementFailedTransactionsCount();
407                     LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
408                 }
409
410                 sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
411                         getSelf());
412             } else {
413                 // This really shouldn't happen - it likely means that persistence or replication
414                 // took so long to complete such that the cohort entry was expired from the cache.
415                 IllegalStateException ex = new IllegalStateException(
416                         String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
417                                 persistenceId(), transactionID));
418                 LOG.error(ex.getMessage());
419                 sender.tell(new akka.actor.Status.Failure(ex), getSelf());
420             }
421         } else {
422             finishCommit(sender, transactionID, cohortEntry);
423         }
424     }
425
426     private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
427         LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
428
429         if (isLeader()) {
430             commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
431         } else {
432             ActorSelection leader = getLeader();
433             if (leader == null) {
434                 messageRetrySupport.addMessageToRetry(canCommit, getSender(),
435                         "Could not canCommit transaction " + canCommit.getTransactionID());
436             } else {
437                 LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
438                 leader.forward(canCommit, getContext());
439             }
440         }
441     }
442
    /**
     * Applies a BatchedModifications message on this shard via the commit
     * coordinator; any exception is logged and reported back to the sender as
     * an akka Status.Failure.
     */
    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }
452
    /**
     * Entry point for BatchedModifications messages sent to the Shard. Applies
     * the batch locally when this shard is the active leader; otherwise either
     * queues the message for retry (no active leader) or reconstructs and
     * forwards the full batch history to the current leader.
     */
    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator.createForwardedBatchedModifications(
                        batched, datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for(BatchedModifications bm: newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }
491
492     private boolean failIfIsolatedLeader(ActorRef sender) {
493         if(isIsolatedLeader()) {
494             sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
495                     "Shard %s was the leader but has lost contact with all of its followers. Either all" +
496                     " other follower nodes are down or this node is isolated by a network partition.",
497                     persistenceId()))), getSelf());
498             return true;
499         }
500
501         return false;
502     }
503
    /**
     * Returns true when this shard's raft state is IsolatedLeader, i.e. it is a
     * leader that has lost contact with its followers.
     */
    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }
507
508     private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
509         LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());
510
511         boolean isLeaderActive = isLeaderActive();
512         if (isLeader() && isLeaderActive) {
513             try {
514                 commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
515             } catch (Exception e) {
516                 LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
517                         message.getTransactionID(), e);
518                 getSender().tell(new akka.actor.Status.Failure(e), getSelf());
519             }
520         } else {
521             ActorSelection leader = getLeader();
522             if (!isLeaderActive || leader == null) {
523                 messageRetrySupport.addMessageToRetry(message, getSender(),
524                         "Could not commit transaction " + message.getTransactionID());
525             } else {
526                 LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
527                 message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
528                 leader.forward(message, getContext());
529             }
530         }
531     }
532
    /**
     * Handles a ForwardedReadyTransaction (a transaction readied on a
     * ShardTransaction actor): hands it to the commit coordinator when this
     * shard is the active leader; otherwise queues it for retry or converts it
     * to a ReadyLocalTransaction and forwards that to the current leader.
     */
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                // Repackage as a ReadyLocalTransaction carrying the snapshot, stamped with the
                // leader's payload version, since the original actor-based form is local-only.
                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }
554
    // Handles an AbortTransaction request by delegating to doAbortTransaction
    // with the original sender as the reply target.
    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }
558
    /**
     * Aborts the given transaction via the commit coordinator.
     *
     * @param transactionID the transaction to abort
     * @param sender actor to receive the abort reply
     */
    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }
562
563     private void handleCreateTransaction(final Object message) {
564         if (isLeader()) {
565             createTransaction(CreateTransaction.fromSerializable(message));
566         } else if (getLeader() != null) {
567             getLeader().forward(message, getContext());
568         } else {
569             getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
570                     "Could not create a shard transaction", persistenceId())), getSelf());
571         }
572     }
573
    // Releases the shard-side resources associated with a closed transaction chain.
    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }
577
578     private ActorRef createTypedTransactionActor(int transactionType,
579             ShardTransactionIdentifier transactionId, String transactionChainId) {
580
581         return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
582                 transactionId, transactionChainId);
583     }
584
585     private void createTransaction(CreateTransaction createTransaction) {
586         try {
587             if(TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
588                     failIfIsolatedLeader(getSender())) {
589                 return;
590             }
591
592             ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
593                 createTransaction.getTransactionId(), createTransaction.getTransactionChainId());
594
595             getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
596                     createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
597         } catch (Exception e) {
598             getSender().tell(new akka.actor.Status.Failure(e), getSelf());
599         }
600     }
601
602     private ActorRef createTransaction(int transactionType, String remoteTransactionId,
603             String transactionChainId) {
604
605
606         ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);
607
608         if(LOG.isDebugEnabled()) {
609             LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
610         }
611
612         ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
613                 transactionChainId);
614
615         return transactionActor;
616     }
617
    /**
     * Applies a replicated modification by committing it directly against the data store in a
     * fresh read-write transaction, updating the shard MBean commit/failure statistics.
     */
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            // A replica-side commit failure is logged but not propagated - there is no caller
            // to reply to here.
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }
630
    /**
     * Handles an UpdateSchemaContext message by applying the contained SchemaContext.
     */
    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }
634
    /**
     * Propagates a new SchemaContext to the underlying data store.
     */
    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }
639
640     private boolean isMetricsCaptureEnabled() {
641         CommonConfig config = new CommonConfig(getContext().system().settings().config());
642         return config.isMetricCaptureEnabled();
643     }
644
    /**
     * Returns the cohort that creates and applies snapshots of this shard's data tree.
     */
    @Override
    @VisibleForTesting
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }
650
651     @Override
652     @Nonnull
653     protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
654         return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
655                 restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
656     }
657
    /**
     * Called when journal/snapshot recovery completes. Notifies the parent shard manager that this
     * shard is initialized and schedules the periodic check that expires timed-out in-progress
     * transaction commits.
     */
    @Override
    protected void onRecoveryComplete() {
        // The restore-from snapshot has been consumed by recovery; release it.
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
675
    /**
     * Applies a replicated log entry to the state machine. DataTreeCandidatePayload is the current
     * replication format; the CompositeModification*Payload forms are legacy formats retained for
     * backwards compatibility. In all cases a null clientActor means the entry is replica state
     * from the leader, while a non-null clientActor means replication consensus was reached for a
     * locally-coordinated commit which can now be finished.
     */
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload ){
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            // Unknown payload - log class loader info to help diagnose serialization mismatches.
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }
704
705     private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
706         if(modification == null) {
707             LOG.error(
708                     "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
709                     persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
710         } else if(clientActor == null) {
711             // There's no clientActor to which to send a commit reply so we must be applying
712             // replicated state from the leader.
713             commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
714         } else {
715             // This must be the OK to commit after replication consensus.
716             finishCommit(clientActor, identifier);
717         }
718     }
719
720     @Override
721     protected void onStateChanged() {
722         boolean isLeader = isLeader();
723         boolean hasLeader = hasLeader();
724         changeSupport.onLeadershipChange(isLeader, hasLeader);
725         treeChangeSupport.onLeadershipChange(isLeader, hasLeader);
726
727         // If this actor is no longer the leader close all the transaction chains
728         if (!isLeader) {
729             if(LOG.isDebugEnabled()) {
730                 LOG.debug(
731                     "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
732                     persistenceId(), getId());
733             }
734
735             store.closeAllTransactionChains();
736         }
737
738         if(hasLeader && !isIsolatedLeader()) {
739             messageRetrySupport.retryMessages();
740         }
741     }
742
743     @Override
744     protected void onLeaderChanged(String oldLeader, String newLeader) {
745         shardMBean.incrementLeadershipChangeCount();
746
747         boolean hasLeader = hasLeader();
748         if(hasLeader && !isLeader()) {
749             // Another leader was elected. If we were the previous leader and had pending transactions, convert
750             // them to transaction messages and send to the new leader.
751             ActorSelection leader = getLeader();
752             if(leader != null) {
753                 Collection<Object> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
754                         datastoreContext.getShardBatchedModificationCount());
755
756                 if(!messagesToForward.isEmpty()) {
757                     LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
758                             messagesToForward.size(), leader);
759
760                     for(Object message: messagesToForward) {
761                         leader.tell(message, self());
762                     }
763                 }
764             } else {
765                 commitCoordinator.abortPendingTransactions(
766                         "The transacton was aborted due to inflight leadership change and the leader address isn't available.",
767                         this);
768             }
769         }
770
771         if(hasLeader && !isIsolatedLeader()) {
772             messageRetrySupport.retryMessages();
773         }
774     }
775
    /**
     * Pauses leader operations by deferring the given operation until all pending transactions
     * complete in the commit coordinator.
     */
    @Override
    protected void pauseLeader(Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        commitCoordinator.setRunOnPendingTransactionsComplete(operation);
    }
781
    /**
     * Returns this shard's name as its Akka persistence ID.
     */
    @Override
    public String persistenceId() {
        return this.name;
    }
786
    // Exposes the commit coordinator for unit tests.
    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }
791
    /**
     * Returns the DatastoreContext holding this shard's configuration settings.
     */
    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }
795
    // Exposes the underlying data tree store for unit tests.
    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }
800
    // Exposes the shard statistics MBean for unit tests.
    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
805
    /**
     * Returns a new Builder for constructing Shard actor Props.
     */
    public static Builder builder() {
        return new Builder();
    }
809
810     public static abstract class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
811         private final Class<S> shardClass;
812         private ShardIdentifier id;
813         private Map<String, String> peerAddresses = Collections.emptyMap();
814         private DatastoreContext datastoreContext;
815         private SchemaContext schemaContext;
816         private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
817         private volatile boolean sealed;
818
819         protected AbstractBuilder(Class<S> shardClass) {
820             this.shardClass = shardClass;
821         }
822
823         protected void checkSealed() {
824             Preconditions.checkState(!sealed, "Builder isalready sealed - further modifications are not allowed");
825         }
826
827         @SuppressWarnings("unchecked")
828         private T self() {
829             return (T) this;
830         }
831
832         public T id(ShardIdentifier id) {
833             checkSealed();
834             this.id = id;
835             return self();
836         }
837
838         public T peerAddresses(Map<String, String> peerAddresses) {
839             checkSealed();
840             this.peerAddresses = peerAddresses;
841             return self();
842         }
843
844         public T datastoreContext(DatastoreContext datastoreContext) {
845             checkSealed();
846             this.datastoreContext = datastoreContext;
847             return self();
848         }
849
850         public T schemaContext(SchemaContext schemaContext) {
851             checkSealed();
852             this.schemaContext = schemaContext;
853             return self();
854         }
855
856         public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
857             checkSealed();
858             this.restoreFromSnapshot = restoreFromSnapshot;
859             return self();
860         }
861
862         public ShardIdentifier getId() {
863             return id;
864         }
865
866         public Map<String, String> getPeerAddresses() {
867             return peerAddresses;
868         }
869
870         public DatastoreContext getDatastoreContext() {
871             return datastoreContext;
872         }
873
874         public SchemaContext getSchemaContext() {
875             return schemaContext;
876         }
877
878         public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
879             return restoreFromSnapshot;
880         }
881
882         public TreeType getTreeType() {
883             switch (datastoreContext.getLogicalStoreType()) {
884             case CONFIGURATION:
885                 return TreeType.CONFIGURATION;
886             case OPERATIONAL:
887                 return TreeType.OPERATIONAL;
888             }
889
890             throw new IllegalStateException("Unhandled logical store type " + datastoreContext.getLogicalStoreType());
891         }
892
893         protected void verify() {
894             Preconditions.checkNotNull(id, "id should not be null");
895             Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
896             Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
897             Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
898         }
899
900         public Props props() {
901             sealed = true;
902             verify();
903             return Props.create(shardClass, this);
904         }
905     }
906
    /**
     * Concrete builder for plain Shard instances; obtained via {@link Shard#builder()}.
     */
    public static class Builder extends AbstractBuilder<Builder, Shard> {
        private Builder() {
            super(Shard.class);
        }
    }
912 }