/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests to it.
 * </p>
 */
public class Shard extends RaftActor {

    protected static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";

    @VisibleForTesting
    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType(),
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher"),
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher"), name);

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a role change notifier actor for this shard
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }

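    // Caches the transaction commit timeout in milliseconds, using half the configured value;
    // it is passed to checkForExpiredTransactions when the periodic timeout-check message fires.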
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
            RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }

    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
            getSender());

        super.onReceiveRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }

    @Override
    public void onReceiveCommand(final Object message) throws Exception {

        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if(context.error().isPresent()){
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                context.error());
        }

        try {
            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (BatchedModifications.class.isInstance(message)) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                        resolved.getPeerAddress());
            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if(message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if(message instanceof RegisterRoleChangeListener){
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if(GET_SHARD_MBEAN_MESSAGE.equals(message)){
                sender().tell(getShardMBean(), self());
            } else if(message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if(message instanceof ServerRemoved){
                context().parent().forward(message, context());
            } else if(ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else {
                super.onReceiveCommand(message);
            }
        } finally {
            context.done();
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return new ShardLeaderStateChanged(memberId, leaderId,
                isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent(),
                leaderPayloadVersion);
    }

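    // Applies an updated DatastoreContext: adjusts the commit queue capacity and commit timeout,
    // toggles persistence if the setting changed, and refreshes the RAFT configuration parameters.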
    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }

    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence
        // or if cohortEntry has no modifications
        // we can apply modification to the state immediately
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                    DataTreeCandidatePayload.create(candidate));
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
            shardMBean.incrementFailedTransactionsCount();
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID, @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            cohortEntry.commit();

            sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

349             LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
350                     transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted earlier, most likely due to a timeout in the front-end. We still need to
            // finish committing the transaction since it was successfully persisted and replicated.
            // However, we can't reuse the original cohort because it was already preCommitted and
            // may conflict with the current commit, or it may have been aborted, so we commit with
            // a new transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
                        getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
395         LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
    }

    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications = commitCoordinator.createForwardedBatchedModifications(
                        batched, datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for(BatchedModifications bm: newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

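    // Replies to the sender with a NoShardLeaderException and returns true if this shard is an
    // isolated leader, i.e. it has lost contact with all of its followers; otherwise returns false.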
    private boolean failIfIsolatedLeader(ActorRef sender) {
        if(isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all" +
                    " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId) {

        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId);
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            if(TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
                    failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
                transactionChainId);

        return transactionActor;
    }

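    // Applies a replicated modification by creating a new read-write transaction on the data tree
    // and committing it synchronously, updating the MBean commit/failure counters accordingly.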
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    @VisibleForTesting
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    @Nonnull
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

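    // Called by the RaftActor machinery once a log entry has been persisted and replicated;
    // dispatches on the payload type to either apply replicated data or finish a pending commit.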
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload){
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if(modification == null) {
            LOG.error(
                    "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                    persistenceId(), clientActor != null ? clientActor.path().toString() : null, identifier);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if(LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();

            commitCoordinator.abortPendingTransactions(
694                     "The transacton was aborted due to inflight leadership change.", this);
        }

        if(hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        if(hasLeader() && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

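    // Defers the supplied operation by handing it to the commit coordinator, which runs it once
    // all pending transactions have completed.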
    @Override
    protected void pauseLeader(Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        commitCoordinator.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

    public static Builder builder() {
        return new Builder();
    }

    public static abstract class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = restoreFromSnapshot;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
            case CONFIGURATION:
                return TreeType.CONFIGURATION;
            case OPERATIONAL:
                return TreeType.OPERATIONAL;
            }

            throw new IllegalStateException("Unhandled logical store type " + datastoreContext.getLogicalStoreType());
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }

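        // Seals the builder against further modification, verifies the required fields and
        // creates the Props used to instantiate the shard actor.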
        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        private Builder() {
            super(Shard.class);
        }
    }
}