BUG-4964: Bump to akka-2.4.1
[controller.git] / opendaylight / md-sal / sal-distributed-datastore / src / main / java / org / opendaylight / controller / cluster / datastore / Shard.java
1 /*
2  * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
3  *
4  * This program and the accompanying materials are made available under the
5  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
6  * and is available at http://www.eclipse.org/legal/epl-v10.html
7  */
8
9 package org.opendaylight.controller.cluster.datastore;
10
11 import akka.actor.ActorRef;
12 import akka.actor.ActorSelection;
13 import akka.actor.Cancellable;
14 import akka.actor.Props;
15 import akka.serialization.Serialization;
16 import com.google.common.annotations.VisibleForTesting;
17 import com.google.common.base.Optional;
18 import com.google.common.base.Preconditions;
19 import java.io.IOException;
20 import java.util.Collections;
21 import java.util.Map;
22 import java.util.concurrent.TimeUnit;
23 import javax.annotation.Nonnull;
24 import org.opendaylight.controller.cluster.common.actor.CommonConfig;
25 import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
26 import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
27 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
28 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
29 import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
30 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
31 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
32 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
33 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
34 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
35 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
36 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
37 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
38 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
39 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
40 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
41 import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
42 import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
43 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
44 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
45 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
46 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
47 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
48 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
49 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
50 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
51 import org.opendaylight.controller.cluster.datastore.modification.Modification;
52 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
53 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
54 import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
55 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
56 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
57 import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
58 import org.opendaylight.controller.cluster.raft.RaftActor;
59 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
60 import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
61 import org.opendaylight.controller.cluster.raft.RaftState;
62 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
63 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
64 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
65 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
66 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
67 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
68 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
69 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
70 import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
71 import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
72 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
73 import scala.concurrent.duration.Duration;
74 import scala.concurrent.duration.FiniteDuration;
75
/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests
 * it receives to that tree.
 * </p>
 */
public class Shard extends RaftActor {

    // Self-message scheduled periodically (see onRecoveryComplete) to expire stuck commits.
    protected static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    // Test hook: the shard replies with its ShardStats MBean when this message is received.
    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";

    @VisibleForTesting
    static final String DEFAULT_NAME = "default";

    // The state of this Shard: the backing data tree and its transaction machinery.
    private final ShardDataTree store;

    // The name of this shard (string form of the builder's ShardIdentifier).
    private final String name;

    // JMX statistics bean for this shard.
    private final ShardStats shardMBean;

    // Mutable: replaced wholesale when a DatastoreContext message arrives (onDatastoreContext).
    private DatastoreContext datastoreContext;

    // Coordinates the can-commit/commit/abort transaction protocol for this shard.
    private final ShardCommitCoordinator commitCoordinator;

    // Milliseconds before an in-progress commit is considered expired; set to half the
    // configured commit timeout in setTransactionCommitTimeout().
    private long transactionCommitTimeout;

    // Handle for the periodic TX_COMMIT_TIMEOUT_CHECK_MESSAGE schedule; cancelled in postStop().
    private Cancellable txCommitTimeoutCheckSchedule;

    // Child actor that forwards role-change notifications; always present (see createRoleChangeNotifier).
    private final Optional<ActorRef> roleChangeNotifier;

    // Tracks AppendEntriesReply arrival intervals to help diagnose isolation/slowness.
    private final MessageTracker appendEntriesReplyTracker;

    // Creates ShardTransaction child actors on demand.
    private final ShardTransactionActorFactory transactionActorFactory;

    // Cohort used by the raft layer to capture/apply snapshots of the data tree.
    private final ShardSnapshotCohort snapshotCohort;

    // Support objects for DataTreeChangeListener and legacy DataChangeListener registrations.
    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);


    // Snapshot to restore from during recovery; cleared in onRecoveryComplete().
    private ShardSnapshot restoreFromSnapshot;

    // Re-sends transaction messages that arrived while no leader was active.
    private final ShardTransactionMessageRetrySupport messageRetrySupport;
    /**
     * Creates the shard from its builder: configures persistence, the data tree,
     * the MBean, the commit coordinator and the various support/child actors.
     */
    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        // Persistence can be disabled via configuration (in-memory only operation).
        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext(), builder.getTreeType());

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        // Optionally wrap message handling with metering (drops into MeteringBehavior).
        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }
166
167     private void setTransactionCommitTimeout() {
168         transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
169                 datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
170     }
171
172     private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
173         ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
174             RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
175         return Optional.of(shardRoleChangeNotifier);
176     }
177
    /**
     * Actor shutdown: stops retry/timeout machinery, fails queued transactions and
     * unregisters the MBean.
     */
    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        // Stop the periodic commit-timeout check, if it was ever scheduled.
        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        // Fail any still-queued transactions so front-ends are not left waiting.
        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }
194
    /**
     * Recovery-time message handling is fully delegated to RaftActor; this override
     * only adds logging and (when tracing) message-interval tracking.
     */
    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
            getSender());

        super.onReceiveRecover(message);
        // NOTE(review): begin() is invoked for every recovered message while tracing is
        // enabled — confirm this is intended rather than a one-time enable.
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }
205
    /**
     * Main message dispatch for the shard. Transaction-protocol messages are handled
     * here (or forwarded to the leader); everything unrecognized falls through to
     * RaftActor's handling.
     */
    @Override
    public void onReceiveCommand(final Object message) throws Exception {

        // Record arrival so gaps in AppendEntriesReply traffic can be diagnosed.
        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if(context.error().isPresent()){
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                context.error());
        }

        try {
            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (BatchedModifications.class.isInstance(message)) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                        resolved.getPeerAddress());
            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
                // Periodic self-message scheduled in onRecoveryComplete().
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if(message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if(message instanceof RegisterRoleChangeListener){
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if(GET_SHARD_MBEAN_MESSAGE.equals(message)){
                sender().tell(getShardMBean(), self());
            } else if(message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if(message instanceof ServerRemoved){
                context().parent().forward(message, context());
            } else if(ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else {
                super.onReceiveCommand(message);
            }
        } finally {
            // Always close the tracker context so timing bookkeeping stays balanced.
            context.done();
        }
    }
267
268     private boolean hasLeader() {
269         return getLeaderId() != null;
270     }
271
272     public int getPendingTxCommitQueueSize() {
273         return commitCoordinator.getQueueSize();
274     }
275
276     public int getCohortCacheSize() {
277         return commitCoordinator.getCohortCacheSize();
278     }
279
280     @Override
281     protected Optional<ActorRef> getRoleChangeNotifier() {
282         return roleChangeNotifier;
283     }
284
285     @Override
286     protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
287         return new ShardLeaderStateChanged(memberId, leaderId,
288                 isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent(),
289                 leaderPayloadVersion);
290     }
291
292     protected void onDatastoreContext(DatastoreContext context) {
293         datastoreContext = context;
294
295         commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());
296
297         setTransactionCommitTimeout();
298
299         if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
300             setPersistence(true);
301         } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
302             setPersistence(false);
303         }
304
305         updateConfigParams(datastoreContext.getShardRaftConfig());
306     }
307
308     private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
309         return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
310     }
311
    /**
     * Proceeds with committing a cohort entry: applies the candidate directly when no
     * replication or persistence round-trip is needed, otherwise persists the candidate
     * payload through raft (applyState() completes the commit once consensus is reached).
     */
    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence
        // or if cohortEntry has no modifications
        // we can apply modification to the state immediately
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                    DataTreeCandidatePayload.create(candidate));
        }
    }
325
326     private void handleCommitTransaction(final CommitTransaction commit) {
327         if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
328             shardMBean.incrementFailedTransactionsCount();
329         }
330     }
331
    /**
     * Completes the commit of the given cohort entry: applies it to the data tree,
     * replies to the original sender and updates the MBean counters. The commit queue
     * is always advanced via currentTransactionComplete(), on success or failure.
     */
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID, @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            cohortEntry.commit();

            sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (Exception e) {
            // Failure is reported to the caller; the raft entry was already persisted.
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }
353
    /**
     * Finishes a commit after persistence/replication completed. Normally resolves the
     * current cohort entry; if the transaction is no longer current (e.g. it timed out
     * and was aborted on the front-end) the already-replicated candidate is re-applied
     * through a fresh transaction so the persisted state stays consistent.
     */
    private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated
            // however we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
                        getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }
390
391     private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
392         LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
393         commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
394     }
395
396     protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
397         try {
398             commitCoordinator.handleBatchedModifications(batched, sender, this);
399         } catch (Exception e) {
400             LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
401                     batched.getTransactionID(), e);
402             sender.tell(new akka.actor.Status.Failure(e), getSelf());
403         }
404     }
405
    /**
     * Entry point for BatchedModifications: handled locally when this shard is the
     * active leader, forwarded to the leader when one exists, otherwise queued for
     * retry until a leader appears.
     */
    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // TODO: what if this is not the first batch and leadership changed in between batched messages?
                // We could check if the commitCoordinator already has a cached entry and forward all the previous
                // batched modifications.
                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
                leader.forward(batched, getContext());
            }
        }
    }
436
437     private boolean failIfIsolatedLeader(ActorRef sender) {
438         if(isIsolatedLeader()) {
439             sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
440                     "Shard %s was the leader but has lost contact with all of its followers. Either all" +
441                     " other follower nodes are down or this node is isolated by a network partition.",
442                     persistenceId()))), getSelf());
443             return true;
444         }
445
446         return false;
447     }
448
449     protected boolean isIsolatedLeader() {
450         return getRaftState() == RaftState.IsolatedLeader;
451     }
452
    /**
     * Entry point for ReadyLocalTransaction: processed locally when this shard is the
     * active leader, forwarded to the leader (with the leader's payload version stamped
     * on the message) when one exists, otherwise queued for retry.
     */
    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                // Stamp the leader's version so the message serializes compatibly.
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }
477
    /**
     * Entry point for ForwardedReadyTransaction: processed locally when this shard is
     * the active leader; otherwise converted to a ReadyLocalTransaction and forwarded
     * to the leader, or queued for retry when no leader is available.
     */
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                // Repackage as ReadyLocalTransaction carrying the transaction's snapshot.
                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }
499
500     private void handleAbortTransaction(final AbortTransaction abort) {
501         doAbortTransaction(abort.getTransactionID(), getSender());
502     }
503
504     void doAbortTransaction(final String transactionID, final ActorRef sender) {
505         commitCoordinator.handleAbort(transactionID, sender, this);
506     }
507
508     private void handleCreateTransaction(final Object message) {
509         if (isLeader()) {
510             createTransaction(CreateTransaction.fromSerializable(message));
511         } else if (getLeader() != null) {
512             getLeader().forward(message, getContext());
513         } else {
514             getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
515                     "Could not create a shard transaction", persistenceId())), getSelf());
516         }
517     }
518
519     private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
520         store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
521     }
522
523     private ActorRef createTypedTransactionActor(int transactionType,
524             ShardTransactionIdentifier transactionId, String transactionChainId) {
525
526         return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
527                 transactionId, transactionChainId);
528     }
529
530     private void createTransaction(CreateTransaction createTransaction) {
531         try {
532             if(TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
533                     failIfIsolatedLeader(getSender())) {
534                 return;
535             }
536
537             ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
538                 createTransaction.getTransactionId(), createTransaction.getTransactionChainId());
539
540             getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
541                     createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
542         } catch (Exception e) {
543             getSender().tell(new akka.actor.Status.Failure(e), getSelf());
544         }
545     }
546
547     private ActorRef createTransaction(int transactionType, String remoteTransactionId,
548             String transactionChainId) {
549
550
551         ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);
552
553         if(LOG.isDebugEnabled()) {
554             LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
555         }
556
557         ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
558                 transactionChainId);
559
560         return transactionActor;
561     }
562
    /**
     * Applies the given Modification through a fresh read-write transaction and
     * commits it synchronously, updating the MBean counters. Failures are logged and
     * counted but not propagated.
     */
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }
575
576     private void updateSchemaContext(final UpdateSchemaContext message) {
577         updateSchemaContext(message.getSchemaContext());
578     }
579
580     @VisibleForTesting
581     void updateSchemaContext(final SchemaContext schemaContext) {
582         store.updateSchemaContext(schemaContext);
583     }
584
585     private boolean isMetricsCaptureEnabled() {
586         CommonConfig config = new CommonConfig(getContext().system().settings().config());
587         return config.isMetricCaptureEnabled();
588     }
589
590     @Override
591     @VisibleForTesting
592     public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
593         return snapshotCohort;
594     }
595
596     @Override
597     @Nonnull
598     protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
599         return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
600                 restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
601     }
602
    /**
     * Called once raft recovery finishes: releases the restore snapshot, notifies the
     * ShardManager and starts the periodic commit-timeout check.
     */
    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
620
621     @Override
622     protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
623         if (data instanceof DataTreeCandidatePayload) {
624             if (clientActor == null) {
625                 // No clientActor indicates a replica coming from the leader
626                 try {
627                     store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
628                 } catch (DataValidationFailedException | IOException e) {
629                     LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
630                 }
631             } else {
632                 // Replication consensus reached, proceed to commit
633                 finishCommit(clientActor, identifier);
634             }
635         } else if (data instanceof CompositeModificationPayload) {
636             Object modification = ((CompositeModificationPayload) data).getModification();
637
638             applyModificationToState(clientActor, identifier, modification);
639         } else if(data instanceof CompositeModificationByteStringPayload ){
640             Object modification = ((CompositeModificationByteStringPayload) data).getModification();
641
642             applyModificationToState(clientActor, identifier, modification);
643         } else {
644             LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
645                     persistenceId(), data, data.getClass().getClassLoader(),
646                     CompositeModificationPayload.class.getClassLoader());
647         }
648     }
649
650     private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
651         if(modification == null) {
652             LOG.error(
653                     "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
654                     persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
655         } else if(clientActor == null) {
656             // There's no clientActor to which to send a commit reply so we must be applying
657             // replicated state from the leader.
658             commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
659         } else {
660             // This must be the OK to commit after replication consensus.
661             finishCommit(clientActor, identifier);
662         }
663     }
664
665     @Override
666     protected void onStateChanged() {
667         boolean isLeader = isLeader();
668         boolean hasLeader = hasLeader();
669         changeSupport.onLeadershipChange(isLeader, hasLeader);
670         treeChangeSupport.onLeadershipChange(isLeader, hasLeader);
671
672         // If this actor is no longer the leader close all the transaction chains
673         if (!isLeader) {
674             if(LOG.isDebugEnabled()) {
675                 LOG.debug(
676                     "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
677                     persistenceId(), getId());
678             }
679
680             store.closeAllTransactionChains();
681
682             commitCoordinator.abortPendingTransactions(
683                     "The transacton was aborted due to inflight leadership change.", this);
684         }
685
686         if(hasLeader && !isIsolatedLeader()) {
687             messageRetrySupport.retryMessages();
688         }
689     }
690
691     @Override
692     protected void onLeaderChanged(String oldLeader, String newLeader) {
693         shardMBean.incrementLeadershipChangeCount();
694
695         if(hasLeader() && !isIsolatedLeader()) {
696             messageRetrySupport.retryMessages();
697         }
698     }
699
700     @Override
701     protected void pauseLeader(Runnable operation) {
702         LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
703         commitCoordinator.setRunOnPendingTransactionsComplete(operation);
704     }
705
706     @Override
707     public String persistenceId() {
708         return this.name;
709     }
710
711     @VisibleForTesting
712     ShardCommitCoordinator getCommitCoordinator() {
713         return commitCoordinator;
714     }
715
716     public DatastoreContext getDatastoreContext() {
717         return datastoreContext;
718     }
719
720     @VisibleForTesting
721     public ShardDataTree getDataStore() {
722         return store;
723     }
724
725     @VisibleForTesting
726     ShardStats getShardMBean() {
727         return shardMBean;
728     }
729
730     public static Builder builder() {
731         return new Builder();
732     }
733
734     public static abstract class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
735         private final Class<S> shardClass;
736         private ShardIdentifier id;
737         private Map<String, String> peerAddresses = Collections.emptyMap();
738         private DatastoreContext datastoreContext;
739         private SchemaContext schemaContext;
740         private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
741         private volatile boolean sealed;
742
743         protected AbstractBuilder(Class<S> shardClass) {
744             this.shardClass = shardClass;
745         }
746
747         protected void checkSealed() {
748             Preconditions.checkState(!sealed, "Builder isalready sealed - further modifications are not allowed");
749         }
750
751         @SuppressWarnings("unchecked")
752         private T self() {
753             return (T) this;
754         }
755
756         public T id(ShardIdentifier id) {
757             checkSealed();
758             this.id = id;
759             return self();
760         }
761
762         public T peerAddresses(Map<String, String> peerAddresses) {
763             checkSealed();
764             this.peerAddresses = peerAddresses;
765             return self();
766         }
767
768         public T datastoreContext(DatastoreContext datastoreContext) {
769             checkSealed();
770             this.datastoreContext = datastoreContext;
771             return self();
772         }
773
774         public T schemaContext(SchemaContext schemaContext) {
775             checkSealed();
776             this.schemaContext = schemaContext;
777             return self();
778         }
779
780         public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
781             checkSealed();
782             this.restoreFromSnapshot = restoreFromSnapshot;
783             return self();
784         }
785
786         public ShardIdentifier getId() {
787             return id;
788         }
789
790         public Map<String, String> getPeerAddresses() {
791             return peerAddresses;
792         }
793
794         public DatastoreContext getDatastoreContext() {
795             return datastoreContext;
796         }
797
798         public SchemaContext getSchemaContext() {
799             return schemaContext;
800         }
801
802         public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
803             return restoreFromSnapshot;
804         }
805
806         public TreeType getTreeType() {
807             switch (datastoreContext.getLogicalStoreType()) {
808             case CONFIGURATION:
809                 return TreeType.CONFIGURATION;
810             case OPERATIONAL:
811                 return TreeType.OPERATIONAL;
812             }
813
814             throw new IllegalStateException("Unhandled logical store type " + datastoreContext.getLogicalStoreType());
815         }
816
817         protected void verify() {
818             Preconditions.checkNotNull(id, "id should not be null");
819             Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
820             Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
821             Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
822         }
823
824         public Props props() {
825             sealed = true;
826             verify();
827             return Props.create(shardClass, this);
828         }
829     }
830
831     public static class Builder extends AbstractBuilder<Builder, Shard> {
832         private Builder() {
833             super(Shard.class);
834         }
835     }
836 }