/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
 * receives to it.
 * </p>
 */
public class Shard extends RaftActor {

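    // Message the Shard periodically sends to itself (see onRecoveryComplete) to check whether the
    // current in-progress transaction has exceeded the commit timeout and should be aborted.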
    protected static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = "getShardMBeanMessage";

    @VisibleForTesting
    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

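    // Tracks AppendEntriesReply messages so that, when trace logging is enabled, gaps longer than the
    // configured isolated-leader check interval can be reported (see onReceiveCommand).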
    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    protected Shard(AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        store = new ShardDataTree(builder.getSchemaContext());

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store,
                datastoreContext.getShardCommitQueueExpiryTimeoutInMillis(),
                datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor that publishes role change notifications for this shard
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
    }

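    // The expiry threshold for an in-progress transaction commit is half of the configured shard
    // transaction commit timeout, converted to milliseconds; it is checked periodically via
    // handleTransactionCommitTimeoutCheck and also drives the check schedule set up in onRecoveryComplete.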
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
            RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        shardMBean.unregisterMBean();
    }

    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
                message.getClass().toString(), getSender());
        }

        if (message instanceof RecoveryFailure){
            LOG.error("{}: Recovery failed because of this cause",
                    persistenceId(), ((RecoveryFailure) message).cause());

            // Even though recovery failed, we still need to finish our recovery, e.g. send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
            if(LOG.isTraceEnabled()) {
                appendEntriesReplyTracker.begin();
            }
        }
    }

    @Override
    public void onReceiveCommand(final Object message) throws Exception {

        MessageTracker.Context context = appendEntriesReplyTracker.received(message);

        if(context.error().isPresent()){
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                context.error());
        }

        try {
            if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCreateTransaction(message);
            } else if (BatchedModifications.class.isInstance(message)) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                commitCoordinator.handleForwardedReadyTransaction((ForwardedReadyTransaction) message,
                        getSender(), this);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                        resolved.getPeerAddress());
            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
                handleTransactionCommitTimeoutCheck();
            } else if(message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if(message instanceof RegisterRoleChangeListener){
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if(GET_SHARD_MBEAN_MESSAGE.equals(message)){
                sender().tell(getShardMBean(), self());
            } else if(message instanceof GetShardDataTree){
                sender().tell(store.getDataTree(), self());
            } else {
                super.onReceiveCommand(message);
            }
        } finally {
            context.done();
        }
    }
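    // Illustrative sketch only (not part of this class): a caller such as a test could retrieve this
    // shard's backing DataTree by asking the shard actor with a GetShardDataTree message, which is
    // answered above via sender().tell(store.getDataTree(), self()). The shardActorRef, the timeout
    // values and the way the GetShardDataTree message instance is obtained are assumptions here:
    //
    //   scala.concurrent.Future<Object> reply =
    //           akka.pattern.Patterns.ask(shardActorRef, getShardDataTreeMessage, 5000);
    //   DataTree dataTree = (DataTree) scala.concurrent.Await.result(
    //           reply, scala.concurrent.duration.Duration.create(5, TimeUnit.SECONDS));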

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return commitCoordinator.getQueueSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(String memberId, String leaderId, short leaderPayloadVersion) {
        return new ShardLeaderStateChanged(memberId, leaderId,
                isLeader() ? Optional.<DataTree>of(store.getDataTree()) : Optional.<DataTree>absent(),
                leaderPayloadVersion);
    }

    protected void onDatastoreContext(DatastoreContext context) {
        datastoreContext = context;

        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());

        setTransactionCommitTimeout();

        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if(cohortEntry != null) {
            if(cohortEntry.isExpired(transactionCommitTimeout)) {
                LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
                        persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }

        commitCoordinator.cleanupExpiredCohortEntries();
    }

    private static boolean isEmptyCommit(final DataTreeCandidate candidate) {
        return ModificationType.UNMODIFIED.equals(candidate.getRootNode().getModificationType());
    }

    void continueCommit(final CohortEntry cohortEntry) {
        final DataTreeCandidate candidate = cohortEntry.getCandidate();

        // If we do not have any followers and we are not using persistence
        // or if cohortEntry has no modifications
        // we can apply modification to the state immediately
        if ((!hasFollowers() && !persistence().isRecoveryApplicable()) || isEmptyCommit(candidate)) {
            applyModificationToState(cohortEntry.getReplySender(), cohortEntry.getTransactionID(), candidate);
        } else {
            Shard.this.persistData(cohortEntry.getReplySender(), cohortEntry.getTransactionID(),
                DataTreeCandidatePayload.create(candidate));
        }
    }

    private void handleCommitTransaction(final CommitTransaction commit) {
        if(!commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this)) {
            shardMBean.incrementFailedTransactionsCount();
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID, @Nonnull final CohortEntry cohortEntry) {
        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());

        try {
            cohortEntry.commit();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (Exception e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error("{}: An exception occurred while committing transaction {}", persistenceId(),
                    transactionID, e);
            shardMBean.incrementFailedTransactionsCount();
        } finally {
            commitCoordinator.currentTransactionComplete(transactionID, true);
        }
    }

    private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if (cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted earlier, most likely due to a timeout in the front-end. We still need to
            // finish committing the transaction since it was successfully persisted and replicated.
            // However, we can't use the original cohort because it was already preCommitted and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                try {
                    store.applyForeignCandidate(transactionID, cohortEntry.getCandidate());
                } catch (DataValidationFailedException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error("{}: Failed to re-apply transaction {}", persistenceId(), transactionID, e);
                }

                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
                                persistenceId(), transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }
        } else {
            finishCommit(sender, transactionID, cohortEntry);
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: Processing canCommit for transaction {}", persistenceId(), canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
    }

    private void noLeaderError(String errMessage, Object message) {
        // TODO: rather than throwing an immediate exception, we could schedule a timer to try again to make
        // it more resilient in case we're in the process of electing a new leader.
        getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(errMessage, persistenceId())), getSelf());
    }

    protected void handleBatchedModificationsLocal(BatchedModifications batched, ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        //
        if(isLeader()) {
            failIfIsolatedLeader(getSender());

            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if(leader != null) {
                // TODO: what if this is not the first batch and leadership changed in between batched messages?
                // We could check if the commitCoordinator already has a cached entry and forward all the previous
                // batched modifications.
                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
                leader.forward(batched, getContext());
            } else {
                noLeaderError("Could not commit transaction " + batched.getTransactionID(), batched);
            }
        }
    }

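    // Replies to the sender with a NoShardLeaderException and returns true if this shard is a leader
    // that has lost contact with all of its followers; returns false otherwise, in which case the
    // caller may proceed.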
    private boolean failIfIsolatedLeader(ActorRef sender) {
        if(isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all" +
                    " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        if (isLeader()) {
            failIfIsolatedLeader(getSender());

            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (leader != null) {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            } else {
                noLeaderError("Could not commit transaction " + message.getTransactionID(), message);
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getTransactionChainId());
    }

    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion ) {

        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
                transactionId, transactionChainId, clientVersion);
    }

    private void createTransaction(CreateTransaction createTransaction) {
        try {
            if(TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
                    failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {

        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
                transactionChainId, clientVersion);

        return transactionActor;
    }

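    // Applies a replicated modification by creating a new local read-write transaction and committing
    // it synchronously; used from applyModificationToState when replicated state arrives from the leader
    // (i.e. there is no client actor to reply to).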
    private void commitWithNewTransaction(final Modification modification) {
        ReadWriteShardDataTreeTransaction tx = store.newReadWriteTransaction(modification.toString(), null);
        modification.apply(tx.getSnapshot());
        try {
            snapshotCohort.syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (Exception e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error("{}: Failed to commit", persistenceId(), e);
        }
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    @VisibleForTesting
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    @Nonnull
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store, store.getSchemaContext(),
                restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
        if (data instanceof DataTreeCandidatePayload) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyForeignCandidate(identifier, ((DataTreeCandidatePayload)data).getCandidate());
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                finishCommit(clientActor, identifier);
            }
        } else if (data instanceof ModificationPayload) {
            try {
                applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
            } catch (ClassNotFoundException | IOException e) {
                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
            }
        } else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload ){
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    persistenceId(), data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }
    }

    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if(modification == null) {
            LOG.error(
                    "{}: modification is null - this is very unexpected, identifier = {}, clientActor = {}",
                    persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if(LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }
    }

    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.incrementLeadershipChangeCount();
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

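    /**
     * Returns a new {@link Builder} for constructing {@link Props} for a Shard actor. A minimal usage
     * sketch is shown below; the shardId, peerAddresses, datastoreContext, schemaContext and actorSystem
     * variables are assumed to be supplied by the caller.
     *
     * <pre>
     *     Props props = Shard.builder()
     *             .id(shardId)
     *             .peerAddresses(peerAddresses)
     *             .datastoreContext(datastoreContext)
     *             .schemaContext(schemaContext)
     *             .props();
     *     ActorRef shard = actorSystem.actorOf(props, shardId.toString());
     * </pre>
     */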
    public static Builder builder() {
        return new Builder();
    }

    public static abstract class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private volatile boolean sealed;

        protected AbstractBuilder(Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public T restoreFromSnapshot(DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = restoreFromSnapshot;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        private Builder() {
            super(Shard.class);
        }
    }
}