/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Ticker;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MessageTracker;
import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.DatastoreSnapshot.ShardSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateSupplier;
import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.yangtools.concepts.Identifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TipProducingDataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;

/**
 * A Shard represents a portion of the logical data tree.
 *
 * <p>
 * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it receives to the
 * ShardDataTree.
 * </p>
 */
public class Shard extends RaftActor {

    @VisibleForTesting
    static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "txCommitTimeoutCheck";
        }
    };

    @VisibleForTesting
    static final Object GET_SHARD_MBEAN_MESSAGE = new Object() {
        @Override
        public String toString() {
            return "getShardMBeanMessage";
        }
    };

    // FIXME: shard names should be encapsulated in their own class and this should be exposed as a constant.
    public static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final ShardDataTree store;

    // The name of this shard
    private final String name;

    private final ShardStats shardMBean;

    private DatastoreContext datastoreContext;

    private final ShardCommitCoordinator commitCoordinator;

    private long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    private final MessageTracker appendEntriesReplyTracker;

    private final ShardTransactionActorFactory transactionActorFactory;

    private final ShardSnapshotCohort snapshotCohort;

    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);

    private ShardSnapshot restoreFromSnapshot;

    private final ShardTransactionMessageRetrySupport messageRetrySupport;

    protected Shard(final AbstractBuilder<?, ?> builder) {
        super(builder.getId().toString(), builder.getPeerAddresses(),
                Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);

        this.name = builder.getId().toString();
        this.datastoreContext = builder.getDatastoreContext();
        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();

        setPersistence(datastoreContext.isPersistent());

        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());

        ShardDataTreeChangeListenerPublisherActorProxy treeChangeListenerPublisher =
                new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher");
        ShardDataChangeListenerPublisherActorProxy dataChangeListenerPublisher =
                new ShardDataChangeListenerPublisherActorProxy(getContext(), name + "-DCL-publisher");
        if (builder.getDataTree() != null) {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getDataTree(),
                    treeChangeListenerPublisher, dataChangeListenerPublisher, name);
        } else {
            store = new ShardDataTree(this, builder.getSchemaContext(), builder.getTreeType(),
                    treeChangeListenerPublisher, dataChangeListenerPublisher, name);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setShard(this);

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(store, LOG, this.name);

        setTransactionCommitTimeout();

        // create a notifier actor that publishes this shard's role and leadership changes
        roleChangeNotifier = createRoleChangeNotifier(name.toString());

        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());

        transactionActorFactory = new ShardTransactionActorFactory(store, datastoreContext,
                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);

        snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
            this.name);

        messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
    }

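    // The timeout used for expiring stuck transactions. Note it is set to half of the configured shard
    // transaction commit timeout (converted to millis), presumably so that a hung transaction is expired and
    // aborted on the shard well before the front-end's own timeout fires.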
    private void setTransactionCommitTimeout() {
        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS) / 2;
    }

    private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
            RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.of(shardRoleChangeNotifier);
    }

    @Override
    public void postStop() {
        LOG.info("Stopping Shard {}", persistenceId());

        super.postStop();

        messageRetrySupport.close();

        if (txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }

        commitCoordinator.abortPendingTransactions("Transaction aborted due to shutdown.", this);

        shardMBean.unregisterMBean();
    }

    @Override
    protected void handleRecover(final Object message) {
        LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
            getSender());

        super.handleRecover(message);
        if (LOG.isTraceEnabled()) {
            appendEntriesReplyTracker.begin();
        }
    }

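    // Central dispatch point for all non-Raft (datastore) messages: transaction lifecycle messages, listener
    // registrations, schema/configuration updates and housekeeping messages. Anything unrecognized falls
    // through to RaftActor.handleNonRaftCommand().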
    @Override
    protected void handleNonRaftCommand(final Object message) {
        try (final MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
            final Optional<Error> maybeError = context.error();
            if (maybeError.isPresent()) {
                LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    maybeError.get());
            }

            if (CreateTransaction.isSerializedType(message)) {
                handleCreateTransaction(message);
            } else if (message instanceof BatchedModifications) {
                handleBatchedModifications((BatchedModifications)message);
            } else if (message instanceof ForwardedReadyTransaction) {
                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
            } else if (message instanceof ReadyLocalTransaction) {
                handleReadyLocalTransaction((ReadyLocalTransaction)message);
            } else if (CanCommitTransaction.isSerializedType(message)) {
                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
            } else if (CommitTransaction.isSerializedType(message)) {
                handleCommitTransaction(CommitTransaction.fromSerializable(message));
            } else if (AbortTransaction.isSerializedType(message)) {
                handleAbortTransaction(AbortTransaction.fromSerializable(message));
            } else if (CloseTransactionChain.isSerializedType(message)) {
                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
            } else if (message instanceof RegisterChangeListener) {
                changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof RegisterDataTreeChangeListener) {
                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
            } else if (message instanceof UpdateSchemaContext) {
                updateSchemaContext((UpdateSchemaContext) message);
            } else if (message instanceof PeerAddressResolved) {
                PeerAddressResolved resolved = (PeerAddressResolved) message;
                setPeerAddress(resolved.getPeerId().toString(),
                        resolved.getPeerAddress());
            } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                store.checkForExpiredTransactions(transactionCommitTimeout);
                commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
            } else if (message instanceof DatastoreContext) {
                onDatastoreContext((DatastoreContext)message);
            } else if (message instanceof RegisterRoleChangeListener) {
                roleChangeNotifier.get().forward(message, context());
            } else if (message instanceof FollowerInitialSyncUpStatus) {
                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
                context().parent().tell(message, self());
            } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                sender().tell(getShardMBean(), self());
            } else if (message instanceof GetShardDataTree) {
                sender().tell(store.getDataTree(), self());
            } else if (message instanceof ServerRemoved) {
                context().parent().forward(message, context());
            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
                messageRetrySupport.onTimerMessage(message);
            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
                store.processCohortRegistryCommand(getSender(),
                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
            } else {
                super.handleNonRaftCommand(message);
            }
        }
    }

    private boolean hasLeader() {
        return getLeaderId() != null;
    }

    public int getPendingTxCommitQueueSize() {
        return store.getQueueSize();
    }

    public int getCohortCacheSize() {
        return commitCoordinator.getCohortCacheSize();
    }

    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }

    @Override
    protected LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
            final short leaderPayloadVersion) {
        return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
    }

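    // Applies an updated DatastoreContext at runtime: recomputes the transaction commit timeout, toggles
    // persistence if the setting changed, and pushes the new Raft configuration parameters to the RaftActor.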
    protected void onDatastoreContext(final DatastoreContext context) {
        datastoreContext = context;

        setTransactionCommitTimeout();

        if (datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
            setPersistence(true);
        } else if (!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
            setPersistence(false);
        }

        updateConfigParams(datastoreContext.getShardRaftConfig());
    }

    boolean canSkipPayload() {
        // If we do not have any followers and we are not using persistence we can apply modifications to the
        // state immediately
        return !hasFollowers() && !persistence().isRecoveryApplicable();
    }

    // applyState() will be invoked once consensus is reached on the payload
    void persistPayload(final TransactionIdentifier transactionId, final Payload payload) {
        // We are faking the sender
        persistData(self(), transactionId, payload);
    }

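    // CanCommitTransaction and CommitTransaction are handled locally when this shard is the leader; otherwise
    // they are forwarded to the current leader, or queued with the message retry support when no leader is
    // currently known.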
    private void handleCommitTransaction(final CommitTransaction commit) {
        if (isLeader()) {
            commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(commit, getSender(),
                        "Could not commit transaction " + commit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(commit, getContext());
            }
        }
    }

    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("{}: handleCanCommitTransaction for {}", persistenceId(), canCommit.getTransactionID());

        if (isLeader()) {
            commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (leader == null) {
                messageRetrySupport.addMessageToRetry(canCommit, getSender(),
                        "Could not canCommit transaction " + canCommit.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                leader.forward(canCommit, getContext());
            }
        }
    }

    protected void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
        try {
            commitCoordinator.handleBatchedModifications(batched, sender, this);
        } catch (Exception e) {
            LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
                    batched.getTransactionID(), e);
            sender.tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private void handleBatchedModifications(final BatchedModifications batched) {
        // This message is sent to prepare the modifications transaction directly on the Shard as an
        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
        // BatchedModifications message, the caller sets the ready flag in the message indicating
        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
        // ReadyTransaction message.

        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
        // the primary/leader shard. However with timing and caching on the front-end, there's a small
        // window where it could have a stale leader during leadership transitions.
        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            handleBatchedModificationsLocal(batched, getSender());
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(batched, getSender(),
                        "Could not commit transaction " + batched.getTransactionID());
            } else {
                // If this is not the first batch and leadership changed in between batched messages,
                // we need to reconstruct previous BatchedModifications from the transaction
                // DataTreeModification, honoring the max batched modification count, and forward all the
                // previous BatchedModifications to the new leader.
                Collection<BatchedModifications> newModifications =
                        commitCoordinator.createForwardedBatchedModifications(batched,
                                datastoreContext.getShardBatchedModificationCount());

                LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                        newModifications.size(), leader);

                for (BatchedModifications bm : newModifications) {
                    leader.forward(bm, getContext());
                }
            }
        }
    }

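    // Replies to the sender with a NoShardLeaderException and returns true if this shard is currently an
    // isolated leader, i.e. it has lost contact with all of its followers and cannot reach consensus.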
    private boolean failIfIsolatedLeader(final ActorRef sender) {
        if (isIsolatedLeader()) {
            sender.tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
                    "Shard %s was the leader but has lost contact with all of its followers. Either all" +
                    " other follower nodes are down or this node is isolated by a network partition.",
                    persistenceId()))), getSelf());
            return true;
        }

        return false;
    }

    protected boolean isIsolatedLeader() {
        return getRaftState() == RaftState.IsolatedLeader;
    }

    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            try {
                commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
            } catch (Exception e) {
                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
                        message.getTransactionID(), e);
                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
            }
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(message, getSender(),
                        "Could not commit transaction " + message.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(message, getContext());
            }
        }
    }

    private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());

        boolean isLeaderActive = isLeaderActive();
        if (isLeader() && isLeaderActive) {
            commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
        } else {
            ActorSelection leader = getLeader();
            if (!isLeaderActive || leader == null) {
                messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                        "Could not commit transaction " + forwardedReady.getTransactionID());
            } else {
                LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);

                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
                        forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                leader.forward(readyLocal, getContext());
            }
        }
    }

    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }

    void doAbortTransaction(final TransactionIdentifier transactionID, final ActorRef sender) {
        commitCoordinator.handleAbort(transactionID, sender, this);
    }

    private void handleCreateTransaction(final Object message) {
        if (isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                    "Could not create a shard transaction", persistenceId())), getSelf());
        }
    }

    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        store.closeTransactionChain(closeTransactionChain.getIdentifier());
    }

    private void createTransaction(final CreateTransaction createTransaction) {
        try {
            if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY &&
                    failIfIsolatedLeader(getSender())) {
                return;
            }

            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId(), createTransaction.getVersion()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }

    private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
        LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
        return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
            transactionId);
    }

    private void updateSchemaContext(final UpdateSchemaContext message) {
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.updateSchemaContext(schemaContext);
    }

    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }

    @Override
    @VisibleForTesting
    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
        return snapshotCohort;
    }

    @Override
    @Nonnull
    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
        return new ShardRecoveryCoordinator(store,
            restoreFromSnapshot != null ? restoreFromSnapshot.getSnapshot() : null, persistenceId(), LOG);
    }

    @Override
    protected void onRecoveryComplete() {
        restoreFromSnapshot = null;

        // notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if (txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }

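    // Invoked by the RaftActor once a log entry has been committed (consensus reached). A null clientActor
    // means the entry was replicated from the leader and is applied directly to the data tree; otherwise the
    // payload we persisted locally has completed replication and the corresponding commit can proceed.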
    @Override
    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
        if (data instanceof DataTreeCandidateSupplier) {
            if (clientActor == null) {
                // No clientActor indicates a replica coming from the leader
                try {
                    store.applyStateFromLeader(identifier, (DataTreeCandidateSupplier)data);
                } catch (DataValidationFailedException | IOException e) {
                    LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
                }
            } else {
                // Replication consensus reached, proceed to commit
                store.payloadReplicationComplete(identifier, (DataTreeCandidateSupplier)data);
            }
        } else {
            LOG.error("{}: Unknown state received {} ClassLoader {}", persistenceId(), data,
                data.getClass().getClassLoader());
        }
    }

    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        boolean hasLeader = hasLeader();
        changeSupport.onLeadershipChange(isLeader, hasLeader);
        treeChangeSupport.onLeadershipChange(isLeader, hasLeader);

        // If this actor is no longer the leader close all the transaction chains
        if (!isLeader) {
            if (LOG.isDebugEnabled()) {
                LOG.debug(
                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
                    persistenceId(), getId());
            }

            store.closeAllTransactionChains();
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.incrementLeadershipChangeCount();

        boolean hasLeader = hasLeader();
        if (hasLeader && !isLeader()) {
            // Another leader was elected. If we were the previous leader and had pending transactions, convert
            // them to transaction messages and send to the new leader.
            ActorSelection leader = getLeader();
            if (leader != null) {
                Collection<?> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
                            datastoreContext.getShardBatchedModificationCount());

                if (!messagesToForward.isEmpty()) {
                    LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
                            messagesToForward.size(), leader);

                    for (Object message : messagesToForward) {
                        leader.tell(message, self());
                    }
                }
            } else {
                commitCoordinator.abortPendingTransactions("The transaction was aborted due to an in-flight"
                        + " leadership change and the leader address isn't available.", this);
            }
        }

        if (hasLeader && !isIsolatedLeader()) {
            messageRetrySupport.retryMessages();
        }
    }

    @Override
    protected void pauseLeader(final Runnable operation) {
        LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
        store.setRunOnPendingTransactionsComplete(operation);
    }

    @Override
    public String persistenceId() {
        return this.name;
    }

    @VisibleForTesting
    ShardCommitCoordinator getCommitCoordinator() {
        return commitCoordinator;
    }

    public DatastoreContext getDatastoreContext() {
        return datastoreContext;
    }

    @VisibleForTesting
    public ShardDataTree getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }

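    /*
     * Illustrative (not normative) use of the builder to create this actor's Props, e.g. from the component
     * that manages shards; the identifiers below are placeholders:
     *
     *   Props props = Shard.builder()
     *           .id(shardId)
     *           .peerAddresses(peerAddresses)
     *           .datastoreContext(datastoreContext)
     *           .schemaContext(schemaContext)
     *           .props();
     */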
    public static Builder builder() {
        return new Builder();
    }

    public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
        private final Class<S> shardClass;
        private ShardIdentifier id;
        private Map<String, String> peerAddresses = Collections.emptyMap();
        private DatastoreContext datastoreContext;
        private SchemaContext schemaContext;
        private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
        private TipProducingDataTree dataTree;
        private volatile boolean sealed;

        protected AbstractBuilder(final Class<S> shardClass) {
            this.shardClass = shardClass;
        }

        protected void checkSealed() {
            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
        }

        @SuppressWarnings("unchecked")
        private T self() {
            return (T) this;
        }

        public T id(final ShardIdentifier id) {
            checkSealed();
            this.id = id;
            return self();
        }

        public T peerAddresses(final Map<String, String> peerAddresses) {
            checkSealed();
            this.peerAddresses = peerAddresses;
            return self();
        }

        public T datastoreContext(final DatastoreContext datastoreContext) {
            checkSealed();
            this.datastoreContext = datastoreContext;
            return self();
        }

        public T schemaContext(final SchemaContext schemaContext) {
            checkSealed();
            this.schemaContext = schemaContext;
            return self();
        }

        public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot restoreFromSnapshot) {
            checkSealed();
            this.restoreFromSnapshot = restoreFromSnapshot;
            return self();
        }

        public T dataTree(final TipProducingDataTree dataTree) {
            checkSealed();
            this.dataTree = dataTree;
            return self();
        }

        public ShardIdentifier getId() {
            return id;
        }

        public Map<String, String> getPeerAddresses() {
            return peerAddresses;
        }

        public DatastoreContext getDatastoreContext() {
            return datastoreContext;
        }

        public SchemaContext getSchemaContext() {
            return schemaContext;
        }

        public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
            return restoreFromSnapshot;
        }

        public TipProducingDataTree getDataTree() {
            return dataTree;
        }

        public TreeType getTreeType() {
            switch (datastoreContext.getLogicalStoreType()) {
            case CONFIGURATION:
                return TreeType.CONFIGURATION;
            case OPERATIONAL:
                return TreeType.OPERATIONAL;
            }

            throw new IllegalStateException("Unhandled logical store type " + datastoreContext.getLogicalStoreType());
        }

        protected void verify() {
            Preconditions.checkNotNull(id, "id should not be null");
            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
            Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
        }

        public Props props() {
            sealed = true;
            verify();
            return Props.create(shardClass, this);
        }
    }

    public static class Builder extends AbstractBuilder<Builder, Shard> {
        private Builder() {
            super(Shard.class);
        }
    }

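    // Time source for the shard. Returning Ticker.systemTicker() here while keeping the method overridable
    // presumably allows tests to substitute a controllable Ticker for time-based logic such as transaction
    // expiry.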
    Ticker ticker() {
        return Ticker.systemTicker();
    }
}