/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
import javax.annotation.Nonnull;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses an InMemoryDOMDataStore as its internal representation and delegates all requests
 * it receives to that InMemoryDOMDataStore.
 * </p>
 */
public class Shard extends RaftActor {

    private static final Object COMMIT_TRANSACTION_REPLY = new CommitTransactionReply().toSerializable();

    private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    public static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final InMemoryDOMDataStore store;

    private final LoggingAdapter LOG =
        Logging.getLogger(getContext().system(), this);

    // By default persistence is enabled and can be turned off using the system
    // property shard.persistent
    private final boolean persistent;

    // The name of this shard
    private final ShardIdentifier name;

    private final ShardStats shardMBean;

    private final List<ActorSelection> dataChangeListeners = Lists.newArrayList();

    private final List<DelayedListenerRegistration> delayedListenerRegistrations =
            Lists.newArrayList();

    private final DatastoreContext datastoreContext;

    private SchemaContext schemaContext;

    private ActorRef createSnapshotTransaction;

    private int createSnapshotTransactionCounter;

    private final ShardCommitCoordinator commitCoordinator;

    private final long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    /**
     * Coordinates persistence recovery on startup.
     */
    private ShardRecoveryCoordinator recoveryCoordinator;
    private List<Object> currentLogRecoveryBatch;

    private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
    protected Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
            DatastoreContext datastoreContext, SchemaContext schemaContext) {
        super(name.toString(), mapPeerAddresses(peerAddresses),
                Optional.of(datastoreContext.getShardRaftConfig()));

        this.name = name;
        this.datastoreContext = datastoreContext;
        this.schemaContext = schemaContext;

        String setting = System.getProperty("shard.persistent");

        this.persistent = !"false".equals(setting);

        LOG.info("Shard created : {} persistent : {}", name, persistent);

        store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
                datastoreContext.getDataStoreProperties());

        if(schemaContext != null) {
            store.onGlobalContextUpdated(schemaContext);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setDataStoreExecutor(store.getDomStoreExecutor());
        shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(TimeUnit.SECONDS.convert(1, TimeUnit.MINUTES),
                datastoreContext.getShardTransactionCommitQueueCapacity());

        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);
    }
    private static Map<String, String> mapPeerAddresses(
            Map<ShardIdentifier, String> peerAddresses) {
        Map<String, String> map = new HashMap<>();

        for (Map.Entry<ShardIdentifier, String> entry : peerAddresses.entrySet()) {
            map.put(entry.getKey().toString(), entry.getValue());
        }

        return map;
    }
    public static Props props(final ShardIdentifier name,
            final Map<ShardIdentifier, String> peerAddresses,
            DatastoreContext datastoreContext, SchemaContext schemaContext) {
        Preconditions.checkNotNull(name, "name should not be null");
        Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
        Preconditions.checkNotNull(datastoreContext, "datastoreContext should not be null");
        Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");

        return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
    }
    @Override
    public void postStop() {
        super.postStop();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }
    }
    @Override
    public void onReceiveRecover(Object message) {
        if(LOG.isDebugEnabled()) {
            LOG.debug("onReceiveRecover: Received message {} from {}",
                message.getClass().toString(), getSender());
        }

        if (message instanceof RecoveryFailure){
            LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");

            // Even though recovery failed, we still need to finish our recovery, eg send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
        }
    }
    @Override
    public void onReceiveCommand(Object message) {
        if(LOG.isDebugEnabled()) {
            LOG.debug("onReceiveCommand: Received message {} from {}", message, getSender());
        }

        if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
            handleReadDataReply(message);
        } else if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
            handleCreateTransaction(message);
        } else if(message instanceof ForwardedReadyTransaction) {
            handleForwardedReadyTransaction((ForwardedReadyTransaction)message);
        } else if(message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
            handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
        } else if(message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
            handleCommitTransaction(CommitTransaction.fromSerializable(message));
        } else if(message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
            handleAbortTransaction(AbortTransaction.fromSerializable(message));
        } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
            closeTransactionChain(CloseTransactionChain.fromSerializable(message));
        } else if (message instanceof RegisterChangeListener) {
            registerChangeListener((RegisterChangeListener) message);
        } else if (message instanceof UpdateSchemaContext) {
            updateSchemaContext((UpdateSchemaContext) message);
        } else if (message instanceof PeerAddressResolved) {
            PeerAddressResolved resolved = (PeerAddressResolved) message;
            setPeerAddress(resolved.getPeerId().toString(),
                resolved.getPeerAddress());
        } else if(message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
            handleTransactionCommitTimeoutCheck();
        } else {
            super.onReceiveCommand(message);
        }
    }
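    /**
     * Invoked when the periodic TX_COMMIT_TIMEOUT_CHECK_MESSAGE fires: aborts the current
     * in-progress transaction if it has not been accessed within transactionCommitTimeout ms.
     */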
    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if(cohortEntry != null) {
            long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
            if(elapsed > transactionCommitTimeout) {
                LOG.warning("Current transaction {} has timed out after {} ms - aborting",
                    cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }
    }
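    /**
     * Handles the final commit message of the three-phase commit. The preCommit and commit
     * phases are performed together here as an optimization; with persistence enabled, the
     * modification payload is first persisted and replicated via the RaftActor and the commit
     * completes in finishCommit once consensus is reached.
     */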
    private void handleCommitTransaction(CommitTransaction commit) {
        final String transactionID = commit.getTransactionID();

        LOG.debug("Committing transaction {}", transactionID);

        // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
        // this transaction.
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // We're not the current Tx - the Tx was likely expired b/c it took too long in
            // between the canCommit and commit messages.
            IllegalStateException ex = new IllegalStateException(
                    String.format("Cannot commit transaction %s - it is not the current transaction",
                            transactionID));
            LOG.error(ex.getMessage());
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
            return;
        }

        // We perform the preCommit phase here atomically with the commit phase. This is an
        // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
        // coordination of preCommit across shards in case of failure but preCommit should not
        // normally fail since we ensure only one concurrent 3-phase commit.

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().preCommit().get();

            if(persistent) {
                Shard.this.persistData(getSender(), transactionID,
                        new CompositeModificationPayload(cohortEntry.getModification().toSerializable()));
            } else {
                Shard.this.finishCommit(getSender(), transactionID);
            }
        } catch (InterruptedException | ExecutionException e) {
            LOG.error(e, "An exception occurred while preCommitting transaction {}",
                    cohortEntry.getTransactionID());
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }

        cohortEntry.updateLastAccessTime();
    }
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated.
            // However we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                commitWithNewTransaction(cohortEntry.getModification());
                sender.tell(COMMIT_TRANSACTION_REPLY, getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("Could not finish committing transaction %s - no CohortEntry found",
                                transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }

            return;
        }

        LOG.debug("Finishing commit for transaction {}", cohortEntry.getTransactionID());

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().commit().get();

            sender.tell(COMMIT_TRANSACTION_REPLY, getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (InterruptedException | ExecutionException e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error(e, "An exception occurred while committing transaction {}", transactionID);
            shardMBean.incrementFailedTransactionsCount();
        }

        commitCoordinator.currentTransactionComplete(transactionID, true);
    }
    private void handleCanCommitTransaction(CanCommitTransaction canCommit) {
        LOG.debug("Handling canCommit for transaction {}", canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit, getSender(), self());
    }
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
        LOG.debug("Readying transaction {}", ready.getTransactionID());

        // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
        // commitCoordinator in preparation for the subsequent three phase commit initiated by
        // the front-end.
        commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
                ready.getModification());

        // Return our actor path as we'll handle the three phase commit.
        ReadyTransactionReply readyTransactionReply =
                new ReadyTransactionReply(Serialization.serializedActorPath(self()));
        getSender().tell(
            ready.isReturnSerialized() ? readyTransactionReply.toSerializable() : readyTransactionReply,
            getSelf());
    }
    private void handleAbortTransaction(AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }
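    /**
     * Aborts the given transaction if it is the current in-progress one, notifying the sender
     * asynchronously. The sender may be null when invoked from the commit-timeout check.
     */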
    private void doAbortTransaction(String transactionID, final ActorRef sender) {
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry != null) {
            LOG.debug("Aborting transaction {}", transactionID);

            // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
            // aborted during replication in which case we may still commit locally if replication
            // succeeds.
            commitCoordinator.currentTransactionComplete(transactionID, false);

            final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
            final ActorRef self = getSelf();

            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onSuccess(Void v) {
                    shardMBean.incrementAbortTransactionsCount();

                    if(sender != null) {
                        sender.tell(new AbortTransactionReply().toSerializable(), self);
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    LOG.error(t, "An exception happened during abort");

                    if(sender != null) {
                        sender.tell(new akka.actor.Status.Failure(t), self);
                    }
                }
            });
        }
    }
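    /**
     * Creates a transaction actor if this shard is the leader; otherwise forwards the request
     * to the leader, or fails it if no leader is currently known.
     */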
    private void handleCreateTransaction(Object message) {
        if(isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(
                "Could not find shard leader so transaction cannot be created. This typically happens" +
                " when the system is coming up or recovering and a leader is being elected. Try again" +
                " later.")), getSelf());
        }
    }
    private void handleReadDataReply(Object message) {
        // This must be for install snapshot. Don't want to open this up and trigger
        // it everywhere.

        self().tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)),
                self());

        createSnapshotTransaction = null;

        // Send a PoisonPill instead of sending close transaction because we do not really need
        // a reply.
        getSender().tell(PoisonPill.getInstance(), self());
    }
    private void closeTransactionChain(CloseTransactionChain closeTransactionChain) {
        DOMStoreTransactionChain chain =
                transactionChains.remove(closeTransactionChain.getTransactionChainId());

        if(chain != null) {
            chain.close();
        }
    }
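    /**
     * Creates a ShardTransaction actor of the requested type (read-only, read-write or
     * write-only), backed by a cached DOMStoreTransactionChain when a non-empty
     * transactionChainId is supplied.
     */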
    private ActorRef createTypedTransactionActor(
            int transactionType,
            ShardTransactionIdentifier transactionId,
            String transactionChainId) {

        DOMStoreTransactionFactory factory = store;

        if(!transactionChainId.isEmpty()) {
            factory = transactionChains.get(transactionChainId);
            if(factory == null) {
                DOMStoreTransactionChain transactionChain = store.createTransactionChain();
                transactionChains.put(transactionChainId, transactionChain);
                factory = transactionChain;
            }
        }

        if(this.schemaContext == null) {
            throw new NullPointerException("schemaContext should not be null");
        }

        if (transactionType == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {

            shardMBean.incrementReadOnlyTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId()), transactionId.toString());

        } else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {

            shardMBean.incrementReadWriteTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId()), transactionId.toString());

        } else if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {

            shardMBean.incrementWriteOnlyTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId()), transactionId.toString());
        } else {
            throw new IllegalArgumentException(
                "Shard=" + name + ":CreateTransaction message has unidentified transaction type="
                    + transactionType);
        }
    }
    private void createTransaction(CreateTransaction createTransaction) {
        createTransaction(createTransaction.getTransactionType(),
            createTransaction.getTransactionId(), createTransaction.getTransactionChainId());
    }
    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId) {

        ShardTransactionIdentifier transactionId =
            ShardTransactionIdentifier.builder()
                .remoteTransactionId(remoteTransactionId)
                .build();
        if(LOG.isDebugEnabled()) {
            LOG.debug("Creating transaction : {} ", transactionId);
        }
        ActorRef transactionActor =
            createTypedTransactionActor(transactionType, transactionId, transactionChainId);

        getSender()
            .tell(new CreateTransactionReply(
                    Serialization.serializedActorPath(transactionActor),
                    remoteTransactionId).toSerializable(),
                getSelf());

        return transactionActor;
    }
    private void syncCommitTransaction(DOMStoreWriteTransaction transaction)
            throws ExecutionException, InterruptedException {
        DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
        commitCohort.preCommit().get();
        commitCohort.commit().get();
    }
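    /**
     * Applies a replicated modification by creating and synchronously committing a fresh
     * write-only transaction against the local in-memory store.
     */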
    private void commitWithNewTransaction(Modification modification) {
        DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
        modification.apply(tx);
        try {
            syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (InterruptedException | ExecutionException e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error(e, "Failed to commit");
        }
    }
    private void updateSchemaContext(UpdateSchemaContext message) {
        this.schemaContext = message.getSchemaContext();

        // Delegate to the overload below, which updates the store; the previous additional
        // direct call to store.onGlobalContextUpdated was redundant.
        updateSchemaContext(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(SchemaContext schemaContext) {
        store.onGlobalContextUpdated(schemaContext);
    }
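    /**
     * Registers a data change listener. If this shard is not yet the leader, the registration
     * is deferred via a DelayedListenerRegistration and completed in onStateChanged.
     */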
    private void registerChangeListener(RegisterChangeListener registerChangeListener) {

        LOG.debug("registerDataChangeListener for {}", registerChangeListener.getPath());

        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                NormalizedNode<?, ?>>> registration;
        if(isLeader()) {
            registration = doChangeListenerRegistration(registerChangeListener);
        } else {
            LOG.debug("Shard is not the leader - delaying registration");

            DelayedListenerRegistration delayedReg =
                    new DelayedListenerRegistration(registerChangeListener);
            delayedListenerRegistrations.add(delayedReg);
            registration = delayedReg;
        }

        ActorRef listenerRegistration = getContext().actorOf(
                DataChangeListenerRegistration.props(registration));

        LOG.debug("registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
                listenerRegistration.path());

        getSender().tell(new RegisterChangeListenerReply(listenerRegistration.path()), getSelf());
    }
    private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
            NormalizedNode<?, ?>>> doChangeListenerRegistration(
            RegisterChangeListener registerChangeListener) {

        ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
                registerChangeListener.getDataChangeListenerPath());

        // Notify the listener if notifications should be enabled or not.
        // If this shard is the leader then it will enable notifications, else
        // it will not.
        dataChangeListenerPath.tell(new EnableNotification(true), getSelf());

        // Now store a reference to the data change listener so it can be notified
        // at a later point if notifications should be enabled or disabled.
        dataChangeListeners.add(dataChangeListenerPath);

        AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
                new DataChangeListenerProxy(schemaContext, dataChangeListenerPath);

        LOG.debug("Registering for path {}", registerChangeListener.getPath());

        return store.registerChangeListener(registerChangeListener.getPath(), listener,
                registerChangeListener.getScope());
    }
    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }
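    // The following overrides are the RaftActor persistence recovery callbacks invoked during
    // startup: batches of journaled log entries and any snapshot are handed to the
    // ShardRecoveryCoordinator and committed to the store in onRecoveryComplete.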
    @Override
    protected void startLogRecoveryBatch(int maxBatchSize) {
        currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : starting log recovery batch with max size {}", persistenceId(), maxBatchSize);
        }
    }
    @Override
    protected void appendRecoveredLogEntry(Payload data) {
        if (data instanceof CompositeModificationPayload) {
            currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
        } else {
            LOG.error("Unknown state received {} during recovery", data);
        }
    }
    @Override
    protected void applyRecoverySnapshot(ByteString snapshot) {
        if(recoveryCoordinator == null) {
            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
        }

        recoveryCoordinator.submit(snapshot, store.newWriteOnlyTransaction());

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : submitted recovery snapshot", persistenceId());
        }
    }
    @Override
    protected void applyCurrentLogRecoveryBatch() {
        if(recoveryCoordinator == null) {
            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
        }

        recoveryCoordinator.submit(currentLogRecoveryBatch, store.newWriteOnlyTransaction());

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : submitted log recovery batch with size {}", persistenceId(),
                    currentLogRecoveryBatch.size());
        }
    }
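    /**
     * Called when persistence recovery is finished: commits all recovered transactions,
     * notifies the shard manager and starts the periodic commit-timeout check.
     */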
    @Override
    protected void onRecoveryComplete() {
        if(recoveryCoordinator != null) {
            Collection<DOMStoreWriteTransaction> txList = recoveryCoordinator.getTransactions();

            if(LOG.isDebugEnabled()) {
                LOG.debug("{} : recovery complete - committing {} Tx's", persistenceId(), txList.size());
            }

            for(DOMStoreWriteTransaction tx: txList) {
                try {
                    syncCommitTransaction(tx);
                    shardMBean.incrementCommittedTransactionCount();
                } catch (InterruptedException | ExecutionException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error(e, "Failed to commit");
                }
            }
        }

        recoveryCoordinator = null;
        currentLogRecoveryBatch = null;
        updateJournalStats();

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
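    /**
     * Invoked by the RaftActor after a log entry has been replicated to a majority of followers:
     * on the leader (clientActor non-null) this completes the pending commit via finishCommit;
     * on followers it applies the replicated modification with a new local transaction.
     */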
    @Override
    protected void applyState(ActorRef clientActor, String identifier, Object data) {

        if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            if(modification == null) {
                LOG.error(
                    "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                    identifier, clientActor != null ? clientActor.path().toString() : null);
            } else if(clientActor == null) {
                // There's no clientActor to which to send a commit reply so we must be applying
                // replicated state from the leader.
                commitWithNewTransaction(MutableCompositeModification.fromSerializable(
                        modification, schemaContext));
            } else {
                // This must be the OK to commit after replication consensus.
                finishCommit(clientActor, identifier);
            }
        } else {
            LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }

        updateJournalStats();
    }
    private void updateJournalStats() {
        ReplicatedLogEntry lastLogEntry = getLastLogEntry();

        if (lastLogEntry != null) {
            shardMBean.setLastLogIndex(lastLogEntry.getIndex());
            shardMBean.setLastLogTerm(lastLogEntry.getTerm());
        }

        shardMBean.setCommitIndex(getCommitIndex());
        shardMBean.setLastApplied(getLastApplied());
    }
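    /**
     * Initiates a snapshot capture by reading the entire data tree through a dedicated
     * read-only transaction actor; the resulting ReadDataReply is turned into a
     * CaptureSnapshotReply in handleReadDataReply.
     */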
    @Override
    protected void createSnapshot() {
        if (createSnapshotTransaction == null) {

            // Create a transaction. We are really going to treat the transaction as a worker
            // so that this actor does not get blocked building the snapshot.
            createSnapshotTransaction = createTransaction(
                TransactionProxy.TransactionType.READ_ONLY.ordinal(),
                "createSnapshot" + ++createSnapshotTransactionCounter, "");

            createSnapshotTransaction.tell(
                new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(), self());
        }
    }
    @VisibleForTesting
    @Override
    protected void applySnapshot(ByteString snapshot) {
        // Since this will be done only on Recovery or when this actor is a Follower
        // we can safely commit everything in here. We need not worry about event notifications
        // as they would have already been disabled on the follower.

        LOG.info("Applying snapshot");
        try {
            DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
            NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
            NormalizedNode<?, ?> node = new NormalizedNodeToNodeCodec(schemaContext)
                    .decode(serializedNode);

            // delete everything first
            transaction.delete(YangInstanceIdentifier.builder().build());

            // Add everything from the remote node back
            transaction.write(YangInstanceIdentifier.builder().build(), node);
            syncCommitTransaction(transaction);
        } catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) {
            LOG.error(e, "An exception occurred when applying snapshot");
        } finally {
            LOG.info("Done applying snapshot");
        }
    }
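    /**
     * Invoked on Raft role transitions: propagates leadership status to registered listeners,
     * completes any delayed listener registrations on becoming leader, and closes all open
     * transaction chains when leadership is lost.
     */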
    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        for (ActorSelection dataChangeListener : dataChangeListeners) {
            dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
        }

        if(isLeader) {
            for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
                if(!reg.isClosed()) {
                    reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
                }
            }

            delayedListenerRegistrations.clear();
        }

        shardMBean.setRaftState(getRaftState().name());
        shardMBean.setCurrentTerm(getCurrentTerm());

        // If this actor is no longer the leader close all the transaction chains
        if(!isLeader) {
            for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()) {
                if(LOG.isDebugEnabled()) {
                    LOG.debug(
                        "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
                        entry.getKey(), getId());
                }
                entry.getValue().close();
            }

            transactionChains.clear();
        }
    }
    @Override
    protected void onLeaderChanged(String oldLeader, String newLeader) {
        shardMBean.setLeader(newLeader);
    }

    @Override
    public String persistenceId() {
        return this.name.toString();
    }
    private static class ShardCreator implements Creator<Shard> {

        private static final long serialVersionUID = 1L;

        final ShardIdentifier name;
        final Map<ShardIdentifier, String> peerAddresses;
        final DatastoreContext datastoreContext;
        final SchemaContext schemaContext;

        ShardCreator(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
                DatastoreContext datastoreContext, SchemaContext schemaContext) {
            this.name = name;
            this.peerAddresses = peerAddresses;
            this.datastoreContext = datastoreContext;
            this.schemaContext = schemaContext;
        }

        @Override
        public Shard create() throws Exception {
            return new Shard(name, peerAddresses, datastoreContext, schemaContext);
        }
    }
    @VisibleForTesting
    InMemoryDOMDataStore getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
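    /**
     * A ListenerRegistration placeholder returned while this shard is not the leader; the real
     * registration is set as the delegate once leadership is gained (see onStateChanged).
     */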
    private static class DelayedListenerRegistration implements
            ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {

        private volatile boolean closed;

        private final RegisterChangeListener registerChangeListener;

        private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                NormalizedNode<?, ?>>> delegate;

        DelayedListenerRegistration(RegisterChangeListener registerChangeListener) {
            this.registerChangeListener = registerChangeListener;
        }

        void setDelegate(ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                NormalizedNode<?, ?>>> registration) {
            this.delegate = registration;
        }

        boolean isClosed() {
            return closed;
        }

        RegisterChangeListener getRegisterChangeListener() {
            return registerChangeListener;
        }

        @Override
        public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
            return delegate != null ? delegate.getInstance() : null;
        }

        @Override
        public void close() {
            closed = true;
            if(delegate != null) {
                delegate.close();
            }
        }
    }
}