/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.Props;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataStore as its internal representation and delegates all requests it
 * receives to the InMemoryDOMDataStore.
 * </p>
 */
public class Shard extends RaftActor {

    private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();

    private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final InMemoryDOMDataStore store;

    private final LoggingAdapter LOG =
        Logging.getLogger(getContext().system(), this);

    // The name of this shard
    private final ShardIdentifier name;

    private final ShardStats shardMBean;

    private final List<ActorSelection> dataChangeListeners = Lists.newArrayList();

    private final List<DelayedListenerRegistration> delayedListenerRegistrations =
            Lists.newArrayList();

    private final DatastoreContext datastoreContext;

    private final DataPersistenceProvider dataPersistenceProvider;

    private SchemaContext schemaContext;

    private int createSnapshotTransactionCounter;

    private final ShardCommitCoordinator commitCoordinator;

    private final long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    /**
     * Coordinates persistence recovery on startup.
     */
    private ShardRecoveryCoordinator recoveryCoordinator;
    private List<Object> currentLogRecoveryBatch;

    private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
    protected Shard(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        super(name.toString(), mapPeerAddresses(peerAddresses),
                Optional.of(datastoreContext.getShardRaftConfig()));

        this.name = name;
        this.datastoreContext = datastoreContext;
        this.schemaContext = schemaContext;
        this.dataPersistenceProvider = (datastoreContext.isPersistent()) ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();

        LOG.info("Shard created : {} persistent : {}", name, datastoreContext.isPersistent());

        store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
                datastoreContext.getDataStoreProperties());

        if(schemaContext != null) {
            store.onGlobalContextUpdated(schemaContext);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(TimeUnit.SECONDS.convert(1, TimeUnit.MINUTES),
                datastoreContext.getShardTransactionCommitQueueCapacity());

        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);

        // create a role change notifier actor for this shard
        roleChangeNotifier = createRoleChangeNotifier(name.toString());
    }
    private static Map<String, String> mapPeerAddresses(
            final Map<ShardIdentifier, String> peerAddresses) {
        Map<String, String> map = new HashMap<>();

        for (Map.Entry<ShardIdentifier, String> entry : peerAddresses.entrySet()) {
            map.put(entry.getKey().toString(), entry.getValue());
        }

        return map;
    }
    public static Props props(final ShardIdentifier name,
            final Map<ShardIdentifier, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        Preconditions.checkNotNull(name, "name should not be null");
        Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
        Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
        Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");

        return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
    }
    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
                RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.<ActorRef>of(shardRoleChangeNotifier);
    }
    @Override
    public void postStop() {
        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }
    }
    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("onReceiveRecover: Received message {} from {}",
                message.getClass().toString(), getSender());
        }

        if (message instanceof RecoveryFailure){
            LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");

            // Even though recovery failed, we still need to finish our recovery, eg send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
        }
    }
    @Override
    public void onReceiveCommand(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("onReceiveCommand: Received message {} from {}", message, getSender());
        }

        if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
            handleCreateTransaction(message);
        } else if(message instanceof ForwardedReadyTransaction) {
            handleForwardedReadyTransaction((ForwardedReadyTransaction)message);
        } else if(message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
            handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
        } else if(message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
            handleCommitTransaction(CommitTransaction.fromSerializable(message));
        } else if(message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
            handleAbortTransaction(AbortTransaction.fromSerializable(message));
        } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
            closeTransactionChain(CloseTransactionChain.fromSerializable(message));
        } else if (message instanceof RegisterChangeListener) {
            registerChangeListener((RegisterChangeListener) message);
        } else if (message instanceof UpdateSchemaContext) {
            updateSchemaContext((UpdateSchemaContext) message);
        } else if (message instanceof PeerAddressResolved) {
            PeerAddressResolved resolved = (PeerAddressResolved) message;
            setPeerAddress(resolved.getPeerId().toString(),
                resolved.getPeerAddress());
        } else if(message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
            handleTransactionCommitTimeoutCheck();
        } else {
            super.onReceiveCommand(message);
        }
    }
    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }
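
    /**
     * Periodic check, scheduled in onRecoveryComplete, that aborts the current in-progress
     * 3-phase commit if it has not been accessed within the configured commit timeout.
     */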
    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if(cohortEntry != null) {
            long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
            if(elapsed > transactionCommitTimeout) {
                LOG.warning("Current transaction {} has timed out after {} ms - aborting",
                        cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }
    }
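
    /**
     * Handles the commit (third) phase of the 3-phase commit protocol. The preCommit and commit
     * phases are performed together here; the modification is either applied locally (no
     * followers and no persistence) or persisted and replicated through the RAFT journal.
     */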
    private void handleCommitTransaction(final CommitTransaction commit) {
        final String transactionID = commit.getTransactionID();

        LOG.debug("Committing transaction {}", transactionID);

        // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
        // the given transactionID.
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // We're not the current Tx - the Tx was likely expired b/c it took too long in
            // between the canCommit and commit messages.
            IllegalStateException ex = new IllegalStateException(
                    String.format("Cannot commit transaction %s - it is not the current transaction",
                            transactionID));
            LOG.error(ex.getMessage());
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
            return;
        }

        // We perform the preCommit phase here atomically with the commit phase. This is an
        // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
        // coordination of preCommit across shards in case of failure but preCommit should not
        // normally fail since we ensure only one concurrent 3-phase commit.

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().preCommit().get();

            // If we do not have any followers and we are not using persistence we can
            // apply the modification to the state immediately.
            if(!hasFollowers() && !persistence().isRecoveryApplicable()){
                applyModificationToState(getSender(), transactionID, cohortEntry.getModification());
            } else {
                Shard.this.persistData(getSender(), transactionID,
                        new ModificationPayload(cohortEntry.getModification()));
            }
        } catch (InterruptedException | ExecutionException | IOException e) {
            LOG.error(e, "An exception occurred while preCommitting transaction {}",
                    cohortEntry.getTransactionID());
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }

        cohortEntry.updateLastAccessTime();
    }
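
    /**
     * Completes the commit once the modification has been persisted and replicated. With
     * persistence enabled this is invoked via applyState after consensus is reached; without
     * persistence it is invoked directly from applyModificationToState.
     */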
    private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated
            // however we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                commitWithNewTransaction(cohortEntry.getModification());
                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("Could not finish committing transaction %s - no CohortEntry found",
                                transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }

            return;
        }

        LOG.debug("Finishing commit for transaction {}", cohortEntry.getTransactionID());

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().commit().get();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (InterruptedException | ExecutionException e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error(e, "An exception occurred while committing transaction {}", transactionID);
            shardMBean.incrementFailedTransactionsCount();
        }

        commitCoordinator.currentTransactionComplete(transactionID, true);
    }
    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("Processing canCommit for transaction {}", canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit, getSender(), self());
    }
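
    /**
     * Handles a transaction readied by a ShardTransaction actor. The cohort is cached in the
     * commit coordinator for the subsequent 3-phase commit, and the reply tells the front-end
     * which actor should receive the canCommit/commit messages.
     */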
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
        LOG.debug("Readying transaction {}, client version {}", ready.getTransactionID(),
                ready.getTxnClientVersion());

        // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
        // commitCoordinator in preparation for the subsequent three phase commit initiated by
        // the front-end.
        commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
                ready.getModification());

        // Return our actor path as we'll handle the three phase commit, except if the Tx client
        // version < 1 (Helium-1 version). This means the Tx was initiated by a base Helium version
        // node. In that case, the subsequent 3-phase commit messages won't contain the
        // transactionId so to maintain backwards compatibility, we create a separate cohort actor
        // to provide the compatible behavior.
        ActorRef replyActorPath = self();
        if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
            LOG.debug("Creating BackwardsCompatibleThreePhaseCommitCohort");
            replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
                    ready.getTransactionID()));
        }

        ReadyTransactionReply readyTransactionReply = new ReadyTransactionReply(
                Serialization.serializedActorPath(replyActorPath));
        getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
                readyTransactionReply, getSelf());
    }
    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }
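
    /**
     * Aborts the given transaction if it is the current in-progress commit and notifies the
     * sender, which may be null when invoked from the commit timeout check.
     */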
    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry != null) {
            LOG.debug("Aborting transaction {}", transactionID);

            // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
            // aborted during replication in which case we may still commit locally if replication
            // succeeds.
            commitCoordinator.currentTransactionComplete(transactionID, false);

            final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
            final ActorRef self = getSelf();

            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onSuccess(final Void v) {
                    shardMBean.incrementAbortTransactionsCount();

                    if(sender != null) {
                        sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), self);
                    }
                }

                @Override
                public void onFailure(final Throwable t) {
                    LOG.error(t, "An exception happened during abort");

                    if(sender != null) {
                        sender.tell(new akka.actor.Status.Failure(t), self);
                    }
                }
            });
        }
    }
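
    /**
     * Creates a transaction actor if this shard is the leader, forwards the request to the leader
     * if one is known, and otherwise fails the request with a NoShardLeaderException.
     */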
    private void handleCreateTransaction(final Object message) {
        if(isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                "Could not find shard leader so transaction cannot be created. This typically happens" +
                " when the system is coming up or recovering and a leader is being elected. Try again" +
                " later.")), getSelf());
        }
    }
    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        DOMStoreTransactionChain chain =
                transactionChains.remove(closeTransactionChain.getTransactionChainId());

        if(chain != null) {
            chain.close();
        }
    }
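
    /**
     * Creates a ShardTransaction actor of the requested type (read-only, read-write or
     * write-only), backed either by the data store directly or by the named transaction chain.
     */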
    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion ) {

        DOMStoreTransactionFactory factory = store;

        if(!transactionChainId.isEmpty()) {
            factory = transactionChains.get(transactionChainId);
            if(factory == null){
                DOMStoreTransactionChain transactionChain = store.createTransactionChain();
                transactionChains.put(transactionChainId, transactionChain);
                factory = transactionChain;
            }
        }

        if(this.schemaContext == null) {
            throw new IllegalStateException("SchemaContext is not set");
        }

        if (transactionType == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {

            shardMBean.incrementReadOnlyTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId(), clientVersion),
                        transactionId.toString());

        } else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {

            shardMBean.incrementReadWriteTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId(), clientVersion),
                        transactionId.toString());

        } else if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {

            shardMBean.incrementWriteOnlyTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId(), clientVersion),
                        transactionId.toString());
        } else {
            throw new IllegalArgumentException(
                "Shard=" + name + ":CreateTransaction message has unidentified transaction type="
                        + transactionType);
        }
    }
    private void createTransaction(CreateTransaction createTransaction) {
        try {
            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                    createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }
    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {

        ShardTransactionIdentifier transactionId =
            ShardTransactionIdentifier.builder()
                .remoteTransactionId(remoteTransactionId)
                .build();

        if(LOG.isDebugEnabled()) {
            LOG.debug("Creating transaction : {} ", transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
                transactionChainId, clientVersion);

        return transactionActor;
    }
    private void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
            throws ExecutionException, InterruptedException {
        DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
        commitCohort.preCommit().get();
        commitCohort.commit().get();
    }
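
    /**
     * Applies a replicated modification by committing it through a fresh write-only transaction,
     * bypassing the normal 3-phase commit coordination.
     */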
    private void commitWithNewTransaction(final Modification modification) {
        DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
        modification.apply(tx);
        try {
            syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (InterruptedException | ExecutionException e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error(e, "Failed to commit");
        }
    }
    private void updateSchemaContext(final UpdateSchemaContext message) {
        this.schemaContext = message.getSchemaContext();
        updateSchemaContext(message.getSchemaContext());
        store.onGlobalContextUpdated(message.getSchemaContext());
    }

    @VisibleForTesting
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.onGlobalContextUpdated(schemaContext);
    }
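
    /**
     * Registers a data change listener. If this shard is not the leader, the registration is
     * delayed until leadership is gained (see onStateChanged).
     */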
    private void registerChangeListener(final RegisterChangeListener registerChangeListener) {

        LOG.debug("registerDataChangeListener for {}", registerChangeListener.getPath());

        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                                     NormalizedNode<?, ?>>> registration;
        if(isLeader()) {
            registration = doChangeListenerRegistration(registerChangeListener);
        } else {
            LOG.debug("Shard is not the leader - delaying registration");

            DelayedListenerRegistration delayedReg =
                    new DelayedListenerRegistration(registerChangeListener);
            delayedListenerRegistrations.add(delayedReg);
            registration = delayedReg;
        }

        ActorRef listenerRegistration = getContext().actorOf(
                DataChangeListenerRegistration.props(registration));

        LOG.debug("registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
                listenerRegistration.path());

        getSender().tell(new RegisterChangeListenerReply(listenerRegistration.path()), getSelf());
    }
    private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                                 NormalizedNode<?, ?>>> doChangeListenerRegistration(
            final RegisterChangeListener registerChangeListener) {

        ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
                registerChangeListener.getDataChangeListenerPath());

        // Notify the listener whether notifications should be enabled or not. If this shard is
        // the leader then notifications are enabled, otherwise they are not.
        dataChangeListenerPath.tell(new EnableNotification(true), getSelf());

        // Now store a reference to the data change listener so it can be notified
        // at a later point if notifications should be enabled or disabled
        dataChangeListeners.add(dataChangeListenerPath);

        AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
                new DataChangeListenerProxy(dataChangeListenerPath);

        LOG.debug("Registering for path {}", registerChangeListener.getPath());

        return store.registerChangeListener(registerChangeListener.getPath(), listener,
                registerChangeListener.getScope());
    }
    private boolean isMetricsCaptureEnabled(){
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }
    @Override
    protected void startLogRecoveryBatch(final int maxBatchSize) {
        currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : starting log recovery batch with max size {}", persistenceId(), maxBatchSize);
        }
    }
    @Override
    protected void appendRecoveredLogEntry(final Payload data) {
        if(data instanceof ModificationPayload) {
            try {
                currentLogRecoveryBatch.add(((ModificationPayload) data).getModification());
            } catch (ClassNotFoundException | IOException e) {
                LOG.error(e, "Error extracting ModificationPayload");
            }
        } else if (data instanceof CompositeModificationPayload) {
            currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
        } else if (data instanceof CompositeModificationByteStringPayload) {
            currentLogRecoveryBatch.add(((CompositeModificationByteStringPayload) data).getModification());
        } else {
            LOG.error("Unknown state received {} during recovery", data);
        }
    }
    @Override
    protected void applyRecoverySnapshot(final byte[] snapshotBytes) {
        if(recoveryCoordinator == null) {
            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
        }

        recoveryCoordinator.submit(snapshotBytes, store.newWriteOnlyTransaction());

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : submitted recovery snapshot", persistenceId());
        }
    }
    @Override
    protected void applyCurrentLogRecoveryBatch() {
        if(recoveryCoordinator == null) {
            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
        }

        recoveryCoordinator.submit(currentLogRecoveryBatch, store.newWriteOnlyTransaction());

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : submitted log recovery batch with size {}", persistenceId(),
                    currentLogRecoveryBatch.size());
        }
    }
    @Override
    protected void onRecoveryComplete() {
        if(recoveryCoordinator != null) {
            Collection<DOMStoreWriteTransaction> txList = recoveryCoordinator.getTransactions();

            if(LOG.isDebugEnabled()) {
                LOG.debug("{} : recovery complete - committing {} Tx's", persistenceId(), txList.size());
            }

            for(DOMStoreWriteTransaction tx: txList) {
                try {
                    syncCommitTransaction(tx);
                    shardMBean.incrementCommittedTransactionCount();
                } catch (InterruptedException | ExecutionException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error(e, "Failed to commit");
                }
            }
        }

        recoveryCoordinator = null;
        currentLogRecoveryBatch = null;
        updateJournalStats();

        //notify shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                    period, period, getSelf(),
                    TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {

        if(data instanceof ModificationPayload) {
            try {
                applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
            } catch (ClassNotFoundException | IOException e) {
                LOG.error(e, "Error extracting ModificationPayload");
            }
        }
        else if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload ){
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                    data, data.getClass().getClassLoader(),
                    CompositeModificationPayload.class.getClassLoader());
        }

        updateJournalStats();
    }
    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if(modification == null) {
            LOG.error(
                "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                clientActor != null ? clientActor.path().toString() : null, identifier);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }
    private void updateJournalStats() {
        ReplicatedLogEntry lastLogEntry = getLastLogEntry();

        if (lastLogEntry != null) {
            shardMBean.setLastLogIndex(lastLogEntry.getIndex());
            shardMBean.setLastLogTerm(lastLogEntry.getTerm());
        }

        shardMBean.setCommitIndex(getCommitIndex());
        shardMBean.setLastApplied(getLastApplied());
        shardMBean.setInMemoryJournalDataSize(getRaftActorContext().getReplicatedLog().dataSize());
    }
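
    /**
     * Builds a snapshot of the data tree by delegating to a dedicated read-only transaction actor
     * so this actor's mailbox is not blocked while the snapshot is built.
     */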
    @Override
    protected void createSnapshot() {
        // Create a transaction actor. We are really going to treat the transaction as a worker
        // so that this actor does not get blocked building the snapshot. The transaction actor
        // builds the snapshot after processing the CreateSnapshot message.

        ActorRef createSnapshotTransaction = createTransaction(
                TransactionProxy.TransactionType.READ_ONLY.ordinal(),
                "createSnapshot" + ++createSnapshotTransactionCounter, "",
                DataStoreVersions.CURRENT_VERSION);

        createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, self());
    }
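
    /**
     * Replaces the entire local data tree with the contents of a snapshot received from the
     * leader or read back during recovery.
     */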
    @Override
    protected void applySnapshot(final byte[] snapshotBytes) {
        // Since this will be done only on Recovery or when this actor is a Follower
        // we can safely commit everything in here. We need not worry about event notifications
        // as they would have already been disabled on the follower.

        LOG.info("Applying snapshot");
        try {
            DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();

            NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);

            // delete everything first
            transaction.delete(DATASTORE_ROOT);

            // Add everything from the remote node back
            transaction.write(DATASTORE_ROOT, node);
            syncCommitTransaction(transaction);
        } catch (InterruptedException | ExecutionException e) {
            LOG.error(e, "An exception occurred when applying snapshot");
        } finally {
            LOG.info("Done applying snapshot");
        }
    }
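
    /**
     * Reacts to RAFT role changes: toggles change notifications on registered listeners,
     * materializes delayed listener registrations when this shard becomes the leader, and closes
     * open transaction chains when it loses leadership.
     */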
    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        for (ActorSelection dataChangeListener : dataChangeListeners) {
            dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
        }

        if(isLeader) {
            for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
                if(!reg.isClosed()) {
                    reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
                }
            }

            delayedListenerRegistrations.clear();
        }

        shardMBean.setRaftState(getRaftState().name());
        shardMBean.setCurrentTerm(getCurrentTerm());

        // If this actor is no longer the leader close all the transaction chains
        if(!isLeader) {
            for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
                if(LOG.isDebugEnabled()) {
                    LOG.debug(
                        "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
                        entry.getKey(), getId());
                }

                entry.getValue().close();
            }

            transactionChains.clear();
        }
    }
    @Override
    protected DataPersistenceProvider persistence() {
        return dataPersistenceProvider;
    }
    @Override protected void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.setLeader(newLeader);
    }
    @Override public String persistenceId() {
        return this.name.toString();
    }
    @VisibleForTesting
    DataPersistenceProvider getDataPersistenceProvider() {
        return dataPersistenceProvider;
    }
    private static class ShardCreator implements Creator<Shard> {

        private static final long serialVersionUID = 1L;

        final ShardIdentifier name;
        final Map<ShardIdentifier, String> peerAddresses;
        final DatastoreContext datastoreContext;
        final SchemaContext schemaContext;

        ShardCreator(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
                final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
            this.name = name;
            this.peerAddresses = peerAddresses;
            this.datastoreContext = datastoreContext;
            this.schemaContext = schemaContext;
        }

        @Override
        public Shard create() throws Exception {
            return new Shard(name, peerAddresses, datastoreContext, schemaContext);
        }
    }
    @VisibleForTesting
    InMemoryDOMDataStore getDataStore() {
        return store;
    }

    @VisibleForTesting
    ShardStats getShardMBean() {
        return shardMBean;
    }
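
    /**
     * A ListenerRegistration placeholder returned while this shard is not the leader. The real
     * registration is set as the delegate once leadership is gained; closing it beforehand simply
     * marks it closed so it is never materialized.
     */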
    private static class DelayedListenerRegistration implements
        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {

        private volatile boolean closed;

        private final RegisterChangeListener registerChangeListener;

        private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                                             NormalizedNode<?, ?>>> delegate;

        DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
            this.registerChangeListener = registerChangeListener;
        }

        void setDelegate( final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                            NormalizedNode<?, ?>>> registration) {
            this.delegate = registration;
        }

        boolean isClosed() {
            return closed;
        }

        RegisterChangeListener getRegisterChangeListener() {
            return registerChangeListener;
        }

        @Override
        public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
            return delegate != null ? delegate.getInstance() : null;
        }

        @Override
        public void close() {
            closed = true;
            if(delegate != null) {
                delegate.close();
            }
        }
    }
}