/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.persistence.RecoveryFailure;
import akka.serialization.Serialization;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
import org.opendaylight.controller.cluster.datastore.compat.BackwardsCompatibleThreePhaseCommitCohort;
import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
/**
 * A Shard represents a portion of the logical data tree.
 * <p>
 * Our Shard uses InMemoryDataStore as its internal representation and delegates all requests it
 * receives to the InMemoryDataStore.
 * </p>
 */
public class Shard extends RaftActor {
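    /*
     * Message flow in brief (all handlers are below): a ShardTransaction readies a write and
     * forwards it here as a ForwardedReadyTransaction; the front-end then drives the three-phase
     * commit by sending CanCommitTransaction and CommitTransaction messages. The cohort's
     * preCommit and commit phases are collapsed into the commit handling to avoid an extra
     * message round trip, and the resulting modification is replicated through the RaftActor
     * machinery before finishCommit applies it to the in-memory store.
     */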
    private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";

    static final String DEFAULT_NAME = "default";

    // The state of this Shard
    private final InMemoryDOMDataStore store;

    private final LoggingAdapter LOG =
        Logging.getLogger(getContext().system(), this);

    // The name of this shard
    private final ShardIdentifier name;

    private final ShardStats shardMBean;

    private final List<ActorSelection> dataChangeListeners = Lists.newArrayList();

    private final List<DelayedListenerRegistration> delayedListenerRegistrations =
                                                                       Lists.newArrayList();

    private final DatastoreContext datastoreContext;

    private final DataPersistenceProvider dataPersistenceProvider;

    private SchemaContext schemaContext;

    private ActorRef createSnapshotTransaction;

    private int createSnapshotTransactionCounter;

    private final ShardCommitCoordinator commitCoordinator;

    private final long transactionCommitTimeout;

    private Cancellable txCommitTimeoutCheckSchedule;

    private final Optional<ActorRef> roleChangeNotifier;

    /**
     * Coordinates persistence recovery on startup.
     */
    private ShardRecoveryCoordinator recoveryCoordinator;
    private List<Object> currentLogRecoveryBatch;

    private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
    protected Shard(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
            final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        super(name.toString(), mapPeerAddresses(peerAddresses),
                Optional.of(datastoreContext.getShardRaftConfig()));

        this.name = name;
        this.datastoreContext = datastoreContext;
        this.schemaContext = schemaContext;
        this.dataPersistenceProvider = (datastoreContext.isPersistent()) ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();

        LOG.info("Shard created : {} persistent : {}", name, datastoreContext.isPersistent());

        store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
                datastoreContext.getDataStoreProperties());

        if(schemaContext != null) {
            store.onGlobalContextUpdated(schemaContext);
        }

        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                datastoreContext.getDataStoreMXBeanType());
        shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());

        if (isMetricsCaptureEnabled()) {
            getContext().become(new MeteringBehavior(this));
        }

        commitCoordinator = new ShardCommitCoordinator(TimeUnit.SECONDS.convert(1, TimeUnit.MINUTES),
                datastoreContext.getShardTransactionCommitQueueCapacity());

        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);

        // create a notifier actor for each cluster member
        roleChangeNotifier = createRoleChangeNotifier(name.toString());
    }
    private static Map<String, String> mapPeerAddresses(
        final Map<ShardIdentifier, String> peerAddresses) {
        Map<String, String> map = new HashMap<>();

        for (Map.Entry<ShardIdentifier, String> entry : peerAddresses
            .entrySet()) {
            map.put(entry.getKey().toString(), entry.getValue());
        }

        return map;
    }
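    /**
     * Builds the Akka Props used to instantiate a Shard actor. A minimal usage sketch (the
     * actor context and the identifier, peer and context arguments are assumed to be available
     * from the caller, e.g. the shard manager; the names below are illustrative only):
     *
     * <pre>{@code
     * Props props = Shard.props(shardId, peerAddresses, datastoreContext, schemaContext);
     * ActorRef shard = context.actorOf(props, shardId.toString());
     * }</pre>
     */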
    public static Props props(final ShardIdentifier name,
        final Map<ShardIdentifier, String> peerAddresses,
        final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
        Preconditions.checkNotNull(name, "name should not be null");
        Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
        Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
        Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");

        return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
    }
    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
            RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
        return Optional.<ActorRef>of(shardRoleChangeNotifier);
    }
    @Override
    public void postStop() {
        super.postStop();

        if(txCommitTimeoutCheckSchedule != null) {
            txCommitTimeoutCheckSchedule.cancel();
        }
    }
    @Override
    public void onReceiveRecover(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("onReceiveRecover: Received message {} from {}",
                message.getClass().toString(),
                getSender());
        }

        if (message instanceof RecoveryFailure){
            LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");

            // Even though recovery failed, we still need to finish our recovery, eg send the
            // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
            onRecoveryComplete();
        } else {
            super.onReceiveRecover(message);
        }
    }
    @Override
    public void onReceiveCommand(final Object message) throws Exception {
        if(LOG.isDebugEnabled()) {
            LOG.debug("onReceiveCommand: Received message {} from {}", message, getSender());
        }

        if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
            handleReadDataReply(message);
        } else if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
            handleCreateTransaction(message);
        } else if(message instanceof ForwardedReadyTransaction) {
            handleForwardedReadyTransaction((ForwardedReadyTransaction)message);
        } else if(message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
            handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
        } else if(message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
            handleCommitTransaction(CommitTransaction.fromSerializable(message));
        } else if(message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
            handleAbortTransaction(AbortTransaction.fromSerializable(message));
        } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
            closeTransactionChain(CloseTransactionChain.fromSerializable(message));
        } else if (message instanceof RegisterChangeListener) {
            registerChangeListener((RegisterChangeListener) message);
        } else if (message instanceof UpdateSchemaContext) {
            updateSchemaContext((UpdateSchemaContext) message);
        } else if (message instanceof PeerAddressResolved) {
            PeerAddressResolved resolved = (PeerAddressResolved) message;
            setPeerAddress(resolved.getPeerId().toString(),
                resolved.getPeerAddress());
        } else if(message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
            handleTransactionCommitTimeoutCheck();
        } else {
            super.onReceiveCommand(message);
        }
    }
    @Override
    protected Optional<ActorRef> getRoleChangeNotifier() {
        return roleChangeNotifier;
    }
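    /**
     * Invoked when the periodic TX_COMMIT_TIMEOUT_CHECK_MESSAGE (scheduled in onRecoveryComplete)
     * fires: if the current in-progress cohort entry has been idle longer than
     * transactionCommitTimeout, the transaction is aborted via doAbortTransaction.
     */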
    private void handleTransactionCommitTimeoutCheck() {
        CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
        if(cohortEntry != null) {
            long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
            if(elapsed > transactionCommitTimeout) {
                LOG.warning("Current transaction {} has timed out after {} ms - aborting",
                    cohortEntry.getTransactionID(), transactionCommitTimeout);

                doAbortTransaction(cohortEntry.getTransactionID(), null);
            }
        }
    }
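    /**
     * Handles the final CommitTransaction message of the three-phase commit. The cohort's
     * preCommit and commit are run back-to-back here (see the inline comments); the modification
     * is either applied to the store immediately (no followers and no persistence) or handed to
     * persistData for replication, in which case finishCommit completes the commit once
     * applyState is called back.
     */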
    private void handleCommitTransaction(final CommitTransaction commit) {
        final String transactionID = commit.getTransactionID();

        LOG.debug("Committing transaction {}", transactionID);

        // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
        // this transaction.
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // We're not the current Tx - the Tx was likely expired b/c it took too long in
            // between the canCommit and commit messages.
            IllegalStateException ex = new IllegalStateException(
                    String.format("Cannot commit transaction %s - it is not the current transaction",
                            transactionID));
            LOG.error(ex.getMessage());
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
            return;
        }

        // We perform the preCommit phase here atomically with the commit phase. This is an
        // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
        // coordination of preCommit across shards in case of failure but preCommit should not
        // normally fail since we ensure only one concurrent 3-phase commit.

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().preCommit().get();

            // If we do not have any followers and we are not using persistence we can
            // apply the modification to the state immediately.
            if(!hasFollowers() && !persistence().isRecoveryApplicable()){
                applyModificationToState(getSender(), transactionID, cohortEntry.getModification());
            } else {
                Shard.this.persistData(getSender(), transactionID,
                        new CompositeModificationByteStringPayload(cohortEntry.getModification().toSerializable()));
            }
        } catch (InterruptedException | ExecutionException e) {
            LOG.error(e, "An exception occurred while preCommitting transaction {}",
                    cohortEntry.getTransactionID());
            shardMBean.incrementFailedTransactionsCount();
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }

        cohortEntry.updateLastAccessTime();
    }
    private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final String transactionID) {
        // With persistence enabled, this method is called via applyState by the leader strategy
        // after the commit has been replicated to a majority of the followers.

        CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry == null) {
            // The transaction is no longer the current commit. This can happen if the transaction
            // was aborted prior, most likely due to timeout in the front-end. We need to finish
            // committing the transaction though since it was successfully persisted and replicated.
            // However we can't use the original cohort b/c it was already preCommitted and may
            // conflict with the current commit or may have been aborted, so we commit with a new
            // transaction.
            cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
            if(cohortEntry != null) {
                commitWithNewTransaction(cohortEntry.getModification());
                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
            } else {
                // This really shouldn't happen - it likely means that persistence or replication
                // took so long to complete such that the cohort entry was expired from the cache.
                IllegalStateException ex = new IllegalStateException(
                        String.format("Could not finish committing transaction %s - no CohortEntry found",
                                transactionID));
                LOG.error(ex.getMessage());
                sender.tell(new akka.actor.Status.Failure(ex), getSelf());
            }

            return;
        }

        LOG.debug("Finishing commit for transaction {}", cohortEntry.getTransactionID());

        try {
            // We block on the future here so we don't have to worry about possibly accessing our
            // state on a different thread outside of our dispatcher. Also, the data store
            // currently uses a same thread executor anyway.
            cohortEntry.getCohort().commit().get();

            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());

            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());

        } catch (InterruptedException | ExecutionException e) {
            sender.tell(new akka.actor.Status.Failure(e), getSelf());

            LOG.error(e, "An exception occurred while committing transaction {}", transactionID);
            shardMBean.incrementFailedTransactionsCount();
        }

        commitCoordinator.currentTransactionComplete(transactionID, true);
    }
    private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
        LOG.debug("Handling canCommit for transaction {}", canCommit.getTransactionID());
        commitCoordinator.handleCanCommit(canCommit, getSender(), self());
    }
    private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
        LOG.debug("Readying transaction {}, client version {}", ready.getTransactionID(),
                ready.getTxnClientVersion());

        // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
        // commitCoordinator in preparation for the subsequent three phase commit initiated by
        // the front-end.
        commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
                ready.getModification());

        // Return our actor path as we'll handle the three phase commit, except if the Tx client
        // version < 1 (Helium-1 version). This means the Tx was initiated by a base Helium version
        // node. In that case, the subsequent 3-phase commit messages won't contain the
        // transactionId so to maintain backwards compatibility, we create a separate cohort actor
        // to provide the compatible behavior.
        ActorRef replyActorPath = self();
        if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
            LOG.debug("Creating BackwardsCompatibleThreePhaseCommitCohort");
            replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
                    ready.getTransactionID()));
        }

        ReadyTransactionReply readyTransactionReply = new ReadyTransactionReply(
                Serialization.serializedActorPath(replyActorPath));
        getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
            readyTransactionReply, getSelf());
    }
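    /**
     * Abort handling: handleAbortTransaction services a front-end AbortTransaction message, while
     * doAbortTransaction is also invoked directly with a null sender by the commit timeout check.
     */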
    private void handleAbortTransaction(final AbortTransaction abort) {
        doAbortTransaction(abort.getTransactionID(), getSender());
    }
    void doAbortTransaction(final String transactionID, final ActorRef sender) {
        final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
        if(cohortEntry != null) {
            LOG.debug("Aborting transaction {}", transactionID);

            // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
            // aborted during replication in which case we may still commit locally if replication
            // succeeds.
            commitCoordinator.currentTransactionComplete(transactionID, false);

            final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
            final ActorRef self = getSelf();

            Futures.addCallback(future, new FutureCallback<Void>() {
                @Override
                public void onSuccess(final Void v) {
                    shardMBean.incrementAbortTransactionsCount();

                    if(sender != null) {
                        sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), self);
                    }
                }

                @Override
                public void onFailure(final Throwable t) {
                    LOG.error(t, "An exception happened during abort");

                    if(sender != null) {
                        sender.tell(new akka.actor.Status.Failure(t), self);
                    }
                }
            });
        }
    }
    private void handleCreateTransaction(final Object message) {
        if(isLeader()) {
            createTransaction(CreateTransaction.fromSerializable(message));
        } else if (getLeader() != null) {
            getLeader().forward(message, getContext());
        } else {
            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
                "Could not find shard leader so transaction cannot be created. This typically happens" +
                " when the system is coming up or recovering and a leader is being elected. Try again" +
                " later.")), getSelf());
        }
    }
    private void handleReadDataReply(final Object message) {
        // This must be for install snapshot. Don't want to open this up and trigger
        // deserialization.

        self().tell(new CaptureSnapshotReply(ReadDataReply.fromSerializableAsByteString(message)),
                self());

        createSnapshotTransaction = null;

        // Send a PoisonPill instead of sending close transaction because we do not really need
        // a response.
        getSender().tell(PoisonPill.getInstance(), self());
    }
    private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
        DOMStoreTransactionChain chain =
            transactionChains.remove(closeTransactionChain.getTransactionChainId());

        if(chain != null) {
            chain.close();
        }
    }
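    /**
     * Creates the ShardTransaction actor for a CreateTransaction request. Standalone transactions
     * are created directly from the store; transactions that carry a non-empty transactionChainId
     * are created from a per-chain DOMStoreTransactionChain that is cached in transactionChains
     * and closed via CloseTransactionChain or when leadership is lost (see onStateChanged).
     */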
    private ActorRef createTypedTransactionActor(int transactionType,
            ShardTransactionIdentifier transactionId, String transactionChainId,
            short clientVersion) {

        DOMStoreTransactionFactory factory = store;

        if(!transactionChainId.isEmpty()) {
            factory = transactionChains.get(transactionChainId);
            if(factory == null) {
                DOMStoreTransactionChain transactionChain = store.createTransactionChain();
                transactionChains.put(transactionChainId, transactionChain);
                factory = transactionChain;
            }
        }

        if(this.schemaContext == null) {
            throw new IllegalStateException("SchemaContext is not set");
        }

        if (transactionType == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {

            shardMBean.incrementReadOnlyTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId(), clientVersion),
                    transactionId.toString());

        } else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {

            shardMBean.incrementReadWriteTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId(), clientVersion),
                    transactionId.toString());

        } else if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {

            shardMBean.incrementWriteOnlyTransactionCount();

            return getContext().actorOf(
                ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
                        schemaContext, datastoreContext, shardMBean,
                        transactionId.getRemoteTransactionId(), clientVersion),
                    transactionId.toString());
        } else {
            throw new IllegalArgumentException(
                "Shard=" + name + ":CreateTransaction message has unidentified transaction type=" +
                    transactionType);
        }
    }
    private void createTransaction(CreateTransaction createTransaction) {
        try {
            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
                createTransaction.getTransactionId(), createTransaction.getTransactionChainId(),
                createTransaction.getVersion());

            getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
                createTransaction.getTransactionId()).toSerializable(), getSelf());
        } catch (Exception e) {
            getSender().tell(new akka.actor.Status.Failure(e), getSelf());
        }
    }
    private ActorRef createTransaction(int transactionType, String remoteTransactionId,
            String transactionChainId, short clientVersion) {

        ShardTransactionIdentifier transactionId =
            ShardTransactionIdentifier.builder()
                .remoteTransactionId(remoteTransactionId)
                .build();

        if(LOG.isDebugEnabled()) {
            LOG.debug("Creating transaction : {} ", transactionId);
        }

        ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
            transactionChainId, clientVersion);

        return transactionActor;
    }
    private void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
        throws ExecutionException, InterruptedException {
        DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
        commitCohort.preCommit().get();
        commitCohort.commit().get();
    }
    private void commitWithNewTransaction(final Modification modification) {
        DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
        modification.apply(tx);
        try {
            syncCommitTransaction(tx);
            shardMBean.incrementCommittedTransactionCount();
            shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
        } catch (InterruptedException | ExecutionException e) {
            shardMBean.incrementFailedTransactionsCount();
            LOG.error(e, "Failed to commit");
        }
    }
    private void updateSchemaContext(final UpdateSchemaContext message) {
        this.schemaContext = message.getSchemaContext();
        updateSchemaContext(message.getSchemaContext());
        store.onGlobalContextUpdated(message.getSchemaContext());
    }
    void updateSchemaContext(final SchemaContext schemaContext) {
        store.onGlobalContextUpdated(schemaContext);
    }
    private void registerChangeListener(final RegisterChangeListener registerChangeListener) {

        LOG.debug("registerDataChangeListener for {}", registerChangeListener.getPath());

        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                                     NormalizedNode<?, ?>>> registration;
        if(isLeader()) {
            registration = doChangeListenerRegistration(registerChangeListener);
        } else {
            LOG.debug("Shard is not the leader - delaying registration");

            DelayedListenerRegistration delayedReg =
                    new DelayedListenerRegistration(registerChangeListener);
            delayedListenerRegistrations.add(delayedReg);
            registration = delayedReg;
        }

        ActorRef listenerRegistration = getContext().actorOf(
                DataChangeListenerRegistration.props(registration));

        LOG.debug("registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
                listenerRegistration.path());

        getSender().tell(new RegisterChangeListenerReply(listenerRegistration.path()), getSelf());
    }
    private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                                 NormalizedNode<?, ?>>> doChangeListenerRegistration(
            final RegisterChangeListener registerChangeListener) {

        ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
                registerChangeListener.getDataChangeListenerPath());

        // Notify the listener whether notifications should be enabled or not:
        // if this shard is the leader then it will enable notifications,
        // otherwise it will not.
        dataChangeListenerPath.tell(new EnableNotification(true), getSelf());

        // Now store a reference to the data change listener so it can be notified
        // at a later point if notifications should be enabled or disabled.
        dataChangeListeners.add(dataChangeListenerPath);

        AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
                new DataChangeListenerProxy(dataChangeListenerPath);

        LOG.debug("Registering for path {}", registerChangeListener.getPath());

        return store.registerChangeListener(registerChangeListener.getPath(), listener,
                registerChangeListener.getScope());
    }
    private boolean isMetricsCaptureEnabled() {
        CommonConfig config = new CommonConfig(getContext().system().settings().config());
        return config.isMetricCaptureEnabled();
    }
    @Override
    protected void startLogRecoveryBatch(final int maxBatchSize) {
        currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : starting log recovery batch with max size {}", persistenceId(), maxBatchSize);
        }
    }
    @Override
    protected void appendRecoveredLogEntry(final Payload data) {
        if (data instanceof CompositeModificationPayload) {
            currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
        } else if (data instanceof CompositeModificationByteStringPayload) {
            currentLogRecoveryBatch.add(((CompositeModificationByteStringPayload) data).getModification());
        } else {
            LOG.error("Unknown state received {} during recovery", data);
        }
    }
    @Override
    protected void applyRecoverySnapshot(final ByteString snapshot) {
        if(recoveryCoordinator == null) {
            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
        }

        recoveryCoordinator.submit(snapshot, store.newWriteOnlyTransaction());

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : submitted recovery snapshot", persistenceId());
        }
    }
    @Override
    protected void applyCurrentLogRecoveryBatch() {
        if(recoveryCoordinator == null) {
            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
        }

        recoveryCoordinator.submit(currentLogRecoveryBatch, store.newWriteOnlyTransaction());

        if(LOG.isDebugEnabled()) {
            LOG.debug("{} : submitted log recovery batch with size {}", persistenceId(),
                currentLogRecoveryBatch.size());
        }
    }
    @Override
    protected void onRecoveryComplete() {
        if(recoveryCoordinator != null) {
            Collection<DOMStoreWriteTransaction> txList = recoveryCoordinator.getTransactions();

            if(LOG.isDebugEnabled()) {
                LOG.debug("{} : recovery complete - committing {} Tx's", persistenceId(), txList.size());
            }

            for(DOMStoreWriteTransaction tx: txList) {
                try {
                    syncCommitTransaction(tx);
                    shardMBean.incrementCommittedTransactionCount();
                } catch (InterruptedException | ExecutionException e) {
                    shardMBean.incrementFailedTransactionsCount();
                    LOG.error(e, "Failed to commit");
                }
            }
        }

        recoveryCoordinator = null;
        currentLogRecoveryBatch = null;
        updateJournalStats();

        // notify the shard manager
        getContext().parent().tell(new ActorInitialized(), getSelf());

        // Being paranoid here - this method should only be called once but just in case...
        if(txCommitTimeoutCheckSchedule == null) {
            // Schedule a message to be periodically sent to check if the current in-progress
            // transaction should be expired and aborted.
            FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
            txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                period, period, getSelf(),
                TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
        }
    }
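    /**
     * RaftActor callback invoked after a payload has been persisted and replicated. The payload
     * is unwrapped to its Modification and applied via applyModificationToState, which either
     * commits replicated state from the leader (null clientActor) or finishes an in-flight
     * three-phase commit for the original sender.
     */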
    @Override
    protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {

        if (data instanceof CompositeModificationPayload) {
            Object modification = ((CompositeModificationPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else if(data instanceof CompositeModificationByteStringPayload) {
            Object modification = ((CompositeModificationByteStringPayload) data).getModification();

            applyModificationToState(clientActor, identifier, modification);
        } else {
            LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
                data, data.getClass().getClassLoader(),
                CompositeModificationPayload.class.getClassLoader());
        }

        updateJournalStats();
    }
    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
        if(modification == null) {
            LOG.error(
                "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
                clientActor != null ? clientActor.path().toString() : null, identifier);
        } else if(clientActor == null) {
            // There's no clientActor to which to send a commit reply so we must be applying
            // replicated state from the leader.
            commitWithNewTransaction(MutableCompositeModification.fromSerializable(
                    modification, schemaContext));
        } else {
            // This must be the OK to commit after replication consensus.
            finishCommit(clientActor, identifier);
        }
    }
    private void updateJournalStats() {
        ReplicatedLogEntry lastLogEntry = getLastLogEntry();

        if (lastLogEntry != null) {
            shardMBean.setLastLogIndex(lastLogEntry.getIndex());
            shardMBean.setLastLogTerm(lastLogEntry.getTerm());
        }

        shardMBean.setCommitIndex(getCommitIndex());
        shardMBean.setLastApplied(getLastApplied());
        shardMBean.setInMemoryJournalDataSize(getRaftActorContext().getReplicatedLog().dataSize());
    }
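    /**
     * Snapshot capture: a read-only ShardTransaction actor is used as a worker to read the entire
     * data tree asynchronously; its ReadDataReply is intercepted in handleReadDataReply and turned
     * into a CaptureSnapshotReply for the RaftActor snapshotting machinery.
     */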
    @Override
    protected void createSnapshot() {
        if (createSnapshotTransaction == null) {

            // Create a transaction. We are really going to treat the transaction as a worker
            // so that this actor does not get blocked building the snapshot.
            createSnapshotTransaction = createTransaction(
                TransactionProxy.TransactionType.READ_ONLY.ordinal(),
                "createSnapshot" + ++createSnapshotTransactionCounter, "",
                DataStoreVersions.CURRENT_VERSION);

            createSnapshotTransaction.tell(
                new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(), self());
        }
    }
    @Override
    protected void applySnapshot(final ByteString snapshot) {
        // Since this will be done only on Recovery or when this actor is a Follower
        // we can safely commit everything in here. We need not worry about event notifications
        // as they would have already been disabled on the follower.

        LOG.info("Applying snapshot");
        try {
            DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
            NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
            NormalizedNode<?, ?> node = new NormalizedNodeToNodeCodec(schemaContext)
                .decode(serializedNode);

            // delete everything first
            transaction.delete(YangInstanceIdentifier.builder().build());

            // Add everything from the remote node back
            transaction.write(YangInstanceIdentifier.builder().build(), node);
            syncCommitTransaction(transaction);
        } catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) {
            LOG.error(e, "An exception occurred when applying snapshot");
        } finally {
            LOG.info("Done applying snapshot");
        }
    }
    @Override
    protected void onStateChanged() {
        boolean isLeader = isLeader();
        for (ActorSelection dataChangeListener : dataChangeListeners) {
            dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
        }

        if(isLeader) {
            for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
                if(!reg.isClosed()) {
                    reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
                }
            }

            delayedListenerRegistrations.clear();
        }

        shardMBean.setRaftState(getRaftState().name());
        shardMBean.setCurrentTerm(getCurrentTerm());

        // If this actor is no longer the leader close all the transaction chains
        if(!isLeader) {
            for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()) {
                if(LOG.isDebugEnabled()) {
                    LOG.debug(
                        "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
                        entry.getKey(), getId());
                }
                entry.getValue().close();
            }

            transactionChains.clear();
        }
    }
    @Override
    protected DataPersistenceProvider persistence() {
        return dataPersistenceProvider;
    }
    @Override protected void onLeaderChanged(final String oldLeader, final String newLeader) {
        shardMBean.setLeader(newLeader);
    }
    @Override public String persistenceId() {
        return this.name.toString();
    }
    DataPersistenceProvider getDataPersistenceProvider() {
        return dataPersistenceProvider;
    }
    private static class ShardCreator implements Creator<Shard> {

        private static final long serialVersionUID = 1L;

        final ShardIdentifier name;
        final Map<ShardIdentifier, String> peerAddresses;
        final DatastoreContext datastoreContext;
        final SchemaContext schemaContext;

        ShardCreator(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
                final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
            this.name = name;
            this.peerAddresses = peerAddresses;
            this.datastoreContext = datastoreContext;
            this.schemaContext = schemaContext;
        }

        @Override
        public Shard create() throws Exception {
            return new Shard(name, peerAddresses, datastoreContext, schemaContext);
        }
    }
    InMemoryDOMDataStore getDataStore() {
        return store;
    }
    ShardStats getShardMBean() {
        return shardMBean;
    }
    private static class DelayedListenerRegistration implements
        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {

        private volatile boolean closed;

        private final RegisterChangeListener registerChangeListener;

        private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                                             NormalizedNode<?, ?>>> delegate;

        DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
            this.registerChangeListener = registerChangeListener;
        }

        void setDelegate(final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                            NormalizedNode<?, ?>>> registration) {
            this.delegate = registration;
        }

        boolean isClosed() {
            return closed;
        }

        RegisterChangeListener getRegisterChangeListener() {
            return registerChangeListener;
        }

        @Override
        public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
            return delegate != null ? delegate.getInstance() : null;
        }

        @Override
        public void close() {
            closed = true;
            if(delegate != null) {
                delegate.close();
            }
        }
    }
}