+ final TransactionIdentifier txId = nextTransactionId();
+ modification.ready();
+ final ReadyLocalTransaction readyMessage = new ReadyLocalTransaction(txId, modification, false);
+
+ shard.tell(readyMessage, getRef());
+
+ expectMsgClass(ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(txId, CURRENT_VERSION).toSerializable(), getRef());
+ final CanCommitTransactionReply canCommitReply = CanCommitTransactionReply
+ .fromSerializable(expectMsgClass(CanCommitTransactionReply.class));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CommitTransaction(txId, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(CommitTransactionReply.class);
+
+ final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
+ assertEquals(TestModel.OUTER_LIST_QNAME.getLocalName(), mergeData, actualNode);
+ }
+ };
+ }
+
+ // Verifies the full ready/canCommit/commit cycle succeeds when persistence is disabled;
+ // the committed data must still be visible in the in-memory store.
+ @Test
+ public void testReadWriteCommitWithPersistenceDisabled() throws Exception {
+ dataStoreContextBuilder.persistent(false);
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitWithPersistenceDisabled");
+
+ waitUntilLeader(shard);
+
+ // Set up a simulated transaction and ready it.
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ final TransactionIdentifier transactionID = nextTransactionId();
+ final NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ shard.tell(prepareBatchedModifications(transactionID, TestModel.TEST_PATH, containerNode, false),
+ getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), getRef());
+ final CanCommitTransactionReply canCommitReply = CanCommitTransactionReply
+ .fromSerializable(expectMsgClass(duration, CanCommitTransactionReply.class));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CommitTransaction message.
+
+ shard.tell(new CommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.class);
+
+ // The write must be applied to the in-memory store even though nothing was persisted.
+ final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
+ assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
+ }
+ };
+ }
+
+ // Exercises the empty-transaction commit path via a read-write transaction.
+ @Test
+ public void testReadWriteCommitWhenTransactionHasNoModifications() {
+ testCommitWhenTransactionHasNoModifications(true);
+ }
+
+ // Exercises the empty-transaction commit path via a write-only (batched modifications) transaction.
+ @Test
+ public void testWriteOnlyCommitWhenTransactionHasNoModifications() {
+ testCommitWhenTransactionHasNoModifications(false);
+ }
+
+ // Common helper: readies a transaction containing no modifications and verifies it still
+ // commits successfully and increments the committed-transaction count, but does not advance
+ // the commit index since nothing needs to be written to the journal.
+ private void testCommitWhenTransactionHasNoModifications(final boolean readWrite) {
+ // Note that persistence is enabled which would normally result in the
+ // entry getting written to the journal
+ // but here that need not happen
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitWhenTransactionHasNoModifications-" + readWrite);
+
+ waitUntilLeader(shard);
+
+ final TransactionIdentifier transactionID = nextTransactionId();
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ // Ready an empty transaction via either the forwarded read-write path or the
+ // batched-modifications path.
+ if (readWrite) {
+ final ReadWriteShardDataTreeTransaction rwTx = shard.underlyingActor().getDataStore()
+ .newReadWriteTransaction(transactionID);
+ shard.tell(new ForwardedReadyTransaction(transactionID, CURRENT_VERSION, rwTx, false), getRef());
+ } else {
+ shard.tell(prepareBatchedModifications(transactionID, new MutableCompositeModification()),
+ getRef());
+ }
+
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), getRef());
+ final CanCommitTransactionReply canCommitReply = CanCommitTransactionReply
+ .fromSerializable(expectMsgClass(duration, CanCommitTransactionReply.class));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ shard.tell(new CommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.class);
+
+ shard.tell(Shard.GET_SHARD_MBEAN_MESSAGE, getRef());
+ final ShardStats shardStats = expectMsgClass(duration, ShardStats.class);
+
+ // Use MBean for verification
+ // Committed transaction count should increase as usual
+ assertEquals(1, shardStats.getCommittedTransactionsCount());
+
+ // Commit index should not advance because this does not go into
+ // the journal
+ assertEquals(-1, shardStats.getCommitIndex());
+ }
+ };
+ }
+
+ // Exercises the non-empty commit path via a read-write transaction.
+ @Test
+ public void testReadWriteCommitWhenTransactionHasModifications() throws Exception {
+ testCommitWhenTransactionHasModifications(true);
+ }
+
+ // Exercises the non-empty commit path via a write-only (batched modifications) transaction.
+ @Test
+ public void testWriteOnlyCommitWhenTransactionHasModifications() throws Exception {
+ testCommitWhenTransactionHasModifications(false);
+ }
+
+ // Common helper: commits a transaction with real modifications against a mocked data tree
+ // and verifies the validate/prepare/commit call order, the committed-transaction count,
+ // and that the commit index advances (an entry is journaled).
+ private void testCommitWhenTransactionHasModifications(final boolean readWrite) throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ // Delegating mock lets the real tree operate while recording invocations for InOrder checks.
+ final TipProducingDataTree dataTree = createDelegatingMockDataTree();
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardBuilder().dataTree(dataTree).props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitWhenTransactionHasModifications-" + readWrite);
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+ final TransactionIdentifier transactionID = nextTransactionId();
+
+ if (readWrite) {
+ shard.tell(prepareForwardedReadyTransaction(shard, transactionID, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false), getRef());
+ } else {
+ shard.tell(prepareBatchedModifications(transactionID, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false), getRef());
+ }
+
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), getRef());
+ final CanCommitTransactionReply canCommitReply = CanCommitTransactionReply
+ .fromSerializable(expectMsgClass(duration, CanCommitTransactionReply.class));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ shard.tell(new CommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.class);
+
+ // The three-phase commit must hit the data tree in validate -> prepare -> commit order.
+ final InOrder inOrder = inOrder(dataTree);
+ inOrder.verify(dataTree).validate(any(DataTreeModification.class));
+ inOrder.verify(dataTree).prepare(any(DataTreeModification.class));
+ inOrder.verify(dataTree).commit(any(DataTreeCandidate.class));
+
+ shard.tell(Shard.GET_SHARD_MBEAN_MESSAGE, getRef());
+ final ShardStats shardStats = expectMsgClass(duration, ShardStats.class);
+
+ // Use MBean for verification
+ // Committed transaction count should increase as usual
+ assertEquals(1, shardStats.getCommittedTransactionsCount());
+
+ // Commit index should advance as we do not have an empty
+ // modification
+ assertEquals(0, shardStats.getCommitIndex());
+ }
+ };
+ }
+
+ // Verifies that a failure in the commit phase of the first transaction is reported as a
+ // Status.Failure and does not block a second queued transaction from completing canCommit.
+ @Test
+ public void testCommitPhaseFailure() throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ final TipProducingDataTree dataTree = createDelegatingMockDataTree();
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardBuilder().dataTree(dataTree).props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitPhaseFailure");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+ final Timeout timeout = new Timeout(duration);
+
+ // Set up 2 simulated transactions with mock cohorts. The first
+ // one fails in the
+ // commit phase.
+
+ doThrow(new RuntimeException("mock commit failure")).when(dataTree)
+ .commit(any(DataTreeCandidate.class));
+
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message for the first Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ final CanCommitTransactionReply canCommitReply = CanCommitTransactionReply
+ .fromSerializable(expectMsgClass(duration, CanCommitTransactionReply.class));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CanCommitTransaction message for the 2nd Tx. This
+ // should get queued and
+ // processed after the first Tx completes.
+
+ final Future<Object> canCommitFuture = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), timeout);
+
+ // Send the CommitTransaction message for the first Tx. This
+ // should send back an error
+ // and trigger the 2nd Tx to proceed.
+
+ shard.tell(new CommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Wait for the 2nd Tx to complete the canCommit phase.
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ canCommitFuture.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(final Throwable failure, final Object resp) {
+ latch.countDown();
+ }
+ }, getSystem().dispatcher());
+
+ assertEquals("2nd CanCommit complete", true, latch.await(5, TimeUnit.SECONDS));
+
+ final InOrder inOrder = inOrder(dataTree);
+ inOrder.verify(dataTree).validate(any(DataTreeModification.class));
+ inOrder.verify(dataTree).prepare(any(DataTreeModification.class));
+
+ // FIXME: this invocation is done on the result of validate(). To test it, we need to make sure mock
+ // validate performs wrapping and we capture that mock
+ // inOrder.verify(dataTree).validate(any(DataTreeModification.class));
+
+ inOrder.verify(dataTree).commit(any(DataTreeCandidate.class));
+ }
+ };
+ }
+
+ // Verifies that a failure in the preCommit (prepare) phase of the first transaction is
+ // reported as a Status.Failure and the second queued transaction still completes canCommit.
+ @Test
+ public void testPreCommitPhaseFailure() throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ final TipProducingDataTree dataTree = createDelegatingMockDataTree();
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardBuilder().dataTree(dataTree).props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testPreCommitPhaseFailure");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+ final Timeout timeout = new Timeout(duration);
+
+ // Make prepare() fail for the first transaction.
+ doThrow(new RuntimeException("mock preCommit failure")).when(dataTree)
+ .prepare(any(DataTreeModification.class));
+
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message for the first Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ final CanCommitTransactionReply canCommitReply = CanCommitTransactionReply
+ .fromSerializable(expectMsgClass(duration, CanCommitTransactionReply.class));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CanCommitTransaction message for the 2nd Tx. This
+ // should get queued and
+ // processed after the first Tx completes.
+
+ final Future<Object> canCommitFuture = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), timeout);
+
+ // Send the CommitTransaction message for the first Tx. This
+ // should send back an error
+ // and trigger the 2nd Tx to proceed.
+
+ shard.tell(new CommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Wait for the 2nd Tx to complete the canCommit phase.
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ canCommitFuture.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(final Throwable failure, final Object resp) {
+ latch.countDown();
+ }
+ }, getSystem().dispatcher());
+
+ assertEquals("2nd CanCommit complete", true, latch.await(5, TimeUnit.SECONDS));
+
+ final InOrder inOrder = inOrder(dataTree);
+ inOrder.verify(dataTree).validate(any(DataTreeModification.class));
+ inOrder.verify(dataTree).prepare(any(DataTreeModification.class));
+ inOrder.verify(dataTree).validate(any(DataTreeModification.class));
+ }
+ };
+ }
+
+ // Verifies that a validation failure in the canCommit phase is reported as a Status.Failure
+ // and that the failed cohort is cleaned up so a subsequent transaction can commit.
+ @Test
+ public void testCanCommitPhaseFailure() throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ final TipProducingDataTree dataTree = createDelegatingMockDataTree();
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardBuilder().dataTree(dataTree).props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCanCommitPhaseFailure");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+
+ // First validate() call fails; subsequent calls succeed.
+ doThrow(new DataValidationFailedException(YangInstanceIdentifier.EMPTY, "mock canCommit failure"))
+ .doNothing().when(dataTree).validate(any(DataTreeModification.class));
+
+ shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Send another can commit to ensure the failed one got cleaned
+ // up.
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new CanCommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), getRef());
+ final CanCommitTransactionReply reply = CanCommitTransactionReply
+ .fromSerializable(expectMsgClass(duration, CanCommitTransactionReply.class));
+ assertEquals("getCanCommit", true, reply.getCanCommit());
+ }
+ };
+ }
+
+ // Runs the immediate-commit canCommit-failure scenario for both the read-write and
+ // write-only transaction paths.
+ @Test
+ public void testImmediateCommitWithCanCommitPhaseFailure() throws Exception {
+ testImmediateCommitWithCanCommitPhaseFailure(true);
+ testImmediateCommitWithCanCommitPhaseFailure(false);
+ }
+
+ // Common helper: an immediate (3-phase collapsed) commit whose validation fails must answer
+ // with Status.Failure, and a subsequent immediate commit must succeed, proving cleanup.
+ private void testImmediateCommitWithCanCommitPhaseFailure(final boolean readWrite) throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ final TipProducingDataTree dataTree = createDelegatingMockDataTree();
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardBuilder().dataTree(dataTree).props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testImmediateCommitWithCanCommitPhaseFailure-" + readWrite);
+
+ waitUntilLeader(shard);
+
+ // First validate() call fails; subsequent calls succeed.
+ doThrow(new DataValidationFailedException(YangInstanceIdentifier.EMPTY, "mock canCommit failure"))
+ .doNothing().when(dataTree).validate(any(DataTreeModification.class));
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+
+ // The trailing 'true' requests an immediate commit on ready.
+ if (readWrite) {
+ shard.tell(prepareForwardedReadyTransaction(shard, transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
+ } else {
+ shard.tell(prepareBatchedModifications(transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
+ }
+
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Send another can commit to ensure the failed one got cleaned
+ // up.
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ if (readWrite) {
+ shard.tell(prepareForwardedReadyTransaction(shard, transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
+ } else {
+ shard.tell(prepareBatchedModifications(transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true), getRef());
+ }
+
+ expectMsgClass(duration, CommitTransactionReply.class);
+ }
+ };
+ }
+
+ // Verifies that an abort arriving during replication (after persist, before finish-commit)
+ // does not prevent the already-persisted data from being applied to the in-memory store.
+ @Test
+ public void testAbortWithCommitPending() throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ final Creator<Shard> creator = () -> new Shard(newShardBuilder()) {
+ @Override
+ void persistPayload(final TransactionIdentifier transactionId, final Payload payload,
+ final boolean batchHint) {
+ // Simulate an AbortTransaction message occurring during
+ // replication, after
+ // persisting and before finishing the commit to the
+ // in-memory store.
+
+ doAbortTransaction(transactionId, null);
+ super.persistPayload(transactionId, payload, batchHint);
+ }
+ };
+
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(Props
+ .create(new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testAbortWithCommitPending");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ final TransactionIdentifier transactionID = nextTransactionId();
+
+ shard.tell(prepareBatchedModifications(transactionID, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new CanCommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.class);
+
+ shard.tell(new CommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.class);
+
+ final NormalizedNode<?, ?> node = readStore(shard, TestModel.TEST_PATH);
+
+ // Since we're simulating an abort occurring during replication
+ // and before finish commit,
+ // the data should still get written to the in-memory store
+ // since we've gotten past
+ // canCommit and preCommit and persisted the data.
+ assertNotNull(TestModel.TEST_QNAME.getLocalName() + " not found", node);
+ }
+ };
+ }
+
+ // Verifies that a canCommit'ed transaction that never receives a commit times out
+ // (1s timeout configured) and a later transaction can still commit; committing the
+ // timed-out transaction afterwards must fail.
+ @Test
+ public void testTransactionCommitTimeout() throws Exception {
+ dataStoreContextBuilder.shardTransactionCommitTimeoutInSeconds(1);
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testTransactionCommitTimeout");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ // Seed the store so the list entries below have a parent.
+ writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ writeToStore(shard, TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
+
+ // Ready 2 Tx's - the first will timeout
+
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+ shard.tell(
+ prepareBatchedModifications(transactionID1,
+ YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1), false),
+ getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ final YangInstanceIdentifier listNodePath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build();
+ shard.tell(
+ prepareBatchedModifications(transactionID2, listNodePath,
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2), false),
+ getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // canCommit 1st Tx. We don't send the commit so it should
+ // timeout.
+
+ shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.class);
+
+ // canCommit the 2nd Tx - it should complete after the 1st Tx
+ // times out.
+
+ shard.tell(new CanCommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.class);
+
+ // Try to commit the 1st Tx - should fail as it's not the
+ // current Tx.
+
+ shard.tell(new CommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Commit the 2nd Tx.
+
+ shard.tell(new CommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.class);
+
+ final NormalizedNode<?, ?> node = readStore(shard, listNodePath);
+ assertNotNull(listNodePath + " not found", node);
+ }
+ };
+ }
+
+// @Test
+// @Ignore
+// public void testTransactionCommitQueueCapacityExceeded() throws Throwable {
+// dataStoreContextBuilder.shardTransactionCommitQueueCapacity(2);
+//
+// new ShardTestKit(getSystem()) {{
+// final TestActorRef<Shard> shard = actorFactory.createTestActor(
+// newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+// "testTransactionCommitQueueCapacityExceeded");
+//
+// waitUntilLeader(shard);
+//
+// final FiniteDuration duration = duration("5 seconds");
+//
+// final ShardDataTree dataStore = shard.underlyingActor().getDataStore();
+//
+// final TransactionIdentifier transactionID1 = nextTransactionId();
+// final MutableCompositeModification modification1 = new MutableCompositeModification();
+// final ShardDataTreeCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+// TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), transactionID1,
+// modification1);
+//
+// final TransactionIdentifier transactionID2 = nextTransactionId();
+// final MutableCompositeModification modification2 = new MutableCompositeModification();
+// final ShardDataTreeCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+// TestModel.OUTER_LIST_PATH,
+// ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), transactionID2,
+// modification2);
+//
+// final TransactionIdentifier transactionID3 = nextTransactionId();
+// final MutableCompositeModification modification3 = new MutableCompositeModification();
+// final ShardDataTreeCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+// TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), transactionID3,
+// modification3);
+//
+// // Ready the Tx's
+//
+// shard.tell(prepareReadyTransactionMessage(false, shard.underlyingActor(), cohort1, transactionID1,
+// modification1), getRef());
+// expectMsgClass(duration, ReadyTransactionReply.class);
+//
+// shard.tell(prepareReadyTransactionMessage(false, shard.underlyingActor(), cohort2, transactionID2,
+// modification2), getRef());
+// expectMsgClass(duration, ReadyTransactionReply.class);
+//
+// // The 3rd Tx should exceed queue capacity and fail.
+//
+// shard.tell(prepareReadyTransactionMessage(false, shard.underlyingActor(), cohort3, transactionID3,
+// modification3), getRef());
+// expectMsgClass(duration, akka.actor.Status.Failure.class);
+//
+// // canCommit 1st Tx.
+//
+// shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+// expectMsgClass(duration, CanCommitTransactionReply.class);
+//
+// // canCommit the 2nd Tx - it should get queued.
+//
+// shard.tell(new CanCommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), getRef());
+//
+// // canCommit the 3rd Tx - should exceed queue capacity and fail.
+//
+// shard.tell(new CanCommitTransaction(transactionID3, CURRENT_VERSION).toSerializable(), getRef());
+// expectMsgClass(duration, akka.actor.Status.Failure.class);
+// }};
+// }
+
+ // Verifies that earlier readied transactions which never receive canCommit expire from the
+ // queue (1s timeout), allowing a later transaction's canCommit to be processed.
+ @Test
+ public void testTransactionCommitWithPriorExpiredCohortEntries() throws Exception {
+ dataStoreContextBuilder.shardTransactionCommitTimeoutInSeconds(1);
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testTransactionCommitWithPriorExpiredCohortEntries");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ final TransactionIdentifier transactionID3 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID3, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // All Tx's are readied. We'll send canCommit for the last one
+ // but not the others. The others
+ // should expire from the queue and the last one should be
+ // processed.
+
+ shard.tell(new CanCommitTransaction(transactionID3, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.class);
+ }
+ };
+ }
+
+ // Verifies that a queued transaction which never receives canCommit expires after the
+ // in-progress transaction commits, letting a later immediate-commit transaction proceed.
+ @Test
+ public void testTransactionCommitWithSubsequentExpiredCohortEntry() throws Exception {
+ dataStoreContextBuilder.shardTransactionCommitTimeoutInSeconds(1);
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testTransactionCommitWithSubsequentExpiredCohortEntry");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ final ShardDataTree dataStore = shard.underlyingActor().getDataStore();
+
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+ shard.tell(prepareBatchedModifications(transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // CanCommit the first Tx so it's the current in-progress Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.class);
+
+ // Ready the second Tx.
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ shard.tell(prepareBatchedModifications(transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), false), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Ready the third Tx.
+
+ final TransactionIdentifier transactionID3 = nextTransactionId();
+ final DataTreeModification modification3 = dataStore.newModification();
+ new WriteModification(TestModel.TEST2_PATH, ImmutableNodes.containerNode(TestModel.TEST2_QNAME))
+ .apply(modification3);
+ modification3.ready();
+ // 'true' requests an immediate commit for the third Tx once it reaches the head.
+ final ReadyLocalTransaction readyMessage = new ReadyLocalTransaction(transactionID3, modification3,
+ true);
+ shard.tell(readyMessage, getRef());
+
+ // Commit the first Tx. After completing, the second should
+ // expire from the queue and the third
+ // Tx committed.
+
+ shard.tell(new CommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.class);
+
+ // Expect commit reply from the third Tx.
+
+ expectMsgClass(duration, CommitTransactionReply.class);
+
+ final NormalizedNode<?, ?> node = readStore(shard, TestModel.TEST2_PATH);
+ assertNotNull(TestModel.TEST2_PATH + " not found", node);
+ }
+ };
+ }
+
+ // A CanCommitTransaction for a transaction that was never readied must be answered
+ // with a Status.Failure.
+ @Test
+ public void testCanCommitBeforeReadyFailure() throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCanCommitBeforeReadyFailure");
+
+ // Ask for canCommit of a transaction the shard has never seen.
+ final TransactionIdentifier unknownTxId = nextTransactionId();
+ final FiniteDuration duration = duration("5 seconds");
+ shard.tell(new CanCommitTransaction(unknownTxId, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+ }
+ };
+ }
+
+ // Verifies that aborting the transaction at the head of the queue after its canCommit
+ // allows the next queued transaction's canCommit to complete successfully.
+ @Test
+ public void testAbortAfterCanCommit() throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), "testAbortAfterCanCommit");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+ final Timeout timeout = new Timeout(duration);
+
+ // Ready 2 transactions - the first one will be aborted.
+
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the CanCommitTransaction message for the first Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply
+ .fromSerializable(expectMsgClass(duration, CanCommitTransactionReply.class));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CanCommitTransaction message for the 2nd Tx. This
+ // should get queued and
+ // processed after the first Tx completes.
+
+ final Future<Object> canCommitFuture = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), timeout);
+
+ // Send the AbortTransaction message for the first Tx. This
+ // should trigger the 2nd
+ // Tx to proceed.
+
+ shard.tell(new AbortTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, AbortTransactionReply.class);
+
+ // Wait for the 2nd Tx to complete the canCommit phase.
+
+ canCommitReply = (CanCommitTransactionReply) Await.result(canCommitFuture, duration);
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+ }
+ };
+ }
+
+ // Verifies that aborting a readied (but not canCommit'ed) transaction empties the pending
+ // queue, makes a subsequent canCommit for it fail with IllegalStateException, and leaves
+ // the shard able to process a fresh transaction.
+ @Test
+ public void testAbortAfterReady() throws Exception {
+ dataStoreContextBuilder.shardTransactionCommitTimeoutInSeconds(1);
+ new ShardTestKit(getSystem()) {
+ {
+ final TestActorRef<Shard> shard = actorFactory.createTestActor(
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()), "testAbortAfterReady");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ // Ready a tx.
+
+ final TransactionIdentifier transactionID1 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ // Send the AbortTransaction message.
+
+ shard.tell(new AbortTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, AbortTransactionReply.class);
+
+ assertEquals("getPendingTxCommitQueueSize", 0, shard.underlyingActor().getPendingTxCommitQueueSize());
+
+ // Now send CanCommitTransaction - should fail.
+
+ shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+ final Throwable failure = expectMsgClass(duration, akka.actor.Status.Failure.class).cause();
+ assertTrue("Failure type", failure instanceof IllegalStateException);
+
+ // Ready and CanCommit another and verify success.
+
+ final TransactionIdentifier transactionID2 = nextTransactionId();
+ shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.class);
+
+ shard.tell(new CanCommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.class);
+ }
+ };
+ }
+
+    @Test
+    public void testAbortQueuedTransaction() throws Exception {
+        new ShardTestKit(getSystem()) {
+            {
+                // Actor name corrected: it was copy-pasted as "testAbortAfterReady", which collides
+                // with the actor created by that test and misattributes any failure.
+                final TestActorRef<Shard> shard = actorFactory.createTestActor(
+                        newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+                        "testAbortQueuedTransaction");
+
+                waitUntilLeader(shard);
+
+                final FiniteDuration duration = duration("5 seconds");
+
+                // Ready 3 tx's.
+
+                final TransactionIdentifier transactionID1 = nextTransactionId();
+                shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
+                        ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+                expectMsgClass(duration, ReadyTransactionReply.class);
+
+                final TransactionIdentifier transactionID2 = nextTransactionId();
+                shard.tell(newBatchedModifications(transactionID2, TestModel.TEST_PATH,
+                        ImmutableNodes.containerNode(TestModel.TEST_QNAME), true, false, 1), getRef());
+                expectMsgClass(duration, ReadyTransactionReply.class);
+
+                final TransactionIdentifier transactionID3 = nextTransactionId();
+                shard.tell(
+                        newBatchedModifications(transactionID3, TestModel.OUTER_LIST_PATH,
+                                ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(), true, false, 1),
+                        getRef());
+                expectMsgClass(duration, ReadyTransactionReply.class);
+
+                // Abort the second tx while it's queued.
+
+                shard.tell(new AbortTransaction(transactionID2, CURRENT_VERSION).toSerializable(), getRef());
+                expectMsgClass(duration, AbortTransactionReply.class);
+
+                // Commit the other 2 - the commit queue must skip over the aborted entry.
+
+                shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+                expectMsgClass(duration, CanCommitTransactionReply.class);
+
+                shard.tell(new CommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), getRef());
+                expectMsgClass(duration, CommitTransactionReply.class);
+
+                shard.tell(new CanCommitTransaction(transactionID3, CURRENT_VERSION).toSerializable(), getRef());
+                expectMsgClass(duration, CanCommitTransactionReply.class);
+
+                shard.tell(new CommitTransaction(transactionID3, CURRENT_VERSION).toSerializable(), getRef());
+                expectMsgClass(duration, CommitTransactionReply.class);
+
+                assertEquals("getPendingTxCommitQueueSize", 0, shard.underlyingActor().getPendingTxCommitQueueSize());
+            }
+        };
+    }
+
+    @Test
+    public void testCreateSnapshotWithNonPersistentData() throws Exception {
+        // Snapshot capture must also work when journal persistence is disabled.
+        testCreateSnapshot(false, "testCreateSnapshotWithNonPersistentData");
+    }
+
+    @Test
+    public void testCreateSnapshot() throws Exception {
+        // Snapshot capture for the normal persistent-shard configuration.
+        testCreateSnapshot(true, "testCreateSnapshot");
+    }
+
+    /**
+     * Drives two snapshot capture cycles against a shard and verifies that the object handed to
+     * saveSnapshot deserializes back to the shard's current root node.
+     *
+     * @param persistent whether the shard's journal persistence is enabled
+     * @param shardActorName unique actor name for the shard under test
+     */
+    private void testCreateSnapshot(final boolean persistent, final String shardActorName) throws Exception {
+        // Latch is wrapped in an AtomicReference so it can be re-armed between the two capture cycles.
+        final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
+
+        // Captures the last object passed to saveSnapshot for inspection by the test.
+        final AtomicReference<Object> savedSnapshot = new AtomicReference<>();
+        class TestPersistentDataProvider extends DelegatingPersistentDataProvider {
+            TestPersistentDataProvider(final DataPersistenceProvider delegate) {
+                super(delegate);
+            }
+
+            @Override
+            public void saveSnapshot(final Object obj) {
+                // Record the snapshot before delegating to the real provider.
+                savedSnapshot.set(obj);
+                super.saveSnapshot(obj);
+            }
+        }
+
+        dataStoreContextBuilder.persistent(persistent);
+
+        class TestShard extends Shard {
+
+            protected TestShard(final AbstractBuilder<?, ?> builder) {
+                super(builder);
+                // Intercept persistence so saveSnapshot calls are observable.
+                setPersistence(new TestPersistentDataProvider(super.persistence()));
+            }
+
+            @Override
+            public void handleCommand(final Object message) {
+                super.handleCommand(message);
+
+                // XXX: commit_snapshot equality check references RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT
+                // Count down on either completion signal so the test can proceed.
+                if (message instanceof SaveSnapshotSuccess || "commit_snapshot".equals(message.toString())) {
+                    latch.get().countDown();
+                }
+            }
+
+            @Override
+            public RaftActorContext getRaftActorContext() {
+                // Widened to public so the test can drive the snapshot manager directly.
+                return super.getRaftActorContext();
+            }
+        }
+
+        new ShardTestKit(getSystem()) {
+            {
+                final Creator<Shard> creator = () -> new TestShard(newShardBuilder());
+
+                final TestActorRef<Shard> shard = actorFactory.createTestActor(Props
+                        .create(new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                        shardActorName);
+
+                waitUntilLeader(shard);
+                writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+                final NormalizedNode<?, ?> expectedRoot = readStore(shard, YangInstanceIdentifier.EMPTY);
+
+                // Trigger creation of a snapshot by invoking the snapshot manager directly.
+                final RaftActorContext raftActorContext = ((TestShard) shard.underlyingActor()).getRaftActorContext();
+                raftActorContext.getSnapshotManager().capture(mock(ReplicatedLogEntry.class), -1);
+                awaitAndValidateSnapshot(expectedRoot);
+
+                // Capture again to verify the cycle repeats after the latch is re-armed.
+                raftActorContext.getSnapshotManager().capture(mock(ReplicatedLogEntry.class), -1);
+                awaitAndValidateSnapshot(expectedRoot);
+            }
+
+            // Waits for a capture cycle to finish, validates the saved snapshot, then re-arms state.
+            private void awaitAndValidateSnapshot(final NormalizedNode<?, ?> expectedRoot)
+                    throws InterruptedException, IOException {
+                assertEquals("Snapshot saved", true, latch.get().await(5, TimeUnit.SECONDS));
+
+                assertTrue("Invalid saved snapshot " + savedSnapshot.get(), savedSnapshot.get() instanceof Snapshot);
+
+                verifySnapshot((Snapshot) savedSnapshot.get(), expectedRoot);
+
+                // Reset for the next capture cycle.
+                latch.set(new CountDownLatch(1));
+                savedSnapshot.set(null);
+            }
+
+            // Deserializes the snapshot payload and compares its root node to the expected tree.
+            private void verifySnapshot(final Snapshot snapshot, final NormalizedNode<?, ?> expectedRoot)
+                    throws IOException {
+                final NormalizedNode<?, ?> actual = ShardDataTreeSnapshot.deserialize(snapshot.getState()).getRootNode()
+                        .get();
+                assertEquals("Root node", expectedRoot, actual);
+            }
+        };
+    }
+
+    /**
+     * Verifies that deleting the root and rewriting a previously captured root node (the
+     * applySnapshot code path) restores the tree to its original contents.
+     */
+    @Test
+    public void testInMemoryDataTreeRestore() throws ReadFailedException, DataValidationFailedException {
+        final DataTree store = InMemoryDataTreeFactory.getInstance().create(TreeType.OPERATIONAL);
+        store.setSchemaContext(SCHEMA_CONTEXT);
+
+        // Seed the tree with a test container.
+        final DataTreeModification seedTx = store.takeSnapshot().newModification();
+        seedTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+        commitTransaction(store, seedTx);
+
+        final NormalizedNode<?, ?> expected = readStore(store, YangInstanceIdentifier.EMPTY);
+
+        // Wipe the root and write the captured snapshot back in a single transaction.
+        final DataTreeModification restoreTx = store.takeSnapshot().newModification();
+        restoreTx.delete(YangInstanceIdentifier.EMPTY);
+        restoreTx.write(YangInstanceIdentifier.EMPTY, expected);
+        commitTransaction(store, restoreTx);
+
+        final NormalizedNode<?, ?> actual = readStore(store, YangInstanceIdentifier.EMPTY);
+        assertEquals(expected, actual);
+    }
+
+    @Test
+    public void testRecoveryApplicable() {
+        // A persistent shard must report recovery as applicable; a non-persistent one must not.
+        final DatastoreContext persistentContext = DatastoreContext.newBuilder()
+                .shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(true).build();
+        final DatastoreContext nonPersistentContext = DatastoreContext.newBuilder()
+                .shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(false).build();
+
+        final Props persistentProps = Shard.builder().id(shardID).datastoreContext(persistentContext)
+                .schemaContext(SCHEMA_CONTEXT).props();
+        final Props nonPersistentProps = Shard.builder().id(shardID).datastoreContext(nonPersistentContext)
+                .schemaContext(SCHEMA_CONTEXT).props();
+
+        new ShardTestKit(getSystem()) {
+            {
+                final TestActorRef<Shard> persistentShard = actorFactory.createTestActor(persistentProps,
+                        "testPersistence1");
+                assertTrue("Recovery Applicable",
+                        persistentShard.underlyingActor().persistence().isRecoveryApplicable());
+
+                final TestActorRef<Shard> nonPersistentShard = actorFactory.createTestActor(nonPersistentProps,
+                        "testPersistence2");
+                assertFalse("Recovery Not Applicable",
+                        nonPersistentShard.underlyingActor().persistence().isRecoveryApplicable());
+            }
+        };
+    }
+
+    @Test
+    public void testOnDatastoreContext() {
+        new ShardTestKit(getSystem()) {
+            {
+                // Start persistent, then toggle persistence off and back on by sending new
+                // DatastoreContext messages to the running shard.
+                dataStoreContextBuilder.persistent(true);
+
+                final TestActorRef<Shard> shard = actorFactory.createTestActor(newShardProps(),
+                        "testOnDatastoreContext");
+                assertEquals("isRecoveryApplicable", true,
+                        shard.underlyingActor().persistence().isRecoveryApplicable());
+
+                waitUntilLeader(shard);
+
+                // Disable persistence at runtime.
+                shard.tell(dataStoreContextBuilder.persistent(false).build(), ActorRef.noSender());
+                assertEquals("isRecoveryApplicable", false,
+                        shard.underlyingActor().persistence().isRecoveryApplicable());
+
+                // Re-enable persistence.
+                shard.tell(dataStoreContextBuilder.persistent(true).build(), ActorRef.noSender());
+                assertEquals("isRecoveryApplicable", true,
+                        shard.underlyingActor().persistence().isRecoveryApplicable());
+            }
+        };
+    }
+
+    @Test
+    public void testRegisterRoleChangeListener() throws Exception {
+        new ShardTestKit(getSystem()) {
+            {
+                final TestActorRef<Shard> shard = actorFactory.createTestActor(
+                        newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+                        "testRegisterRoleChangeListener");
+                waitUntilLeader(shard);
+
+                final TestActorRef<MessageCollectorActor> listener =
+                        TestActorRef.create(getSystem(), Props.create(MessageCollectorActor.class));
+
+                // Register and wait for the acknowledgement.
+                shard.tell(new RegisterRoleChangeListener(), listener);
+                MessageCollectorActor.expectFirstMatching(listener, RegisterRoleChangeListenerReply.class);
+
+                // While this shard is leader, the notification must carry the local data tree.
+                ShardLeaderStateChanged stateChanged = MessageCollectorActor.expectFirstMatching(listener,
+                        ShardLeaderStateChanged.class);
+                assertEquals("getLocalShardDataTree present", true,
+                        stateChanged.getLocalShardDataTree().isPresent());
+                assertSame("getLocalShardDataTree", shard.underlyingActor().getDataStore().getDataTree(),
+                        stateChanged.getLocalShardDataTree().get());
+
+                MessageCollectorActor.clearMessages(listener);
+
+                // Force a leader change; the follow-up notification must not carry a data tree.
+                shard.tell(new RequestVote(10000, "member2", 50, 50), getRef());
+
+                stateChanged = MessageCollectorActor.expectFirstMatching(listener,
+                        ShardLeaderStateChanged.class);
+                assertEquals("getLocalShardDataTree present", false,
+                        stateChanged.getLocalShardDataTree().isPresent());
+            }
+        };
+    }
+
+    @Test
+    public void testFollowerInitialSyncStatus() throws Exception {
+        final TestActorRef<Shard> shard = actorFactory.createTestActor(
+                newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+                "testFollowerInitialSyncStatus");
+
+        // The MBean must mirror whichever status the shard last received.
+        final Shard shardActor = shard.underlyingActor();
+
+        shardActor.handleNonRaftCommand(new FollowerInitialSyncUpStatus(false,
+                "member-1-shard-inventory-operational"));
+        assertEquals(false, shardActor.getShardMBean().getFollowerInitialSyncStatus());
+
+        shardActor.handleNonRaftCommand(new FollowerInitialSyncUpStatus(true,
+                "member-1-shard-inventory-operational"));
+        assertEquals(true, shardActor.getShardMBean().getFollowerInitialSyncStatus());
+    }
+
+    @Test
+    public void testClusteredDataChangeListenerDelayedRegistration() throws Exception {
+        new ShardTestKit(getSystem()) {
+            {
+                final String testName = "testClusteredDataChangeListenerDelayedRegistration";
+                // Elections are disabled so the shard stays leaderless until the policy is reset below.
+                dataStoreContextBuilder.shardElectionTimeoutFactor(1000)
+                        .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
+
+                final MockDataChangeListener listener = new MockDataChangeListener(1);
+                final ActorRef dclActor = actorFactory.createActor(DataChangeListener.props(listener),
+                        actorFactory.generateActorId(testName + "-DataChangeListener"));
+
+                setupInMemorySnapshotStore();
+
+                final TestActorRef<Shard> shard = actorFactory.createTestActor(
+                        newShardBuilder().props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+                        actorFactory.generateActorId(testName + "-shard"));
+                waitUntilNoLeader(shard);
+
+                // Registration must be accepted even though there is no leader yet.
+                shard.tell(new RegisterChangeListener(TestModel.TEST_PATH, dclActor,
+                        AsyncDataBroker.DataChangeScope.BASE, true), getRef());
+                final RegisterChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
+                        RegisterChangeListenerReply.class);
+                assertNotNull("getListenerRegistrationPath", reply.getListenerRegistrationPath());
+
+                // Re-enable elections; once the shard becomes leader the listener should fire.
+                shard.tell(DatastoreContext.newBuilderFrom(dataStoreContextBuilder.build())
+                        .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
+
+                listener.waitForChangeEvents();
+            }
+        };
+    }
+
+ @Test
+ public void testClusteredDataChangeListenerRegistration() throws Exception {
+ new ShardTestKit(getSystem()) {
+ {
+ final String testName = "testClusteredDataChangeListenerRegistration";
+ final ShardIdentifier followerShardID = ShardIdentifier.create("inventory",
+ MemberName.forName(actorFactory.generateActorId(testName + "-follower")), "config");
+
+ final ShardIdentifier leaderShardID = ShardIdentifier.create("inventory",
+ MemberName.forName(actorFactory.generateActorId(testName + "-leader")), "config");
+
+ final TestActorRef<Shard> followerShard = actorFactory
+ .createTestActor(Shard.builder().id(followerShardID)
+ .datastoreContext(dataStoreContextBuilder.shardElectionTimeoutFactor(1000).build())
+ .peerAddresses(Collections.singletonMap(leaderShardID.toString(),
+ "akka://test/user/" + leaderShardID.toString()))
+ .schemaContext(SCHEMA_CONTEXT).props()
+ .withDispatcher(Dispatchers.DefaultDispatcherId()), followerShardID.toString());
+
+ final TestActorRef<Shard> leaderShard = actorFactory
+ .createTestActor(Shard.builder().id(leaderShardID).datastoreContext(newDatastoreContext())
+ .peerAddresses(Collections.singletonMap(followerShardID.toString(),
+ "akka://test/user/" + followerShardID.toString()))
+ .schemaContext(SCHEMA_CONTEXT).props()
+ .withDispatcher(Dispatchers.DefaultDispatcherId()), leaderShardID.toString());
+
+ leaderShard.tell(TimeoutNow.INSTANCE, ActorRef.noSender());
+ final String leaderPath = waitUntilLeader(followerShard);
+ assertEquals("Shard leader path", leaderShard.path().toString(), leaderPath);
+
+ final YangInstanceIdentifier path = TestModel.TEST_PATH;
+ final MockDataChangeListener listener = new MockDataChangeListener(1);
+ final ActorRef dclActor = actorFactory.createActor(DataChangeListener.props(listener),
+ actorFactory.generateActorId(testName + "-DataChangeListener"));
+
+ followerShard.tell(
+ new RegisterChangeListener(path, dclActor, AsyncDataBroker.DataChangeScope.BASE, true),
+ getRef());
+ final RegisterChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
+ RegisterChangeListenerReply.class);
+ assertNotNull("getListenerRegistratioznPath", reply.getListenerRegistrationPath());
+
+ writeToStore(followerShard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ listener.waitForChangeEvents();
+ }
+ };
+ }
+
+    @Test
+    public void testClusteredDataTreeChangeListenerDelayedRegistration() throws Exception {
+        new ShardTestKit(getSystem()) {
+            {
+                final String testName = "testClusteredDataTreeChangeListenerDelayedRegistration";
+                // Keep the shard leaderless until the custom raft policy is cleared below.
+                dataStoreContextBuilder.shardElectionTimeoutFactor(1000)
+                        .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
+
+                final MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
+                final ActorRef dtclActor = actorFactory.createActor(DataTreeChangeListenerActor.props(listener),
+                        actorFactory.generateActorId(testName + "-DataTreeChangeListener"));
+
+                setupInMemorySnapshotStore();
+
+                final TestActorRef<Shard> shard = actorFactory.createTestActor(
+                        newShardBuilder().props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+                        actorFactory.generateActorId(testName + "-shard"));
+                waitUntilNoLeader(shard);
+
+                // The registration request succeeds even without a leader.
+                shard.tell(new RegisterDataTreeChangeListener(TestModel.TEST_PATH, dtclActor, true), getRef());
+                final RegisterDataTreeChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
+                        RegisterDataTreeChangeListenerReply.class);
+                assertNotNull("getListenerRegistrationPath", reply.getListenerRegistrationPath());
+
+                // Allow elections; the listener should then receive the recovered change events.
+                shard.tell(DatastoreContext.newBuilderFrom(dataStoreContextBuilder.build())
+                        .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
+
+                listener.waitForChangeEvents();
+            }
+        };
+    }
+
+    @Test
+    public void testClusteredDataTreeChangeListenerRegistration() throws Exception {
+        new ShardTestKit(getSystem()) {
+            {
+                final String testName = "testClusteredDataTreeChangeListenerRegistration";
+                final ShardIdentifier followerShardID = ShardIdentifier.create("inventory",
+                        MemberName.forName(actorFactory.generateActorId(testName + "-follower")), "config");
+
+                final ShardIdentifier leaderShardID = ShardIdentifier.create("inventory",
+                        MemberName.forName(actorFactory.generateActorId(testName + "-leader")), "config");
+
+                // The follower gets a huge election timeout factor so it cannot win an election itself.
+                final TestActorRef<Shard> followerShard = actorFactory
+                        .createTestActor(Shard.builder().id(followerShardID)
+                                .datastoreContext(dataStoreContextBuilder.shardElectionTimeoutFactor(1000).build())
+                                .peerAddresses(Collections.singletonMap(leaderShardID.toString(),
+                                        "akka://test/user/" + leaderShardID.toString()))
+                                .schemaContext(SCHEMA_CONTEXT).props()
+                                .withDispatcher(Dispatchers.DefaultDispatcherId()), followerShardID.toString());
+
+                final TestActorRef<Shard> leaderShard = actorFactory
+                        .createTestActor(Shard.builder().id(leaderShardID).datastoreContext(newDatastoreContext())
+                                .peerAddresses(Collections.singletonMap(followerShardID.toString(),
+                                        "akka://test/user/" + followerShardID.toString()))
+                                .schemaContext(SCHEMA_CONTEXT).props()
+                                .withDispatcher(Dispatchers.DefaultDispatcherId()), leaderShardID.toString());
+
+                // Force an immediate election and verify the intended leader won.
+                leaderShard.tell(TimeoutNow.INSTANCE, ActorRef.noSender());
+                final String leaderPath = waitUntilLeader(followerShard);
+                assertEquals("Shard leader path", leaderShard.path().toString(), leaderPath);
+
+                final YangInstanceIdentifier path = TestModel.TEST_PATH;
+                final MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
+                final ActorRef dclActor = actorFactory.createActor(DataTreeChangeListenerActor.props(listener),
+                        actorFactory.generateActorId(testName + "-DataTreeChangeListener"));
+
+                // Register on the follower; use the local 'path' consistently (the original mixed
+                // 'path' and TestModel.TEST_PATH, matching the DataChangeListener variant's style).
+                followerShard.tell(new RegisterDataTreeChangeListener(path, dclActor, true), getRef());
+                final RegisterDataTreeChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
+                        RegisterDataTreeChangeListenerReply.class);
+                assertNotNull("getListenerRegistrationPath", reply.getListenerRegistrationPath());
+
+                writeToStore(followerShard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+                listener.waitForChangeEvents();
+            }
+        };
+    }
+
+ @Test
+ public void testServerRemoved() throws Exception {
+ final TestActorRef<MessageCollectorActor> parent = actorFactory.createTestActor(MessageCollectorActor.props());
+
+ final ActorRef shard = parent.underlyingActor().context().actorOf(
+ newShardBuilder().props().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testServerRemoved");
+
+ shard.tell(new ServerRemoved("test"), ActorRef.noSender());
+
+ MessageCollectorActor.expectFirstMatching(parent, ServerRemoved.class);