Add UnsignedLongBitmap
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
index 5ca0acba4d26e6df0ec2c99699a2ea244d878cfa..428cf8467c8188170d7985341118940a0186c13e 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
@@ -7,14 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static akka.actor.ActorRef.noSender;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+import static java.util.Objects.requireNonNullElse;
+
 import akka.actor.ActorRef;
 import akka.util.Timeout;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
-import com.google.common.base.Verify;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
@@ -33,6 +36,8 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.Queue;
 import java.util.SortedSet;
 import java.util.concurrent.TimeUnit;
@@ -40,26 +45,29 @@ import java.util.concurrent.TimeoutException;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.UnaryOperator;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
 import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.node.utils.transformer.ReusableNormalizedNodePruner;
 import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
 import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload;
+import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;
 import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.PurgeTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
+import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
 import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
 import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
+import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
@@ -78,30 +86,33 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
 import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
- * Internal shard state, similar to a DOMStore, but optimized for use in the actor system,
- * e.g. it does not expose public interfaces and assumes it is only ever called from a
- * single thread.
+ * Internal shard state, similar to a DOMStore, but optimized for use in the actor system, e.g. it does not expose
+ * public interfaces and assumes it is only ever called from a single thread.
  *
  * <p>
- * This class is not part of the API contract and is subject to change at any time.
+ * This class is not part of the API contract and is subject to change at any time. It is NOT thread-safe.
  */
-@NotThreadSafe
+@VisibleForTesting
+// non-final for mocking
 public class ShardDataTree extends ShardDataTreeTransactionParent {
     private static final class CommitEntry {
         final SimpleShardDataTreeCohort cohort;
         long lastAccess;
 
         CommitEntry(final SimpleShardDataTreeCohort cohort, final long now) {
-            this.cohort = Preconditions.checkNotNull(cohort);
+            this.cohort = requireNonNull(cohort);
             lastAccess = now;
         }
 
@@ -153,21 +164,21 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     private int currentTransactionBatch;
 
-    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final DataTree dataTree,
+    ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final DataTree dataTree,
             final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
             final String logContext,
             final ShardDataTreeMetadata<?>... metadata) {
-        this.dataTree = Preconditions.checkNotNull(dataTree);
+        this.dataTree = requireNonNull(dataTree);
         updateSchemaContext(schemaContext);
 
-        this.shard = Preconditions.checkNotNull(shard);
-        this.treeChangeListenerPublisher = Preconditions.checkNotNull(treeChangeListenerPublisher);
-        this.logContext = Preconditions.checkNotNull(logContext);
+        this.shard = requireNonNull(shard);
+        this.treeChangeListenerPublisher = requireNonNull(treeChangeListenerPublisher);
+        this.logContext = requireNonNull(logContext);
         this.metadata = ImmutableList.copyOf(metadata);
         tip = dataTree;
     }
 
-    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType,
+    ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType,
             final YangInstanceIdentifier root,
             final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
             final String logContext,
@@ -185,8 +196,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @VisibleForTesting
-    public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType) {
-        this(shard, schemaContext, treeType, YangInstanceIdentifier.EMPTY,
+    public ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType) {
+        this(shard, schemaContext, treeType, YangInstanceIdentifier.empty(),
                 new DefaultShardDataTreeChangeListenerPublisher(""), "");
     }
 
@@ -198,21 +209,22 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return shard.ticker().read();
     }
 
-    public DataTree getDataTree() {
+    final DataTree getDataTree() {
         return dataTree;
     }
 
-    SchemaContext getSchemaContext() {
+    @VisibleForTesting
+    final SchemaContext getSchemaContext() {
         return schemaContext;
     }
 
-    void updateSchemaContext(final SchemaContext newSchemaContext) {
-        dataTree.setSchemaContext(newSchemaContext);
-        this.schemaContext = Preconditions.checkNotNull(newSchemaContext);
+    final void updateSchemaContext(final @NonNull EffectiveModelContext newSchemaContext) {
+        dataTree.setEffectiveModelContext(newSchemaContext);
+        this.schemaContext = newSchemaContext;
         this.dataSchemaContext = DataSchemaContextTree.from(newSchemaContext);
     }
 
-    void resetTransactionBatch() {
+    final void resetTransactionBatch() {
         currentTransactionBatch = 0;
     }
 
@@ -222,7 +234,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @return A state snapshot
      */
     @NonNull ShardDataTreeSnapshot takeStateSnapshot() {
-        final NormalizedNode<?, ?> rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY).get();
+        final NormalizedNode rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty()).get();
         final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metaBuilder =
                 ImmutableMap.builder();
 
@@ -264,18 +276,18 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             }
         }
 
-        final DataTreeModification mod = wrapper.apply(dataTree.takeSnapshot().newModification());
+        final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification();
+        final DataTreeModification mod = wrapper.apply(unwrapped);
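+        // Note: the snapshot contents are written through the (possibly pruning) wrapper, while validation and
+        // commit below are performed on the underlying unwrapped modification.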
         // delete everything first
-        mod.delete(YangInstanceIdentifier.EMPTY);
+        mod.delete(YangInstanceIdentifier.empty());
 
-        final java.util.Optional<NormalizedNode<?, ?>> maybeNode = snapshot.getRootNode();
+        final Optional<NormalizedNode> maybeNode = snapshot.getRootNode();
         if (maybeNode.isPresent()) {
             // Add everything from the remote node back
-            mod.write(YangInstanceIdentifier.EMPTY, maybeNode.get());
+            mod.write(YangInstanceIdentifier.empty(), maybeNode.get());
         }
         mod.ready();
 
-        final DataTreeModification unwrapped = unwrap(mod);
         dataTree.validate(unwrapped);
         DataTreeCandidateTip candidate = dataTree.prepare(unwrapped);
         dataTree.commit(candidate);
@@ -291,21 +303,11 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param snapshot Snapshot that needs to be applied
      * @throws DataValidationFailedException when the snapshot fails to apply
      */
-    void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
+    final void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
+        // TODO: we should be taking ShardSnapshotState here and performing forward-compatibility translation
         applySnapshot(snapshot, UnaryOperator.identity());
     }
 
-    private PruningDataTreeModification wrapWithPruning(final DataTreeModification delegate) {
-        return new PruningDataTreeModification(delegate, dataTree, dataSchemaContext);
-    }
-
-    private static DataTreeModification unwrap(final DataTreeModification modification) {
-        if (modification instanceof PruningDataTreeModification) {
-            return ((PruningDataTreeModification)modification).delegate();
-        }
-        return modification;
-    }
-
     /**
      * Apply a snapshot coming from recovery. This method does not assume the SchemaContexts match and performs data
      * pruning in an attempt to adjust the state to our current SchemaContext.
@@ -313,17 +315,29 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param snapshot Snapshot that needs to be applied
      * @throws DataValidationFailedException when the snapshot fails to apply
      */
-    void applyRecoverySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
-        applySnapshot(snapshot, this::wrapWithPruning);
+    final void applyRecoverySnapshot(final @NonNull ShardSnapshotState snapshot) throws DataValidationFailedException {
+        // TODO: we should be able to reuse the pruner, provided we are not reentrant
+        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
+            dataSchemaContext);
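+        // Snapshots which need migration are replayed through a Proactive pruning modification with uint adaptation
+        // enabled; up-to-date snapshots only need the Reactive variant.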
+        if (snapshot.needsMigration()) {
+            final ReusableNormalizedNodePruner uintPruner = pruner.withUintAdaption();
+            applySnapshot(snapshot.getSnapshot(),
+                delegate -> new PruningDataTreeModification.Proactive(delegate, dataTree, uintPruner));
+        } else {
+            applySnapshot(snapshot.getSnapshot(),
+                delegate -> new PruningDataTreeModification.Reactive(delegate, dataTree, pruner));
+        }
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void applyRecoveryCandidate(final DataTreeCandidate candidate) {
-        final PruningDataTreeModification mod = wrapWithPruning(dataTree.takeSnapshot().newModification());
-        DataTreeCandidates.applyToModification(mod, candidate);
-        mod.ready();
+    private void applyRecoveryCandidate(final CommitTransactionPayload payload) throws IOException {
+        final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> entry = payload.acquireCandidate();
+        final DataTreeModification unwrapped = dataTree.takeSnapshot().newModification();
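+        // Candidates recorded with a stream version older than MAGNESIUM need uint adaptation while being
+        // re-applied, hence the version check when selecting the pruning modification.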
+        final PruningDataTreeModification mod = createPruningModification(unwrapped,
+            NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.getValue().getVersion()) > 0);
 
-        final DataTreeModification unwrapped = mod.delegate();
+        DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
+        mod.ready();
         LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped);
 
         try {
@@ -337,6 +351,17 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                     "%s: Failed to apply recovery payload. Modification data was written to file %s",
                     logContext, file), e);
         }
+
+        allMetadataCommittedTransaction(entry.getKey());
+    }
+
+    private PruningDataTreeModification createPruningModification(final DataTreeModification unwrapped,
+            final boolean uintAdapting) {
+        // TODO: we should be able to reuse the pruner, provided we are not reentrant
+        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
+            dataSchemaContext);
+        return uintAdapting ? new PruningDataTreeModification.Proactive(unwrapped, dataTree, pruner.withUintAdaption())
+                : new PruningDataTreeModification.Reactive(unwrapped, dataTree, pruner);
     }
 
     /**
@@ -347,12 +372,9 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @throws IOException when the snapshot fails to deserialize
      * @throws DataValidationFailedException when the snapshot fails to apply
      */
-    void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
+    final void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
         if (payload instanceof CommitTransactionPayload) {
-            final Entry<TransactionIdentifier, DataTreeCandidate> e =
-                    ((CommitTransactionPayload) payload).getCandidate();
-            applyRecoveryCandidate(e.getValue());
-            allMetadataCommittedTransaction(e.getKey());
+            applyRecoveryCandidate((CommitTransactionPayload) payload);
         } else if (payload instanceof AbortTransactionPayload) {
             allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
         } else if (payload instanceof PurgeTransactionPayload) {
@@ -368,12 +390,15 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
-    private void applyReplicatedCandidate(final TransactionIdentifier identifier, final DataTreeCandidate foreign)
-            throws DataValidationFailedException {
+    private void applyReplicatedCandidate(final CommitTransactionPayload payload)
+            throws DataValidationFailedException, IOException {
+        final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> entry = payload.acquireCandidate();
+        final TransactionIdentifier identifier = entry.getKey();
         LOG.debug("{}: Applying foreign transaction {}", logContext, identifier);
 
         final DataTreeModification mod = dataTree.takeSnapshot().newModification();
-        DataTreeCandidates.applyToModification(mod, foreign);
+        // TODO: check version here, which will enable us to perform forward-compatibility transformations
+        DataTreeCandidates.applyToModification(mod, entry.getValue().getCandidate());
         mod.ready();
 
         LOG.trace("{}: Applying foreign modification {}", logContext, mod);
@@ -394,7 +419,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @throws IOException when the snapshot fails to deserialize
      * @throws DataValidationFailedException when the snapshot fails to apply
      */
-    void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
+    final void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
             DataValidationFailedException {
         /*
          * This is a bit more involved than it needs to be due to the fact we do not want to be touching the payload
@@ -409,13 +434,20 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
          */
         if (payload instanceof CommitTransactionPayload) {
             if (identifier == null) {
-                final Entry<TransactionIdentifier, DataTreeCandidate> e =
-                        ((CommitTransactionPayload) payload).getCandidate();
-                applyReplicatedCandidate(e.getKey(), e.getValue());
+                applyReplicatedCandidate((CommitTransactionPayload) payload);
             } else {
-                Verify.verify(identifier instanceof TransactionIdentifier);
-                payloadReplicationComplete((TransactionIdentifier) identifier);
+                verify(identifier instanceof TransactionIdentifier);
+                // If we did not track this transaction before, it means that it came from another leader and we are in
+                // the process of committing it while in PreLeader state. That means that it hasn't yet been committed
+                // to the local DataTree and would be lost if it was only applied via payloadReplicationComplete().
+                if (!payloadReplicationComplete((TransactionIdentifier) identifier)) {
+                    applyReplicatedCandidate((CommitTransactionPayload) payload);
+                }
             }
+
+            // make sure acquireCandidate() is the last call touching the payload data as we want it to be GC-ed.
+            checkRootOverwrite(((CommitTransactionPayload) payload).acquireCandidate().getValue()
+                    .getCandidate());
         } else if (payload instanceof AbortTransactionPayload) {
             if (identifier != null) {
                 payloadReplicationComplete((AbortTransactionPayload) payload);
@@ -446,6 +478,25 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
+    private void checkRootOverwrite(final DataTreeCandidate candidate) {
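+        // Request a snapshot capture when a replicated transaction has overwritten the shard root ("/"), subject to
+        // the snapshot-on-root-overwrite setting and the shard being persistent.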
+        final DatastoreContext datastoreContext = shard.getDatastoreContext();
+        if (!datastoreContext.isSnapshotOnRootOverwrite()) {
+            return;
+        }
+
+        if (!datastoreContext.isPersistent()) {
+            // FIXME: why don't we want a snapshot in non-persistent state?
+            return;
+        }
+
+        // top-level container, i.e. "/"
+        if (candidate.getRootPath().isEmpty()
+                && candidate.getRootNode().getModificationType() == ModificationType.WRITE) {
+            LOG.debug("{}: shard root overwritten, enqueuing snapshot", logContext);
+            shard.self().tell(new InitiateCaptureSnapshot(), noSender());
+        }
+    }
+
     private void replicatePayload(final Identifier id, final Payload payload, final @Nullable Runnable callback) {
         if (callback != null) {
             replicationCallbacks.put(payload, callback);
@@ -463,22 +514,23 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
-    private void payloadReplicationComplete(final TransactionIdentifier txId) {
+    private boolean payloadReplicationComplete(final TransactionIdentifier txId) {
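+        // Returns true when txId was found at the head of pendingFinishCommits and its commit has been finished,
+        // false otherwise, in which case the caller applies the replicated candidate directly.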
         final CommitEntry current = pendingFinishCommits.peek();
         if (current == null) {
             LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
             allMetadataCommittedTransaction(txId);
-            return;
+            return false;
         }
 
         if (!current.cohort.getIdentifier().equals(txId)) {
             LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
                 current.cohort.getIdentifier(), txId);
             allMetadataCommittedTransaction(txId);
-            return;
+            return false;
         }
 
         finishCommit(current.cohort);
+        return true;
     }
 
     private void allMetadataAbortedTransaction(final TransactionIdentifier txId) {
@@ -525,16 +577,15 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param closed True if the chain should be created in closed state (i.e. pending purge)
      * @return Transaction chain handle
      */
-    ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
+    final ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
             final boolean closed) {
         final ShardDataTreeTransactionChain ret = new ShardDataTreeTransactionChain(historyId, this);
         final ShardDataTreeTransactionChain existing = transactionChains.putIfAbsent(historyId, ret);
-        Preconditions.checkState(existing == null, "Attempted to recreate chain %s, but %s already exists", historyId,
-                existing);
+        checkState(existing == null, "Attempted to recreate chain %s, but %s already exists", historyId, existing);
         return ret;
     }
 
-    ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
+    final ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
             final @Nullable Runnable callback) {
         ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
         if (chain == null) {
@@ -549,7 +600,9 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return chain;
     }
 
-    ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
+    final ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
+        shard.getShardMBean().incrementReadOnlyTransactionCount();
+
         if (txId.getHistoryId().getHistoryId() == 0) {
             return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
         }
@@ -557,7 +610,9 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
     }
 
-    ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
+    final ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
+        shard.getShardMBean().incrementReadWriteTransactionCount();
+
         if (txId.getHistoryId().getHistoryId() == 0) {
             return new ReadWriteShardDataTreeTransaction(ShardDataTree.this, txId, dataTree.takeSnapshot()
                     .newModification());
@@ -567,7 +622,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @VisibleForTesting
-    public void notifyListeners(final DataTreeCandidate candidate) {
+    final void notifyListeners(final DataTreeCandidate candidate) {
         treeChangeListenerPublisher.publishChanges(candidate);
     }
 
@@ -575,7 +630,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * Immediately purge all state relevant to leader. This includes all transaction chains and any scheduled
      * replication callbacks.
      */
-    void purgeLeaderState() {
+    final void purgeLeaderState() {
         for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
             chain.close();
         }
@@ -590,19 +645,34 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param id History identifier
      * @param callback Callback to invoke upon completion, may be null
      */
-    void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
+    final void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
+        if (commonCloseTransactionChain(id, callback)) {
+            replicatePayload(id, CloseLocalHistoryPayload.create(id,
+                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
+        }
+    }
+
+    /**
+     * Close a single transaction chain which is received through ask-based protocol. It does not keep a commit record.
+     *
+     * @param id History identifier
+     */
+    final void closeTransactionChain(final LocalHistoryIdentifier id) {
+        commonCloseTransactionChain(id, null);
+    }
+
+    private boolean commonCloseTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
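+        // Returns true if a tracked chain was closed, meaning the tell-based caller still needs to replicate
+        // a CloseLocalHistoryPayload for it.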
         final ShardDataTreeTransactionChain chain = transactionChains.get(id);
         if (chain == null) {
             LOG.debug("{}: Closing non-existent transaction chain {}", logContext, id);
             if (callback != null) {
                 callback.run();
             }
-            return;
+            return false;
         }
 
         chain.close();
-        replicatePayload(id, CloseLocalHistoryPayload.create(
-                id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
+        return true;
     }
 
     /**
@@ -611,7 +681,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param id History identifier
      * @param callback Callback to invoke upon completion, may be null
      */
-    void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
+    final void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
         final ShardDataTreeTransactionChain chain = transactionChains.remove(id);
         if (chain == null) {
             LOG.debug("{}: Purging non-existent transaction chain {}", logContext, id);
@@ -625,25 +695,23 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
     }
 
-    Optional<DataTreeCandidate> readCurrentData() {
-        final java.util.Optional<NormalizedNode<?, ?>> currentState =
-                dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY);
-        return currentState.isPresent() ? Optional.of(DataTreeCandidates.fromNormalizedNode(
-            YangInstanceIdentifier.EMPTY, currentState.get())) : Optional.<DataTreeCandidate>absent();
+    final Optional<DataTreeCandidate> readCurrentData() {
+        return dataTree.takeSnapshot().readNode(YangInstanceIdentifier.empty())
+                .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.empty(), state));
     }
 
-    public void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
+    final void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
             final Optional<DataTreeCandidate> initialState,
             final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
         treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
     }
 
-    int getQueueSize() {
+    final int getQueueSize() {
         return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
     }
 
     @Override
-    void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
+    final void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
         final TransactionIdentifier id = transaction.getIdentifier();
         LOG.debug("{}: aborting transaction {}", logContext, id);
         replicatePayload(id, AbortTransactionPayload.create(
@@ -651,40 +719,43 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @Override
-    void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
+    final void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
         // No-op for free-standing transactions
-
     }
 
     @Override
-    ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
-            final java.util.Optional<SortedSet<String>> participatingShardNames) {
+    final ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
+            final Optional<SortedSet<String>> participatingShardNames) {
         final DataTreeModification snapshot = transaction.getSnapshot();
+        final TransactionIdentifier id = transaction.getIdentifier();
+        LOG.debug("{}: readying transaction {}", logContext, id);
         snapshot.ready();
+        LOG.debug("{}: transaction {} ready", logContext, id);
 
         return createReadyCohort(transaction.getIdentifier(), snapshot, participatingShardNames);
     }
 
-    void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
+    final void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
         LOG.debug("{}: purging transaction {}", logContext, id);
         replicatePayload(id, PurgeTransactionPayload.create(
                 id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
     }
 
-    public Optional<NormalizedNode<?, ?>> readNode(final YangInstanceIdentifier path) {
-        return Optional.fromJavaUtil(dataTree.takeSnapshot().readNode(path));
+    @VisibleForTesting
+    public final Optional<NormalizedNode> readNode(final YangInstanceIdentifier path) {
+        return dataTree.takeSnapshot().readNode(path);
     }
 
-    DataTreeSnapshot takeSnapshot() {
+    final DataTreeSnapshot takeSnapshot() {
         return dataTree.takeSnapshot();
     }
 
     @VisibleForTesting
-    public DataTreeModification newModification() {
+    final DataTreeModification newModification() {
         return dataTree.takeSnapshot().newModification();
     }
 
-    public Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
+    final Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
         Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());
 
         for (CommitEntry entry: pendingFinishCommits) {
@@ -709,7 +780,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     /**
      * Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
      */
-    void resumeNextPendingTransaction() {
+    final void resumeNextPendingTransaction() {
         LOG.debug("{}: attempting to resume transaction processing", logContext);
         processNextPending();
     }
@@ -745,8 +816,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
                 // For debugging purposes, allow dumping of the modification. Coupled with the above
                 // precondition log, it should allow us to understand what went on.
-                LOG.debug("{}: Store Tx {}: modifications: {} tree: {}", logContext, cohort.getIdentifier(),
-                    modification, dataTree);
+                LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.getIdentifier(), modification);
+                LOG.trace("{}: Current tree: {}", logContext, dataTree);
                 cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
             } catch (Exception e) {
                 LOG.warn("{}: Unexpected failure in validation phase", logContext, e);
@@ -795,6 +866,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return first != null && first.cohort.getState() == State.COMMIT_PENDING;
     }
 
+    // non-final for mocking
     void startCanCommit(final SimpleShardDataTreeCohort cohort) {
         final CommitEntry head = pendingTransactions.peek();
         if (head == null) {
@@ -898,8 +970,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         tempStack.forEach(queue::addFirst);
     }
 
-    private Collection<String> extractPrecedingShardNames(
-            final java.util.Optional<SortedSet<String>> participatingShardNames) {
+    private Collection<String> extractPrecedingShardNames(final Optional<SortedSet<String>> participatingShardNames) {
         return participatingShardNames.map((Function<SortedSet<String>, Collection<String>>)
             set -> set.headSet(shard.getShardName())).orElse(Collections.<String>emptyList());
     }
@@ -910,20 +981,23 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         processNextPendingTransaction();
     }
 
+    // non-final for mocking
     @SuppressWarnings("checkstyle:IllegalCatch")
     void startPreCommit(final SimpleShardDataTreeCohort cohort) {
         final CommitEntry entry = pendingTransactions.peek();
-        Preconditions.checkState(entry != null, "Attempted to pre-commit of %s when no transactions pending", cohort);
+        checkState(entry != null, "Attempted to pre-commit of %s when no transactions pending", cohort);
 
         final SimpleShardDataTreeCohort current = entry.cohort;
-        Verify.verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);
+        verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);
 
-        LOG.debug("{}: Preparing transaction {}", logContext, current.getIdentifier());
+        final TransactionIdentifier currentId = current.getIdentifier();
+        LOG.debug("{}: Preparing transaction {}", logContext, currentId);
 
         final DataTreeCandidateTip candidate;
         try {
             candidate = tip.prepare(cohort.getDataTreeModification());
-        } catch (RuntimeException e) {
+            LOG.debug("{}: Transaction {} candidate ready", logContext, currentId);
+        } catch (DataValidationFailedException | RuntimeException e) {
             failPreCommit(e);
             return;
         }
@@ -932,14 +1006,14 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             @Override
             public void onSuccess(final Void noop) {
                 // Set the tip of the data tree.
-                tip = Verify.verifyNotNull(candidate);
+                tip = verifyNotNull(candidate);
 
                 entry.lastAccess = readTime();
 
                 pendingTransactions.remove();
                 pendingCommits.add(entry);
 
-                LOG.debug("{}: Transaction {} prepared", logContext, current.getIdentifier());
+                LOG.debug("{}: Transaction {} prepared", logContext, currentId);
 
                 cohort.successfulPreCommit(candidate);
 
@@ -992,9 +1066,10 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         });
     }
 
+    // non-final for mocking
     void startCommit(final SimpleShardDataTreeCohort cohort, final DataTreeCandidate candidate) {
         final CommitEntry entry = pendingCommits.peek();
-        Preconditions.checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);
+        checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);
 
         final SimpleShardDataTreeCohort current = entry.cohort;
         if (!cohort.equals(current)) {
@@ -1007,7 +1082,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         final TransactionIdentifier txId = cohort.getIdentifier();
         final Payload payload;
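+        // The candidate is serialized at the current PayloadVersion; recovery consults the recorded stream version
+        // (see applyRecoveryCandidate()) when deciding whether uint adaptation is needed.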
         try {
-            payload = CommitTransactionPayload.create(txId, candidate,
+            payload = CommitTransactionPayload.create(txId, candidate, PayloadVersion.current(),
                     shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity());
         } catch (IOException e) {
             LOG.error("{}: Failed to encode transaction {} candidate {}", logContext, txId, candidate, e);
@@ -1046,16 +1121,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         processNextPendingCommit();
     }
 
-    Collection<ActorRef> getCohortActors() {
+    final Collection<ActorRef> getCohortActors() {
         return cohortRegistry.getCohortActors();
     }
 
-    void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
+    final void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
         cohortRegistry.process(sender, message);
     }
 
     @Override
-    ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+    final ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
             final Exception failure) {
         final SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId, failure);
         pendingTransactions.add(new CommitEntry(cohort, readTime()));
@@ -1063,8 +1138,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @Override
-    ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
-            final java.util.Optional<SortedSet<String>> participatingShardNames) {
+    final ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+            final Optional<SortedSet<String>> participatingShardNames) {
         SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
                 cohortRegistry.createCohort(schemaContext, txId, shard::executeInSelf,
                         COMMIT_STEP_TIMEOUT), participatingShardNames);
@@ -1074,8 +1149,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     // Exposed for ShardCommitCoordinator so it does not have to deal with local histories (it does not care); this mimics
     // the newReadWriteTransaction()
-    ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
-            final java.util.Optional<SortedSet<String>> participatingShardNames) {
+    final ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+            final Optional<SortedSet<String>> participatingShardNames) {
         if (txId.getHistoryId().getHistoryId() == 0) {
             return createReadyCohort(txId, mod, participatingShardNames);
         }
@@ -1084,8 +1159,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
-    void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
-            final Function<SimpleShardDataTreeCohort, Optional<Long>> accessTimeUpdater) {
+    final void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
+            final Function<SimpleShardDataTreeCohort, OptionalLong> accessTimeUpdater) {
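+        // accessTimeUpdater gives the frontend a chance to refresh the current transaction's access time; an empty
+        // OptionalLong leaves the recorded access time unchanged and allows the expiration logic below to proceed.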
         final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
         final long now = readTime();
 
@@ -1103,9 +1178,9 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             return;
         }
 
-        final Optional<Long> updateOpt = accessTimeUpdater.apply(currentTx.cohort);
+        final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort);
         if (updateOpt.isPresent()) {
-            final long newAccess =  updateOpt.get().longValue();
+            final long newAccess = updateOpt.getAsLong();
             final long newDelta = now - newAccess;
             if (newDelta < delta) {
                 LOG.debug("{}: Updated current transaction {} access time", logContext,
@@ -1183,6 +1258,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
+    // non-final for mocking
     boolean startAbort(final SimpleShardDataTreeCohort cohort) {
         final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
                 pendingTransactions).iterator();
@@ -1211,7 +1287,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             return false;
         }
 
-        DataTreeTip newTip = MoreObjects.firstNonNull(first.cohort.getCandidate(), dataTree);
+        DataTreeTip newTip = requireNonNullElse(first.cohort.getCandidate(), dataTree);
         while (it.hasNext()) {
             final CommitEntry e = it.next();
             if (cohort.equals(e.cohort)) {
@@ -1224,7 +1300,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
                 return true;
             } else {
-                newTip = MoreObjects.firstNonNull(e.cohort.getCandidate(), newTip);
+                newTip = requireNonNullElse(e.cohort.getCandidate(), newTip);
             }
         }
 
@@ -1234,7 +1310,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     @SuppressWarnings("checkstyle:IllegalCatch")
     private void rebaseTransactions(final Iterator<CommitEntry> iter, final @NonNull DataTreeTip newTip) {
-        tip = Preconditions.checkNotNull(newTip);
+        tip = requireNonNull(newTip);
         while (iter.hasNext()) {
             final SimpleShardDataTreeCohort cohort = iter.next().cohort;
             if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
@@ -1263,7 +1339,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
-    void setRunOnPendingTransactionsComplete(final Runnable operation) {
+    final void setRunOnPendingTransactionsComplete(final Runnable operation) {
         runOnPendingTransactionsComplete = operation;
         maybeRunOperationOnPendingTransactionsComplete();
     }
@@ -1278,16 +1354,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
-    ShardStats getStats() {
+    final ShardStats getStats() {
         return shard.getShardMBean();
     }
 
-    Iterator<SimpleShardDataTreeCohort> cohortIterator() {
+    final Iterator<SimpleShardDataTreeCohort> cohortIterator() {
         return Iterables.transform(Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions),
             e -> e.cohort).iterator();
     }
 
-    void removeTransactionChain(final LocalHistoryIdentifier id) {
+    final void removeTransactionChain(final LocalHistoryIdentifier id) {
         if (transactionChains.remove(id) != null) {
             LOG.debug("{}: Removed transaction chain {}", logContext, id);
         }