Merge "Update lispflowmapping options in custom.properties"
[controller.git] / opendaylight / md-sal / sal-distributed-datastore / src / main / java / org / opendaylight / controller / cluster / datastore / Shard.java
index d53cb48e501c75c25e3532ce2c71e481717612ad..81449c574780705196e28d3dbd738151c2d948cd 100644
@@ -11,30 +11,22 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
-import akka.actor.PoisonPill;
 import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
 import akka.japi.Creator;
 import akka.persistence.RecoveryFailure;
 import akka.serialization.Serialization;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.InvalidProtocolBufferException;
-import java.util.Collection;
+import java.io.IOException;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.common.actor.CommonConfig;
 import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
 import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
@@ -47,40 +39,37 @@ import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
+import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
 import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
 import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
-import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
+import org.opendaylight.controller.cluster.datastore.utils.MessageTracker;
+import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
+import org.opendaylight.controller.cluster.notifications.RoleChangeNotifier;
 import org.opendaylight.controller.cluster.raft.RaftActor;
-import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
+import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import scala.concurrent.duration.Duration;
 import scala.concurrent.duration.FiniteDuration;
@@ -93,100 +82,98 @@ import scala.concurrent.duration.FiniteDuration;
  */
 public class Shard extends RaftActor {
 
-    private static final Object COMMIT_TRANSACTION_REPLY = new CommitTransactionReply().toSerializable();
-
     private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";
 
-    public static final String DEFAULT_NAME = "default";
+    @VisibleForTesting
+    static final String DEFAULT_NAME = "default";
 
     // The state of this Shard
     private final InMemoryDOMDataStore store;
 
-    private final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
-
     /// The name of this shard
-    private final ShardIdentifier name;
+    private final String name;
 
     private final ShardStats shardMBean;
 
-    private final List<ActorSelection> dataChangeListeners =  Lists.newArrayList();
+    private DatastoreContext datastoreContext;
 
-    private final List<DelayedListenerRegistration> delayedListenerRegistrations =
-                                                                       Lists.newArrayList();
+    private final ShardCommitCoordinator commitCoordinator;
 
-    private final DatastoreContext datastoreContext;
+    private long transactionCommitTimeout;
 
-    private final DataPersistenceProvider dataPersistenceProvider;
+    private Cancellable txCommitTimeoutCheckSchedule;
 
-    private SchemaContext schemaContext;
+    private final Optional<ActorRef> roleChangeNotifier;
 
-    private ActorRef createSnapshotTransaction;
+    private final MessageTracker appendEntriesReplyTracker;
 
-    private int createSnapshotTransactionCounter;
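+    // Reply containing this shard's serialized actor path, returned to the caller when a transaction is readied on this shard.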
+    private final ReadyTransactionReply READY_TRANSACTION_REPLY = new ReadyTransactionReply(
+            Serialization.serializedActorPath(getSelf()));
 
-    private final ShardCommitCoordinator commitCoordinator;
+    private final DOMTransactionFactory domTransactionFactory;
 
-    private final long transactionCommitTimeout;
+    private final ShardTransactionActorFactory transactionActorFactory;
 
-    private Cancellable txCommitTimeoutCheckSchedule;
+    private final ShardSnapshotCohort snapshotCohort;
 
-    /**
-     * Coordinates persistence recovery on startup.
-     */
-    private ShardRecoveryCoordinator recoveryCoordinator;
-    private List<Object> currentLogRecoveryBatch;
+    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
+    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
 
-    private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
-
-    protected Shard(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
+    protected Shard(final ShardIdentifier name, final Map<String, String> peerAddresses,
             final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
-        super(name.toString(), mapPeerAddresses(peerAddresses),
-                Optional.of(datastoreContext.getShardRaftConfig()));
+        super(name.toString(), new HashMap<>(peerAddresses), Optional.of(datastoreContext.getShardRaftConfig()));
 
-        this.name = name;
+        this.name = name.toString();
         this.datastoreContext = datastoreContext;
-        this.schemaContext = schemaContext;
-        this.dataPersistenceProvider = (datastoreContext.isPersistent()) ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
 
-        LOG.info("Shard created : {} persistent : {}", name, datastoreContext.isPersistent());
+        setPersistence(datastoreContext.isPersistent());
+
+        LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
 
         store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
                 datastoreContext.getDataStoreProperties());
 
-        if(schemaContext != null) {
+        if (schemaContext != null) {
             store.onGlobalContextUpdated(schemaContext);
         }
 
         shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
                 datastoreContext.getDataStoreMXBeanType());
         shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());
+        shardMBean.setShardActor(getSelf());
 
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
         }
 
-        commitCoordinator = new ShardCommitCoordinator(TimeUnit.SECONDS.convert(1, TimeUnit.MINUTES),
-                datastoreContext.getShardTransactionCommitQueueCapacity());
+        domTransactionFactory = new DOMTransactionFactory(store, shardMBean, LOG, this.name);
 
-        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
-                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);
-    }
+        commitCoordinator = new ShardCommitCoordinator(domTransactionFactory,
+                TimeUnit.SECONDS.convert(5, TimeUnit.MINUTES),
+                datastoreContext.getShardTransactionCommitQueueCapacity(), self(), LOG, this.name);
 
-    private static Map<String, String> mapPeerAddresses(
-        final Map<ShardIdentifier, String> peerAddresses) {
-        Map<String, String> map = new HashMap<>();
+        setTransactionCommitTimeout();
 
-        for (Map.Entry<ShardIdentifier, String> entry : peerAddresses
-            .entrySet()) {
-            map.put(entry.getKey().toString(), entry.getValue());
-        }
+        // create a notifier actor for each cluster member
+        roleChangeNotifier = createRoleChangeNotifier(name.toString());
 
-        return map;
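+        // Track the arrival of AppendEntriesReply messages; onReceiveCommand logs a trace message when one is late relative to the isolated check interval.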
+        appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
+                getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
+
+        transactionActorFactory = new ShardTransactionActorFactory(domTransactionFactory, datastoreContext,
+                new Dispatchers(context().system().dispatchers()).getDispatcherPath(
+                        Dispatchers.DispatcherType.Transaction), self(), getContext(), shardMBean);
+
+        snapshotCohort = new ShardSnapshotCohort(transactionActorFactory, store, LOG, this.name);
+    }
+
+    private void setTransactionCommitTimeout() {
+        transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
+                datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);
     }
 
     public static Props props(final ShardIdentifier name,
-        final Map<ShardIdentifier, String> peerAddresses,
+        final Map<String, String> peerAddresses,
         final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
         Preconditions.checkNotNull(name, "name should not be null");
         Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
@@ -196,76 +183,127 @@ public class Shard extends RaftActor {
         return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
     }
 
+    private Optional<ActorRef> createRoleChangeNotifier(String shardId) {
+        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
+            RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
+        return Optional.of(shardRoleChangeNotifier);
+    }
+
     @Override
     public void postStop() {
+        LOG.info("Stopping Shard {}", persistenceId());
+
         super.postStop();
 
         if(txCommitTimeoutCheckSchedule != null) {
             txCommitTimeoutCheckSchedule.cancel();
         }
+
+        shardMBean.unregisterMBean();
     }
 
     @Override
     public void onReceiveRecover(final Object message) throws Exception {
         if(LOG.isDebugEnabled()) {
-            LOG.debug("onReceiveRecover: Received message {} from {}",
-                message.getClass().toString(),
-                getSender());
+            LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(),
+                message.getClass().toString(), getSender());
         }
 
         if (message instanceof RecoveryFailure){
-            LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");
+            LOG.error("{}: Recovery failed because of this cause",
+                    persistenceId(), ((RecoveryFailure) message).cause());
 
             // Even though recovery failed, we still need to finish our recovery, eg send the
             // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
             onRecoveryComplete();
         } else {
             super.onReceiveRecover(message);
+            if(LOG.isTraceEnabled()) {
+                appendEntriesReplyTracker.begin();
+            }
         }
     }
 
     @Override
     public void onReceiveCommand(final Object message) throws Exception {
-        if(LOG.isDebugEnabled()) {
-            LOG.debug("onReceiveCommand: Received message {} from {}", message, getSender());
+
+        MessageTracker.Context context = appendEntriesReplyTracker.received(message);
+
+        if(context.error().isPresent()){
+            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
+                    context.error());
         }
 
-        if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
-            handleReadDataReply(message);
-        } else if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
-            handleCreateTransaction(message);
-        } else if(message instanceof ForwardedReadyTransaction) {
-            handleForwardedReadyTransaction((ForwardedReadyTransaction)message);
-        } else if(message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
-            handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
-        } else if(message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
-            handleCommitTransaction(CommitTransaction.fromSerializable(message));
-        } else if(message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
-            handleAbortTransaction(AbortTransaction.fromSerializable(message));
-        } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
-            closeTransactionChain(CloseTransactionChain.fromSerializable(message));
-        } else if (message instanceof RegisterChangeListener) {
-            registerChangeListener((RegisterChangeListener) message);
-        } else if (message instanceof UpdateSchemaContext) {
-            updateSchemaContext((UpdateSchemaContext) message);
-        } else if (message instanceof PeerAddressResolved) {
-            PeerAddressResolved resolved = (PeerAddressResolved) message;
-            setPeerAddress(resolved.getPeerId().toString(),
-                resolved.getPeerAddress());
-        } else if(message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
-            handleTransactionCommitTimeoutCheck();
-        } else {
-            super.onReceiveCommand(message);
+        try {
+            if (CreateTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
+                handleCreateTransaction(message);
+            } else if (BatchedModifications.class.isInstance(message)) {
+                handleBatchedModifications((BatchedModifications)message);
+            } else if (message instanceof ForwardedReadyTransaction) {
+                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
+            } else if (CanCommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
+                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
+            } else if (CommitTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
+                handleCommitTransaction(CommitTransaction.fromSerializable(message));
+            } else if (AbortTransaction.SERIALIZABLE_CLASS.isInstance(message)) {
+                handleAbortTransaction(AbortTransaction.fromSerializable(message));
+            } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
+                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
+            } else if (message instanceof RegisterChangeListener) {
+                changeSupport.onMessage((RegisterChangeListener) message, isLeader());
+            } else if (message instanceof RegisterDataTreeChangeListener) {
+                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
+            } else if (message instanceof UpdateSchemaContext) {
+                updateSchemaContext((UpdateSchemaContext) message);
+            } else if (message instanceof PeerAddressResolved) {
+                PeerAddressResolved resolved = (PeerAddressResolved) message;
+                setPeerAddress(resolved.getPeerId().toString(),
+                        resolved.getPeerAddress());
+            } else if (message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
+                handleTransactionCommitTimeoutCheck();
+            } else if(message instanceof DatastoreContext) {
+                onDatastoreContext((DatastoreContext)message);
+            } else if(message instanceof RegisterRoleChangeListener){
+                roleChangeNotifier.get().forward(message, context());
+            } else if (message instanceof FollowerInitialSyncUpStatus){
+                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
+                context().parent().tell(message, self());
+            } else {
+                super.onReceiveCommand(message);
+            }
+        } finally {
+            context.done();
         }
     }
 
+    @Override
+    protected Optional<ActorRef> getRoleChangeNotifier() {
+        return roleChangeNotifier;
+    }
+
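+    // Applies an updated DatastoreContext at runtime: refreshes the commit queue capacity, the transaction commit timeout, the persistence mode and the Raft config parameters.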
+    private void onDatastoreContext(DatastoreContext context) {
+        datastoreContext = context;
+
+        commitCoordinator.setQueueCapacity(datastoreContext.getShardTransactionCommitQueueCapacity());
+
+        setTransactionCommitTimeout();
+
+        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
+            setPersistence(true);
+        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
+            setPersistence(false);
+        }
+
+        updateConfigParams(datastoreContext.getShardRaftConfig());
+    }
+
     private void handleTransactionCommitTimeoutCheck() {
         CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
         if(cohortEntry != null) {
             long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
             if(elapsed > transactionCommitTimeout) {
-                LOG.warning("Current transaction {} has timed out after {} ms - aborting",
-                        cohortEntry.getTransactionID(), transactionCommitTimeout);
+                LOG.warn("{}: Current transaction {} has timed out after {} ms - aborting",
+                        persistenceId(), cohortEntry.getTransactionID(), transactionCommitTimeout);
 
                 doAbortTransaction(cohortEntry.getTransactionID(), null);
             }
@@ -275,7 +313,7 @@ public class Shard extends RaftActor {
     private void handleCommitTransaction(final CommitTransaction commit) {
         final String transactionID = commit.getTransactionID();
 
-        LOG.debug("Committing transaction {}", transactionID);
+        LOG.debug("{}: Committing transaction {}", persistenceId(), transactionID);
 
         // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
         // this transaction.
@@ -284,8 +322,8 @@ public class Shard extends RaftActor {
             // We're not the current Tx - the Tx was likely expired b/c it took too long in
             // between the canCommit and commit messages.
             IllegalStateException ex = new IllegalStateException(
-                    String.format("Cannot commit transaction %s - it is not the current transaction",
-                            transactionID));
+                    String.format("%s: Cannot commit transaction %s - it is not the current transaction",
+                            persistenceId(), transactionID));
             LOG.error(ex.getMessage());
             shardMBean.incrementFailedTransactionsCount();
             getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
@@ -303,11 +341,18 @@ public class Shard extends RaftActor {
             // currently uses a same thread executor anyway.
             cohortEntry.getCohort().preCommit().get();
 
-            Shard.this.persistData(getSender(), transactionID,
-                    new CompositeModificationPayload(cohortEntry.getModification().toSerializable()));
-        } catch (InterruptedException | ExecutionException e) {
-            LOG.error(e, "An exception occurred while preCommitting transaction {}",
-                    cohortEntry.getTransactionID());
+            // If we do not have any followers and we are not using persistence,
+            // or if the cohortEntry has no modifications,
+            // we can apply the modification to the state immediately.
+            if((!hasFollowers() && !persistence().isRecoveryApplicable()) || (!cohortEntry.hasModifications())){
+                applyModificationToState(getSender(), transactionID, cohortEntry.getModification());
+            } else {
+                Shard.this.persistData(getSender(), transactionID,
+                        new ModificationPayload(cohortEntry.getModification()));
+            }
+        } catch (Exception e) {
+            LOG.error("{} An exception occurred while preCommitting transaction {}",
+                    persistenceId(), cohortEntry.getTransactionID(), e);
             shardMBean.incrementFailedTransactionsCount();
             getSender().tell(new akka.actor.Status.Failure(e), getSelf());
         }
@@ -330,13 +375,13 @@ public class Shard extends RaftActor {
             cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
             if(cohortEntry != null) {
                 commitWithNewTransaction(cohortEntry.getModification());
-                sender.tell(COMMIT_TRANSACTION_REPLY, getSelf());
+                sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
             } else {
                 // This really shouldn't happen - it likely means that persistence or replication
                 // took so long to complete such that the cohort entry was expired from the cache.
                 IllegalStateException ex = new IllegalStateException(
-                        String.format("Could not finish committing transaction %s - no CohortEntry found",
-                                transactionID));
+                        String.format("%s: Could not finish committing transaction %s - no CohortEntry found",
+                                persistenceId(), transactionID));
                 LOG.error(ex.getMessage());
                 sender.tell(new akka.actor.Status.Failure(ex), getSelf());
             }
@@ -344,7 +389,7 @@ public class Shard extends RaftActor {
             return;
         }
 
-        LOG.debug("Finishing commit for transaction {}", cohortEntry.getTransactionID());
+        LOG.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
 
         try {
             // We block on the future here so we don't have to worry about possibly accessing our
@@ -352,62 +397,113 @@ public class Shard extends RaftActor {
             // currently uses a same thread executor anyway.
             cohortEntry.getCohort().commit().get();
 
-            sender.tell(COMMIT_TRANSACTION_REPLY, getSelf());
+            sender.tell(CommitTransactionReply.INSTANCE.toSerializable(), getSelf());
 
             shardMBean.incrementCommittedTransactionCount();
             shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
 
-        } catch (InterruptedException | ExecutionException e) {
+        } catch (Exception e) {
             sender.tell(new akka.actor.Status.Failure(e), getSelf());
 
-            LOG.error(e, "An exception occurred while committing transaction {}", transactionID);
+            LOG.error("{}, An exception occurred while committing transaction {}", persistenceId(),
+                    transactionID, e);
             shardMBean.incrementFailedTransactionsCount();
+        } finally {
+            commitCoordinator.currentTransactionComplete(transactionID, true);
         }
-
-        commitCoordinator.currentTransactionComplete(transactionID, true);
     }
 
     private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
-        LOG.debug("Can committing transaction {}", canCommit.getTransactionID());
+        LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
         commitCoordinator.handleCanCommit(canCommit, getSender(), self());
     }
 
+    private void handleBatchedModifications(BatchedModifications batched) {
+        // This message is sent to prepare the modifications transaction directly on the Shard as an
+        // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
+        // BatchedModifications message, the caller sets the ready flag in the message indicating
+        // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
+        // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
+        // ReadyTransaction message.
+
+        // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
+        // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
+        // the primary/leader shard. However with timing and caching on the front-end, there's a small
+        // window where it could have a stale leader during leadership transitions.
+        //
+        if(isLeader()) {
+            try {
+                boolean ready = commitCoordinator.handleTransactionModifications(batched);
+                if(ready) {
+                    sender().tell(READY_TRANSACTION_REPLY, self());
+                } else {
+                    sender().tell(new BatchedModificationsReply(batched.getModifications().size()), self());
+                }
+            } catch (Exception e) {
+                LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
+                        batched.getTransactionID(), e);
+                getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+            }
+        } else {
+            ActorSelection leader = getLeader();
+            if(leader != null) {
+                // TODO: what if this is not the first batch and leadership changed in between batched messages?
+                // We could check if the commitCoordinator already has a cached entry and forward all the previous
+                // batched modifications.
+                LOG.debug("{}: Forwarding BatchedModifications to leader {}", persistenceId(), leader);
+                leader.forward(batched, getContext());
+            } else {
+                // TODO: rather than throwing an immediate exception, we could schedule a timer to try again to make
+                // it more resilient in case we're in the process of electing a new leader.
+                getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
+                    "Could not find the leader for shard %s. This typically happens" +
+                    " when the system is coming up or recovering and a leader is being elected. Try again" +
+                    " later.", persistenceId()))), getSelf());
+            }
+        }
+    }
+
     private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
-        LOG.debug("Readying transaction {}, client version {}", ready.getTransactionID(),
-                ready.getTxnClientVersion());
+        LOG.debug("{}: Readying transaction {}, client version {}", persistenceId(),
+                ready.getTransactionID(), ready.getTxnClientVersion());
 
         // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
         // commitCoordinator in preparation for the subsequent three phase commit initiated by
         // the front-end.
         commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
-                ready.getModification());
+                (MutableCompositeModification) ready.getModification());
 
         // Return our actor path as we'll handle the three phase commit, except if the Tx client
         // version < 1 (Helium-1 version). This means the Tx was initiated by a base Helium version
         // node. In that case, the subsequent 3-phase commit messages won't contain the
         // transactionId so to maintain backwards compatibility, we create a separate cohort actor
         // to provide the compatible behavior.
-        ActorRef replyActorPath = self();
-        if(ready.getTxnClientVersion() < CreateTransaction.HELIUM_1_VERSION) {
-            LOG.debug("Creating BackwardsCompatibleThreePhaseCommitCohort");
-            replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
-                    ready.getTransactionID()));
-        }
+        if(ready.getTxnClientVersion() < DataStoreVersions.LITHIUM_VERSION) {
+            ActorRef replyActorPath = getSelf();
+            if(ready.getTxnClientVersion() < DataStoreVersions.HELIUM_1_VERSION) {
+                LOG.debug("{}: Creating BackwardsCompatibleThreePhaseCommitCohort", persistenceId());
+                replyActorPath = getContext().actorOf(BackwardsCompatibleThreePhaseCommitCohort.props(
+                        ready.getTransactionID()));
+            }
 
-        ReadyTransactionReply readyTransactionReply = new ReadyTransactionReply(
-                Serialization.serializedActorPath(replyActorPath));
-        getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
+            ReadyTransactionReply readyTransactionReply =
+                    new ReadyTransactionReply(Serialization.serializedActorPath(replyActorPath),
+                            ready.getTxnClientVersion());
+            getSender().tell(ready.isReturnSerialized() ? readyTransactionReply.toSerializable() :
                 readyTransactionReply, getSelf());
+        } else {
+            getSender().tell(READY_TRANSACTION_REPLY, getSelf());
+        }
     }
 
     private void handleAbortTransaction(final AbortTransaction abort) {
         doAbortTransaction(abort.getTransactionID(), getSender());
     }
 
-    private void doAbortTransaction(final String transactionID, final ActorRef sender) {
+    void doAbortTransaction(final String transactionID, final ActorRef sender) {
         final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
         if(cohortEntry != null) {
-            LOG.debug("Aborting transaction {}", transactionID);
+            LOG.debug("{}: Aborting transaction {}", persistenceId(), transactionID);
 
             // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
             // aborted during replication in which case we may still commit locally if replication
@@ -423,13 +519,13 @@ public class Shard extends RaftActor {
                     shardMBean.incrementAbortTransactionsCount();
 
                     if(sender != null) {
-                        sender.tell(new AbortTransactionReply().toSerializable(), self);
+                        sender.tell(AbortTransactionReply.INSTANCE.toSerializable(), self);
                     }
                 }
 
                 @Override
                 public void onFailure(final Throwable t) {
-                    LOG.error(t, "An exception happened during abort");
+                    LOG.error("{}: An exception happened during abort", persistenceId(), t);
 
                     if(sender != null) {
                         sender.tell(new akka.actor.Status.Failure(t), self);
@@ -445,89 +541,23 @@ public class Shard extends RaftActor {
         } else if (getLeader() != null) {
             getLeader().forward(message, getContext());
         } else {
-            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
-                "Could not find shard leader so transaction cannot be created. This typically happens" +
+            getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(String.format(
+                "Could not find leader for shard %s so transaction cannot be created. This typically happens" +
                 " when the system is coming up or recovering and a leader is being elected. Try again" +
-                " later.")), getSelf());
+                " later.", persistenceId()))), getSelf());
         }
     }
 
-    private void handleReadDataReply(final Object message) {
-        // This must be for install snapshot. Don't want to open this up and trigger
-        // deSerialization
-
-        self().tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)),
-                self());
-
-        createSnapshotTransaction = null;
-
-        // Send a PoisonPill instead of sending close transaction because we do not really need
-        // a response
-        getSender().tell(PoisonPill.getInstance(), self());
-    }
-
     private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
-        DOMStoreTransactionChain chain =
-            transactionChains.remove(closeTransactionChain.getTransactionChainId());
-
-        if(chain != null) {
-            chain.close();
-        }
+        domTransactionFactory.closeTransactionChain(closeTransactionChain.getTransactionChainId());
     }
 
     private ActorRef createTypedTransactionActor(int transactionType,
-            ShardTransactionIdentifier transactionId, String transactionChainId, int clientVersion ) {
-
-        DOMStoreTransactionFactory factory = store;
-
-        if(!transactionChainId.isEmpty()) {
-            factory = transactionChains.get(transactionChainId);
-            if(factory == null){
-                DOMStoreTransactionChain transactionChain = store.createTransactionChain();
-                transactionChains.put(transactionChainId, transactionChain);
-                factory = transactionChain;
-            }
-        }
+            ShardTransactionIdentifier transactionId, String transactionChainId,
+            short clientVersion ) {
 
-        if(this.schemaContext == null) {
-            throw new IllegalStateException("SchemaContext is not set");
-        }
-
-        if (transactionType == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
-
-            shardMBean.incrementReadOnlyTransactionCount();
-
-            return getContext().actorOf(
-                ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
-                        schemaContext,datastoreContext, shardMBean,
-                        transactionId.getRemoteTransactionId(), clientVersion),
-                        transactionId.toString());
-
-        } else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
-
-            shardMBean.incrementReadWriteTransactionCount();
-
-            return getContext().actorOf(
-                ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
-                        schemaContext, datastoreContext, shardMBean,
-                        transactionId.getRemoteTransactionId(), clientVersion),
-                        transactionId.toString());
-
-
-        } else if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
-
-            shardMBean.incrementWriteOnlyTransactionCount();
-
-            return getContext().actorOf(
-                ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
-                        schemaContext, datastoreContext, shardMBean,
-                        transactionId.getRemoteTransactionId(), clientVersion),
-                        transactionId.toString());
-        } else {
-            throw new IllegalArgumentException(
-                "Shard="+name + ":CreateTransaction message has unidentified transaction type="
-                    + transactionType);
-        }
+        return transactionActorFactory.newShardTransaction(TransactionProxy.TransactionType.fromInt(transactionType),
+                transactionId, transactionChainId, clientVersion);
     }
 
     private void createTransaction(CreateTransaction createTransaction) {
@@ -544,15 +574,13 @@ public class Shard extends RaftActor {
     }
 
     private ActorRef createTransaction(int transactionType, String remoteTransactionId,
-            String transactionChainId, int clientVersion) {
+            String transactionChainId, short clientVersion) {
 
-        ShardTransactionIdentifier transactionId =
-            ShardTransactionIdentifier.builder()
-                .remoteTransactionId(remoteTransactionId)
-                .build();
+
+        ShardTransactionIdentifier transactionId = new ShardTransactionIdentifier(remoteTransactionId);
 
         if(LOG.isDebugEnabled()) {
-            LOG.debug("Creating transaction : {} ", transactionId);
+            LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
         }
 
         ActorRef transactionActor = createTypedTransactionActor(transactionType, transactionId,
@@ -561,30 +589,21 @@ public class Shard extends RaftActor {
         return transactionActor;
     }
 
-    private void syncCommitTransaction(final DOMStoreWriteTransaction transaction)
-        throws ExecutionException, InterruptedException {
-        DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
-        commitCohort.preCommit().get();
-        commitCohort.commit().get();
-    }
-
     private void commitWithNewTransaction(final Modification modification) {
         DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
         modification.apply(tx);
         try {
-            syncCommitTransaction(tx);
+            snapshotCohort.syncCommitTransaction(tx);
             shardMBean.incrementCommittedTransactionCount();
             shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
         } catch (InterruptedException | ExecutionException e) {
             shardMBean.incrementFailedTransactionsCount();
-            LOG.error(e, "Failed to commit");
+            LOG.error("{}: Failed to commit", persistenceId(), e);
         }
     }
 
     private void updateSchemaContext(final UpdateSchemaContext message) {
-        this.schemaContext = message.getSchemaContext();
         updateSchemaContext(message.getSchemaContext());
-        store.onGlobalContextUpdated(message.getSchemaContext());
     }
 
     @VisibleForTesting
@@ -592,132 +611,24 @@ public class Shard extends RaftActor {
         store.onGlobalContextUpdated(schemaContext);
     }
 
-    private void registerChangeListener(final RegisterChangeListener registerChangeListener) {
-
-        LOG.debug("registerDataChangeListener for {}", registerChangeListener.getPath());
-
-        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
-                                                     NormalizedNode<?, ?>>> registration;
-        if(isLeader()) {
-            registration = doChangeListenerRegistration(registerChangeListener);
-        } else {
-            LOG.debug("Shard is not the leader - delaying registration");
-
-            DelayedListenerRegistration delayedReg =
-                    new DelayedListenerRegistration(registerChangeListener);
-            delayedListenerRegistrations.add(delayedReg);
-            registration = delayedReg;
-        }
-
-        ActorRef listenerRegistration = getContext().actorOf(
-                DataChangeListenerRegistration.props(registration));
-
-        LOG.debug("registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
-                    listenerRegistration.path());
-
-        getSender().tell(new RegisterChangeListenerReply(listenerRegistration.path()), getSelf());
-    }
-
-    private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
-                                               NormalizedNode<?, ?>>> doChangeListenerRegistration(
-            final RegisterChangeListener registerChangeListener) {
-
-        ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
-                registerChangeListener.getDataChangeListenerPath());
-
-        // Notify the listener if notifications should be enabled or not
-        // If this shard is the leader then it will enable notifications else
-        // it will not
-        dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
-
-        // Now store a reference to the data change listener so it can be notified
-        // at a later point if notifications should be enabled or disabled
-        dataChangeListeners.add(dataChangeListenerPath);
-
-        AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
-                new DataChangeListenerProxy(schemaContext, dataChangeListenerPath);
-
-        LOG.debug("Registering for path {}", registerChangeListener.getPath());
-
-        return store.registerChangeListener(registerChangeListener.getPath(), listener,
-                registerChangeListener.getScope());
-    }
-
-    private boolean isMetricsCaptureEnabled(){
+    private boolean isMetricsCaptureEnabled() {
         CommonConfig config = new CommonConfig(getContext().system().settings().config());
         return config.isMetricCaptureEnabled();
     }
 
     @Override
-    protected
-    void startLogRecoveryBatch(final int maxBatchSize) {
-        currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
-
-        if(LOG.isDebugEnabled()) {
-            LOG.debug("{} : starting log recovery batch with max size {}", persistenceId(), maxBatchSize);
-        }
-    }
-
-    @Override
-    protected void appendRecoveredLogEntry(final Payload data) {
-        if (data instanceof CompositeModificationPayload) {
-            currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
-        } else {
-            LOG.error("Unknown state received {} during recovery", data);
-        }
-    }
-
-    @Override
-    protected void applyRecoverySnapshot(final ByteString snapshot) {
-        if(recoveryCoordinator == null) {
-            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
-        }
-
-        recoveryCoordinator.submit(snapshot, store.newWriteOnlyTransaction());
-
-        if(LOG.isDebugEnabled()) {
-            LOG.debug("{} : submitted recovery sbapshot", persistenceId());
-        }
+    protected RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+        return snapshotCohort;
     }
 
     @Override
-    protected void applyCurrentLogRecoveryBatch() {
-        if(recoveryCoordinator == null) {
-            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
-        }
-
-        recoveryCoordinator.submit(currentLogRecoveryBatch, store.newWriteOnlyTransaction());
-
-        if(LOG.isDebugEnabled()) {
-            LOG.debug("{} : submitted log recovery batch with size {}", persistenceId(),
-                    currentLogRecoveryBatch.size());
-        }
+    @Nonnull
+    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+        return new ShardRecoveryCoordinator(store, persistenceId(), LOG);
     }
 
     @Override
     protected void onRecoveryComplete() {
-        if(recoveryCoordinator != null) {
-            Collection<DOMStoreWriteTransaction> txList = recoveryCoordinator.getTransactions();
-
-            if(LOG.isDebugEnabled()) {
-                LOG.debug("{} : recovery complete - committing {} Tx's", persistenceId(), txList.size());
-            }
-
-            for(DOMStoreWriteTransaction tx: txList) {
-                try {
-                    syncCommitTransaction(tx);
-                    shardMBean.incrementCommittedTransactionCount();
-                } catch (InterruptedException | ExecutionException e) {
-                    shardMBean.incrementFailedTransactionsCount();
-                    LOG.error(e, "Failed to commit");
-                }
-            }
-        }
-
-        recoveryCoordinator = null;
-        currentLogRecoveryBatch = null;
-        updateJournalStats();
-
         //notify shard manager
         getContext().parent().tell(new ActorInitialized(), getSelf());
 
@@ -735,151 +646,82 @@ public class Shard extends RaftActor {
     @Override
     protected void applyState(final ActorRef clientActor, final String identifier, final Object data) {
 
-        if (data instanceof CompositeModificationPayload) {
-            Object modification = ((CompositeModificationPayload) data).getModification();
-
-            if(modification == null) {
-                LOG.error(
-                     "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
-                     identifier, clientActor != null ? clientActor.path().toString() : null);
-            } else if(clientActor == null) {
-                // There's no clientActor to which to send a commit reply so we must be applying
-                // replicated state from the leader.
-                commitWithNewTransaction(MutableCompositeModification.fromSerializable(
-                        modification, schemaContext));
-            } else {
-                // This must be the OK to commit after replication consensus.
-                finishCommit(clientActor, identifier);
+        if(data instanceof ModificationPayload) {
+            try {
+                applyModificationToState(clientActor, identifier, ((ModificationPayload) data).getModification());
+            } catch (ClassNotFoundException | IOException e) {
+                LOG.error("{}: Error extracting ModificationPayload", persistenceId(), e);
             }
-        } else {
-            LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
-                    data, data.getClass().getClassLoader(),
-                    CompositeModificationPayload.class.getClassLoader());
         }
+        else if (data instanceof CompositeModificationPayload) {
+            Object modification = ((CompositeModificationPayload) data).getModification();
 
-        updateJournalStats();
-
-    }
-
-    private void updateJournalStats() {
-        ReplicatedLogEntry lastLogEntry = getLastLogEntry();
-
-        if (lastLogEntry != null) {
-            shardMBean.setLastLogIndex(lastLogEntry.getIndex());
-            shardMBean.setLastLogTerm(lastLogEntry.getTerm());
-        }
-
-        shardMBean.setCommitIndex(getCommitIndex());
-        shardMBean.setLastApplied(getLastApplied());
-    }
-
-    @Override
-    protected void createSnapshot() {
-        if (createSnapshotTransaction == null) {
-
-            // Create a transaction. We are really going to treat the transaction as a worker
-            // so that this actor does not get block building the snapshot
-            createSnapshotTransaction = createTransaction(
-                TransactionProxy.TransactionType.READ_ONLY.ordinal(),
-                "createSnapshot" + ++createSnapshotTransactionCounter, "",
-                CreateTransaction.CURRENT_VERSION);
-
-            createSnapshotTransaction.tell(
-                new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(), self());
+            applyModificationToState(clientActor, identifier, modification);
+        } else if(data instanceof CompositeModificationByteStringPayload ){
+            Object modification = ((CompositeModificationByteStringPayload) data).getModification();
 
+            applyModificationToState(clientActor, identifier, modification);
+        } else {
+            LOG.error("{}: Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
+                    persistenceId(), data, data.getClass().getClassLoader(),
+                    CompositeModificationPayload.class.getClassLoader());
         }
     }
 
-    @VisibleForTesting
-    @Override
-    protected void applySnapshot(final ByteString snapshot) {
-        // Since this will be done only on Recovery or when this actor is a Follower
-        // we can safely commit everything in here. We not need to worry about event notifications
-        // as they would have already been disabled on the follower
-
-        LOG.info("Applying snapshot");
-        try {
-            DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
-            NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
-            NormalizedNode<?, ?> node = new NormalizedNodeToNodeCodec(schemaContext)
-                .decode(serializedNode);
-
-            // delete everything first
-            transaction.delete(YangInstanceIdentifier.builder().build());
-
-            // Add everything from the remote node back
-            transaction.write(YangInstanceIdentifier.builder().build(), node);
-            syncCommitTransaction(transaction);
-        } catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) {
-            LOG.error(e, "An exception occurred when applying snapshot");
-        } finally {
-            LOG.info("Done applying snapshot");
+    private void applyModificationToState(ActorRef clientActor, String identifier, Object modification) {
+        if(modification == null) {
+            LOG.error(
+                    "{}: modification is null - this is very unexpected, clientActor = {}, identifier = {}",
+                    persistenceId(), identifier, clientActor != null ? clientActor.path().toString() : null);
+        } else if(clientActor == null) {
+            // There's no clientActor to which to send a commit reply so we must be applying
+            // replicated state from the leader.
+            commitWithNewTransaction(MutableCompositeModification.fromSerializable(modification));
+        } else {
+            // This must be the OK to commit after replication consensus.
+            finishCommit(clientActor, identifier);
         }
     }
 
     @Override
     protected void onStateChanged() {
         boolean isLeader = isLeader();
-        for (ActorSelection dataChangeListener : dataChangeListeners) {
-            dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
-        }
-
-        if(isLeader) {
-            for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
-                if(!reg.isClosed()) {
-                    reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
-                }
-            }
-
-            delayedListenerRegistrations.clear();
-        }
-
-        shardMBean.setRaftState(getRaftState().name());
-        shardMBean.setCurrentTerm(getCurrentTerm());
+        changeSupport.onLeadershipChange(isLeader);
+        treeChangeSupport.onLeadershipChange(isLeader);
 
         // If this actor is no longer the leader close all the transaction chains
-        if(!isLeader){
-            for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
-                if(LOG.isDebugEnabled()) {
-                    LOG.debug(
-                        "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
-                        entry.getKey(), getId());
-                }
-                entry.getValue().close();
+        if (!isLeader) {
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(
+                    "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
+                    persistenceId(), getId());
             }
 
-            transactionChains.clear();
+            domTransactionFactory.closeAllTransactionChains();
         }
     }
 
     @Override
-    protected DataPersistenceProvider persistence() {
-        return dataPersistenceProvider;
-    }
-
-    @Override protected void onLeaderChanged(final String oldLeader, final String newLeader) {
-        shardMBean.setLeader(newLeader);
-    }
-
-    @Override public String persistenceId() {
-        return this.name.toString();
+    public String persistenceId() {
+        return this.name;
     }
 
     @VisibleForTesting
-    DataPersistenceProvider getDataPersistenceProvider() {
-        return dataPersistenceProvider;
+    ShardCommitCoordinator getCommitCoordinator() {
+        return commitCoordinator;
     }
 
+
     private static class ShardCreator implements Creator<Shard> {
 
         private static final long serialVersionUID = 1L;
 
         final ShardIdentifier name;
-        final Map<ShardIdentifier, String> peerAddresses;
+        final Map<String, String> peerAddresses;
         final DatastoreContext datastoreContext;
         final SchemaContext schemaContext;
 
-        ShardCreator(final ShardIdentifier name, final Map<ShardIdentifier, String> peerAddresses,
+        ShardCreator(final ShardIdentifier name, final Map<String, String> peerAddresses,
                 final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
             this.name = name;
             this.peerAddresses = peerAddresses;
@@ -894,7 +736,7 @@ public class Shard extends RaftActor {
     }
 
     @VisibleForTesting
-    InMemoryDOMDataStore getDataStore() {
+    public InMemoryDOMDataStore getDataStore() {
         return store;
     }
 
@@ -902,45 +744,4 @@ public class Shard extends RaftActor {
     ShardStats getShardMBean() {
         return shardMBean;
     }
-
-    private static class DelayedListenerRegistration implements
-        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
-
-        private volatile boolean closed;
-
-        private final RegisterChangeListener registerChangeListener;
-
-        private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
-                                                             NormalizedNode<?, ?>>> delegate;
-
-        DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
-            this.registerChangeListener = registerChangeListener;
-        }
-
-        void setDelegate( final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
-                                            NormalizedNode<?, ?>>> registration) {
-            this.delegate = registration;
-        }
-
-        boolean isClosed() {
-            return closed;
-        }
-
-        RegisterChangeListener getRegisterChangeListener() {
-            return registerChangeListener;
-        }
-
-        @Override
-        public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
-            return delegate != null ? delegate.getInstance() : null;
-        }
-
-        @Override
-        public void close() {
-            closed = true;
-            if(delegate != null) {
-                delegate.close();
-            }
-        }
-    }
 }