Merge branch 'master' of ../controller
author Anil Belur <abelur@linuxfoundation.org>
Thu, 31 Oct 2019 14:28:57 +0000 (19:58 +0530)
committer Anil Belur <abelur@linuxfoundation.org>
Thu, 31 Oct 2019 14:28:57 +0000 (19:58 +0530)
Change-Id: I3611c067a26521829ae9212e4be1ed44acbd7dd2

93 files changed:
java/org/opendaylight/controller/cluster/ActorSystemProvider.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/ActorSystemProviderListener.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/DataPersistenceProvider.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/PersistentDataProvider.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/AbstractConfig.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/AkkaConfigurationReader.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/CommonConfig.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/Dispatchers.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/ExecuteInSelfActor.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/ExecuteInSelfMessage.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/ExplicitAsk.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/FileAkkaConfigurationReader.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/MessageTracker.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailbox.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/MeteringBehavior.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/Monitor.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/UnboundedDequeBasedControlAwareMailbox.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/common/actor/UnifiedConfig.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeVisitor.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactory.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractLithiumDataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractLithiumDataOutput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractMagnesiumDataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractMagnesiumDataOutput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataOutput.java [new file with mode: 0755]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ForwardingDataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ForwardingNormalizedNodeDataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/InvalidNormalizedNodeStreamException.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNode.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNormalizedNodeInputStreamReader.java [new file with mode: 0755]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNormalizedNodeOutputStreamWriter.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumPathArgument.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumTokens.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumValue.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumDataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumDataOutput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumNode.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumPathArgument.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumValue.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2NormalizedNodeInputStreamReader.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2NormalizedNodeOutputStreamWriter.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2Tokens.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataOutput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputOutput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeStreamVersion.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SodiumSR1DataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SodiumSR1DataOutput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/TokenTypes.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/stream/VersionedNormalizedNodeDataInput.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePruner.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/ReusableNormalizedNodePruner.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamFactory.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/io/SharedFileBackedOutputStream.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/AbortSlicing.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/AssembledMessageState.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/AssemblerClosedException.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/AssemblerSealedException.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/MessageSlice.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/MessageSliceException.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/MessageSliceReply.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/SliceOptions.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/messaging/SlicedMessageState.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/notifications/RegisterRoleChangeListener.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/notifications/RegisterRoleChangeListenerReply.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/notifications/RoleChangeNotification.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/notifications/RoleChanged.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/PersistentPayload.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/reporting/MetricsReporter.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java [new file with mode: 0644]
java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java [new file with mode: 0644]

diff --git a/java/org/opendaylight/controller/cluster/ActorSystemProvider.java b/java/org/opendaylight/controller/cluster/ActorSystemProvider.java
new file mode 100644 (file)
index 0000000..32e0210
--- /dev/null
@@ -0,0 +1,36 @@
/*
 * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster;

import akka.actor.ActorSystem;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.yangtools.concepts.ListenerRegistration;

/**
 * Interface that provides an akka ActorSystem instance.
 *
 * @author Thomas Pantelis
 */
public interface ActorSystemProvider {

    /**
     * Returns the current ActorSystem. Never null.
     *
     * @return the ActorSystem.
     */
    @NonNull ActorSystem getActorSystem();

    /**
     * Register a listener for ActorSystem lifecycle events (pre-shutdown and replacement notifications,
     * see {@link ActorSystemProviderListener}).
     *
     * @param listener the ActorSystemProviderListener to register
     * @return a ListenerRegistration instance to be used to unregister the listener
     */
    ListenerRegistration<ActorSystemProviderListener> registerActorSystemProviderListener(
            @NonNull ActorSystemProviderListener listener);
}
diff --git a/java/org/opendaylight/controller/cluster/ActorSystemProviderListener.java b/java/org/opendaylight/controller/cluster/ActorSystemProviderListener.java
new file mode 100644 (file)
index 0000000..ebb66f4
--- /dev/null
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster;

import akka.actor.ActorSystem;
import java.util.EventListener;

/**
 * Listener interface for notification of ActorSystem changes from an ActorSystemProvider.
 *
 * @author Thomas Pantelis
 */
public interface ActorSystemProviderListener extends EventListener {
    /**
     * Method called when the current actor system is about to be shutdown.
     */
    void onPreShutdownActorSystem();

    /**
     * Method called when the current actor system is shutdown and a new actor system is created. This method
     * is always preceded by a call to {@link #onPreShutdownActorSystem}.
     *
     * @param actorSystem the new ActorSystem
     */
    // Fixed typo: parameter was previously spelled "actorSytem" in both the signature and javadoc.
    void onNewActorSystem(ActorSystem actorSystem);
}
diff --git a/java/org/opendaylight/controller/cluster/DataPersistenceProvider.java b/java/org/opendaylight/controller/cluster/DataPersistenceProvider.java
new file mode 100644 (file)
index 0000000..c655dcd
--- /dev/null
@@ -0,0 +1,73 @@
/*
 * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster;

import akka.japi.Procedure;
import akka.persistence.SnapshotSelectionCriteria;

/**
 * DataPersistenceProvider provides methods to persist data and is an abstraction of the akka-persistence persistence
 * API. Implementations may be fully persistent or no-ops (see NonPersistentDataProvider).
 */
public interface DataPersistenceProvider {

    /**
     * Returns whether or not persistence recovery is applicable/enabled.
     *
     * @return true if recovery is applicable, otherwise false, in which case the provider is not persistent and may
     *         not have anything to be recovered
     */
    boolean isRecoveryApplicable();

    /**
     * Persists an entry to the applicable journal synchronously.
     *
     * @param entry the journal entry to persist
     * @param procedure the callback when persistence is complete
     * @param <T> the type of the journal entry
     */
    <T> void persist(T entry, Procedure<T> procedure);

    /**
     * Persists an entry to the applicable journal asynchronously.
     *
     * @param entry the journal entry to persist
     * @param procedure the callback when persistence is complete
     * @param <T> the type of the journal entry
     */
    <T> void persistAsync(T entry, Procedure<T> procedure);

    /**
     * Saves a snapshot.
     *
     * @param snapshot the snapshot object to save
     */
    void saveSnapshot(Object snapshot);

    /**
     * Deletes snapshots based on the given criteria.
     *
     * @param criteria the search criteria
     */
    void deleteSnapshots(SnapshotSelectionCriteria criteria);

    /**
     * Deletes journal entries up to the given sequence number.
     *
     * @param sequenceNumber the sequence number
     */
    void deleteMessages(long sequenceNumber);

    /**
     * Returns the last sequence number contained in the journal.
     *
     * @return the last sequence number
     */
    long getLastSequenceNumber();
}
diff --git a/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java b/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java
new file mode 100644 (file)
index 0000000..f1a20fc
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+
+/**
+ * A DataPersistenceProvider implementation that delegates to another implementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class DelegatingPersistentDataProvider implements DataPersistenceProvider {
+    private DataPersistenceProvider delegate;
+
+    public DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
+        this.delegate = delegate;
+    }
+
+    public void setDelegate(DataPersistenceProvider delegate) {
+        this.delegate = delegate;
+    }
+
+    public DataPersistenceProvider getDelegate() {
+        return delegate;
+    }
+
+    @Override
+    public boolean isRecoveryApplicable() {
+        return delegate.isRecoveryApplicable();
+    }
+
+    @Override
+    public <T> void persist(T entry, Procedure<T> procedure) {
+        delegate.persist(entry, procedure);
+    }
+
+    @Override
+    public <T> void persistAsync(T entry, Procedure<T> procedure) {
+        delegate.persistAsync(entry, procedure);
+    }
+
+    @Override
+    public void saveSnapshot(Object entry) {
+        delegate.saveSnapshot(entry);
+    }
+
+    @Override
+    public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+        delegate.deleteSnapshots(criteria);
+    }
+
+    @Override
+    public void deleteMessages(long sequenceNumber) {
+        delegate.deleteMessages(sequenceNumber);
+    }
+
+    @Override
+    public long getLastSequenceNumber() {
+        return delegate.getLastSequenceNumber();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java b/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java
new file mode 100644 (file)
index 0000000..9a4a34c
--- /dev/null
@@ -0,0 +1,73 @@
/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster;

import static java.util.Objects.requireNonNull;

import akka.japi.Procedure;
import akka.persistence.SnapshotSelectionCriteria;
import org.opendaylight.controller.cluster.common.actor.ExecuteInSelfActor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A DataPersistenceProvider with persistence turned off. Journal/snapshot operations do nothing;
 * persist callbacks are still invoked (synchronously for {@link #persist}, via the actor's own
 * mailbox for {@link #persistAsync}).
 */
public class NonPersistentDataProvider implements DataPersistenceProvider {
    private static final Logger LOG = LoggerFactory.getLogger(NonPersistentDataProvider.class);

    // Used to defer persistAsync callbacks back onto the owning actor's thread.
    private final ExecuteInSelfActor actor;

    public NonPersistentDataProvider(final ExecuteInSelfActor actor) {
        this.actor = requireNonNull(actor);
    }

    @Override
    public boolean isRecoveryApplicable() {
        // Nothing is ever written, hence there is never anything to recover.
        return false;
    }

    @Override
    public <T> void persist(final T entry, final Procedure<T> procedure) {
        // No journal write - just run the completion callback immediately.
        invokeProcedure(procedure, entry);
    }

    @Override
    public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
        // Defer the callback so it runs from within the actor, mirroring akka's async completion.
        actor.executeInSelf(() -> invokeProcedure(procedure, entry));
    }

    @Override
    public void saveSnapshot(final Object snapshot) {
        // no-op
    }

    @Override
    public void deleteSnapshots(final SnapshotSelectionCriteria criteria) {
        // no-op
    }

    @Override
    public void deleteMessages(final long sequenceNumber) {
        // no-op
    }

    @Override
    public long getLastSequenceNumber() {
        return -1;
    }

    // Runs the callback, logging (not propagating) any failure it throws.
    @SuppressWarnings("checkstyle:IllegalCatch")
    static <T> void invokeProcedure(final Procedure<T> procedure, final T argument) {
        try {
            procedure.apply(argument);
        } catch (Exception e) {
            LOG.error("An unexpected error occurred", e);
        }
    }
}
diff --git a/java/org/opendaylight/controller/cluster/PersistentDataProvider.java b/java/org/opendaylight/controller/cluster/PersistentDataProvider.java
new file mode 100644 (file)
index 0000000..21102f1
--- /dev/null
@@ -0,0 +1,61 @@
/*
 * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */
package org.opendaylight.controller.cluster;

import static java.util.Objects.requireNonNull;

import akka.japi.Procedure;
import akka.persistence.AbstractPersistentActor;
import akka.persistence.SnapshotSelectionCriteria;

/**
 * A DataPersistenceProvider implementation with persistence enabled. All operations delegate directly
 * to the wrapped akka {@link AbstractPersistentActor}.
 */
// Parameters are declared final for consistency with the sibling NonPersistentDataProvider.
public class PersistentDataProvider implements DataPersistenceProvider {

    private final AbstractPersistentActor persistentActor;

    public PersistentDataProvider(final AbstractPersistentActor persistentActor) {
        this.persistentActor = requireNonNull(persistentActor, "persistentActor can't be null");
    }

    @Override
    public boolean isRecoveryApplicable() {
        return true;
    }

    @Override
    public <T> void persist(final T entry, final Procedure<T> procedure) {
        persistentActor.persist(entry, procedure);
    }

    @Override
    public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
        persistentActor.persistAsync(entry, procedure);
    }

    @Override
    public void saveSnapshot(final Object snapshot) {
        persistentActor.saveSnapshot(snapshot);
    }

    @Override
    public void deleteSnapshots(final SnapshotSelectionCriteria criteria) {
        persistentActor.deleteSnapshots(criteria);
    }

    @Override
    public void deleteMessages(final long sequenceNumber) {
        persistentActor.deleteMessages(sequenceNumber);
    }

    @Override
    public long getLastSequenceNumber() {
        return persistentActor.lastSequenceNr();
    }
}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/AbstractConfig.java b/java/org/opendaylight/controller/cluster/common/actor/AbstractConfig.java
new file mode 100644 (file)
index 0000000..976d48d
--- /dev/null
@@ -0,0 +1,57 @@
/*
 * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
 */

package org.opendaylight.controller.cluster.common.actor;

import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.util.HashMap;
import java.util.Map;

/**
 * Base class for configuration wrappers backed by a typesafe {@link Config} instance.
 */
public abstract class AbstractConfig implements UnifiedConfig {

    private final Config config;

    public AbstractConfig(Config config) {
        this.config = config;
    }

    @Override
    public Config get() {
        return config;
    }

    /**
     * Fluent builder base: collects explicit settings in a map and optionally layers them over
     * a fallback Config read for a particular actor system.
     *
     * @param <T> the concrete builder type, for fluent chaining
     */
    public abstract static class Builder<T extends Builder<T>> {
        // Explicitly-set values; these win over the fallback on merge().
        protected Map<String, Object> configHolder = new HashMap<>();
        protected Config fallback;

        private final String actorSystemName;

        public Builder(String actorSystemName) {
            Preconditions.checkArgument(actorSystemName != null, "Actor system name must not be null");
            this.actorSystemName = actorSystemName;
        }

        /**
         * Loads the fallback configuration for this builder's actor system from the given reader.
         */
        @SuppressWarnings("unchecked")
        public T withConfigReader(AkkaConfigurationReader reader) {
            fallback = reader.read().getConfig(actorSystemName);
            return (T) this;
        }

        /**
         * Produces the effective Config: explicit settings first, fallback (if any) underneath.
         */
        protected Config merge() {
            final Config explicit = ConfigFactory.parseMap(configHolder);
            return fallback == null ? explicit : explicit.withFallback(fallback);
        }
    }
}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java b/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java
new file mode 100644 (file)
index 0000000..6af52fb
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.AbstractActor;
+import akka.actor.ActorRef;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import org.eclipse.jdt.annotation.NonNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractUntypedActor extends AbstractActor implements ExecuteInSelfActor {
+    // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
+    @SuppressWarnings("checkstyle:MemberName")
+    protected final Logger LOG = LoggerFactory.getLogger(getClass());
+
+    protected AbstractUntypedActor() {
+        LOG.debug("Actor created {}", getSelf());
+        getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf());
+    }
+
+    @Override
+    public final void executeInSelf(@NonNull final Runnable runnable) {
+        final ExecuteInSelfMessage message = new ExecuteInSelfMessage(runnable);
+        self().tell(message, ActorRef.noSender());
+    }
+
+    @Override
+    public Receive createReceive() {
+        return receiveBuilder()
+                .match(ExecuteInSelfMessage.class, ExecuteInSelfMessage::run)
+                .matchAny(this::handleReceive)
+                .build();
+    }
+
+    /**
+     * Receive and handle an incoming message. If the implementation does not handle this particular message,
+     * it should call {@link #ignoreMessage(Object)} or {@link #unknownMessage(Object)}.
+     *
+     * @param message the incoming message
+     */
+    protected abstract void handleReceive(Object message);
+
+    protected final void ignoreMessage(final Object message) {
+        LOG.debug("Ignoring unhandled message {}", message);
+    }
+
+    protected final void unknownMessage(final Object message) {
+        LOG.debug("Received unhandled message {}", message);
+        unhandled(message);
+    }
+
+    protected boolean isValidSender(final ActorRef sender) {
+        // If the caller passes in a null sender (ActorRef.noSender()), akka translates that to the
+        // deadLetters actor.
+        return sender != null && !getContext().system().deadLetters().equals(sender);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java b/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java
new file mode 100644 (file)
index 0000000..2124b24
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+/**
+ * Actor with its behaviour metered. Metering is enabled by configuration.
+ */
+public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor {
+
+    //this is used in the metric name. Some transient actors do not have defined names
+    private String actorNameOverride;
+
+    public AbstractUntypedActorWithMetering() {
+        if (isMetricsCaptureEnabled()) {
+            getContext().become(new MeteringBehavior(this));
+        }
+    }
+
+    public AbstractUntypedActorWithMetering(String actorNameOverride) {
+        this.actorNameOverride = actorNameOverride;
+        if (isMetricsCaptureEnabled()) {
+            getContext().become(new MeteringBehavior(this));
+        }
+    }
+
+    private boolean isMetricsCaptureEnabled() {
+        CommonConfig config = new CommonConfig(getContext().system().settings().config());
+        return config.isMetricCaptureEnabled();
+    }
+
+    public String getActorNameOverride() {
+        return actorNameOverride;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java b/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java
new file mode 100644 (file)
index 0000000..711a431
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.ActorRef;
+import akka.persistence.AbstractPersistentActor;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import org.eclipse.jdt.annotation.NonNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractUntypedPersistentActor extends AbstractPersistentActor implements ExecuteInSelfActor {
+
+    // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
+    @SuppressWarnings("checkstyle:MemberName")
+    protected final Logger LOG = LoggerFactory.getLogger(getClass());
+
+    protected AbstractUntypedPersistentActor() {
+        LOG.trace("Actor created {}", getSelf());
+        getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf());
+    }
+
+    @Override
+    public final void executeInSelf(@NonNull final Runnable runnable) {
+        final ExecuteInSelfMessage message = new ExecuteInSelfMessage(runnable);
+        LOG.trace("Scheduling execution of {}", message);
+        self().tell(message, ActorRef.noSender());
+    }
+
+    @Override
+    public final Receive createReceive() {
+        return receiveBuilder()
+                .match(ExecuteInSelfMessage.class, ExecuteInSelfMessage::run)
+                .matchAny(this::handleCommand)
+                .build();
+    }
+
+    @Override
+    public final Receive createReceiveRecover() {
+        return receiveBuilder().matchAny(this::handleRecover).build();
+    }
+
+    protected abstract void handleRecover(Object message) throws Exception;
+
+    protected abstract void handleCommand(Object message) throws Exception;
+
+    protected void ignoreMessage(final Object message) {
+        LOG.debug("Unhandled message {} ", message);
+    }
+
+    protected void unknownMessage(final Object message) {
+        LOG.debug("Received unhandled message {}", message);
+        unhandled(message);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java b/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java
new file mode 100644 (file)
index 0000000..ed03d33
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+/**
+ * Actor with its behaviour metered. Metering is enabled by configuration.
+ */
+public abstract class AbstractUntypedPersistentActorWithMetering extends AbstractUntypedPersistentActor {
+
+    public AbstractUntypedPersistentActorWithMetering() {
+        if (isMetricsCaptureEnabled()) {
+            getContext().become(new MeteringBehavior(this));
+        }
+    }
+
+    private boolean isMetricsCaptureEnabled() {
+        CommonConfig config = new CommonConfig(getContext().system().settings().config());
+        return config.isMetricCaptureEnabled();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/AkkaConfigurationReader.java b/java/org/opendaylight/controller/cluster/common/actor/AkkaConfigurationReader.java
new file mode 100644 (file)
index 0000000..c2e2128
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import com.typesafe.config.Config;
+
+public interface AkkaConfigurationReader {
+    Config read();
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/CommonConfig.java b/java/org/opendaylight/controller/cluster/common/actor/CommonConfig.java
new file mode 100644 (file)
index 0000000..6a350af
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import com.typesafe.config.Config;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import scala.concurrent.duration.FiniteDuration;
+
+public class CommonConfig extends AbstractConfig {
+
+    protected static final String TAG_ACTOR_SYSTEM_NAME = "actor-system-name";
+    protected static final String TAG_METRIC_CAPTURE_ENABLED = "metric-capture-enabled";
+    protected static final String TAG_MAILBOX_CAPACITY = "mailbox-capacity";
+    protected static final String TAG_MAILBOX = "bounded-mailbox";
+    protected static final String TAG_MAILBOX_PUSH_TIMEOUT = "mailbox-push-timeout-time";
+
+    //TODO: Ideally these defaults should go to reference.conf
+    // https://bugs.opendaylight.org/show_bug.cgi?id=1709
+    private static final int DEFAULT_MAILBOX_CAPACITY = 1000;
+    private static final int DEFAULT_MAILBOX_PUSH_TIMEOUT = 100;
+
+    //locally cached values
+    private FiniteDuration cachedMailBoxPushTimeout;
+    private Integer cachedMailBoxCapacity;
+    private Boolean cachedMetricCaptureEnableFlag;
+
+    public CommonConfig(Config config) {
+        super(config);
+    }
+
+    public String getActorSystemName() {
+        return get().getString(TAG_ACTOR_SYSTEM_NAME);
+    }
+
+    public boolean isMetricCaptureEnabled() {
+        if (cachedMetricCaptureEnableFlag != null) {
+            return cachedMetricCaptureEnableFlag;
+        }
+
+        cachedMetricCaptureEnableFlag = get().hasPath(TAG_METRIC_CAPTURE_ENABLED)
+                ? get().getBoolean(TAG_METRIC_CAPTURE_ENABLED)
+                : false;
+
+        return cachedMetricCaptureEnableFlag;
+    }
+
+    public String getMailBoxName() {
+        return TAG_MAILBOX;
+    }
+
+    public Integer getMailBoxCapacity() {
+
+        if (cachedMailBoxCapacity != null) {
+            return cachedMailBoxCapacity;
+        }
+
+        final String PATH = TAG_MAILBOX + "." + TAG_MAILBOX_CAPACITY;
+        cachedMailBoxCapacity = get().hasPath(PATH)
+                ? get().getInt(PATH)
+                : DEFAULT_MAILBOX_CAPACITY;
+
+        return cachedMailBoxCapacity;
+    }
+
+    public FiniteDuration getMailBoxPushTimeout() {
+
+        if (cachedMailBoxPushTimeout != null) {
+            return cachedMailBoxPushTimeout;
+        }
+
+        final String PATH = TAG_MAILBOX + "." + TAG_MAILBOX_PUSH_TIMEOUT;
+
+        long timeout = get().hasPath(PATH)
+                ? get().getDuration(PATH, TimeUnit.NANOSECONDS)
+                : DEFAULT_MAILBOX_PUSH_TIMEOUT;
+
+        cachedMailBoxPushTimeout = FiniteDuration.create(timeout, TimeUnit.NANOSECONDS);
+        return cachedMailBoxPushTimeout;
+    }
+
+    public static class Builder<T extends Builder<T>> extends AbstractConfig.Builder<T> {
+
+        public Builder(String actorSystemName) {
+            super(actorSystemName);
+
+            //actor system config
+            configHolder.put(TAG_ACTOR_SYSTEM_NAME, actorSystemName);
+
+            //config for bounded mailbox
+            configHolder.put(TAG_MAILBOX, new HashMap<String, Object>());
+        }
+
+        @SuppressWarnings("unchecked")
+        public T metricCaptureEnabled(boolean enabled) {
+            configHolder.put(TAG_METRIC_CAPTURE_ENABLED, String.valueOf(enabled));
+            return (T)this;
+        }
+
+        @SuppressWarnings("unchecked")
+        public T mailboxCapacity(int capacity) {
+            checkArgument(capacity > 0, "mailbox capacity must be >0");
+
+            Map<String, Object> boundedMailbox = (Map<String, Object>) configHolder.get(TAG_MAILBOX);
+            boundedMailbox.put(TAG_MAILBOX_CAPACITY, capacity);
+            return (T)this;
+        }
+
+        @SuppressWarnings("unchecked")
+        public T mailboxPushTimeout(String timeout) {
+            checkArgument(FiniteDuration.create(timeout).isFinite(), "invalid value \"%s\" for mailbox push timeout",
+                timeout);
+
+            Map<String, Object> boundedMailbox = (Map<String, Object>) configHolder.get(TAG_MAILBOX);
+            boundedMailbox.put(TAG_MAILBOX_PUSH_TIMEOUT, timeout);
+            return (T)this;
+        }
+
+        public CommonConfig build() {
+            return new CommonConfig(merge());
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/Dispatchers.java b/java/org/opendaylight/controller/cluster/common/actor/Dispatchers.java
new file mode 100644 (file)
index 0000000..62043e2
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import static java.util.Objects.requireNonNull;
+
+import scala.concurrent.ExecutionContext;
+
+public class Dispatchers {
+    public static final String DEFAULT_DISPATCHER_PATH = "akka.actor.default-dispatcher";
+    public static final String CLIENT_DISPATCHER_PATH = "client-dispatcher";
+    public static final String TXN_DISPATCHER_PATH = "txn-dispatcher";
+    public static final String SHARD_DISPATCHER_PATH = "shard-dispatcher";
+    public static final String NOTIFICATION_DISPATCHER_PATH = "notification-dispatcher";
+    public static final String SERIALIZATION_DISPATCHER_PATH = "serialization-dispatcher";
+
+    private final akka.dispatch.Dispatchers dispatchers;
+
+    public enum DispatcherType {
+        Client(CLIENT_DISPATCHER_PATH),
+        Transaction(TXN_DISPATCHER_PATH),
+        Shard(SHARD_DISPATCHER_PATH),
+        Notification(NOTIFICATION_DISPATCHER_PATH),
+        Serialization(SERIALIZATION_DISPATCHER_PATH);
+
+        private final String path;
+
+        DispatcherType(final String path) {
+            this.path = path;
+        }
+
+        String path(final akka.dispatch.Dispatchers knownDispatchers) {
+            if (knownDispatchers.hasDispatcher(path)) {
+                return path;
+            }
+            return DEFAULT_DISPATCHER_PATH;
+        }
+
+        ExecutionContext dispatcher(final akka.dispatch.Dispatchers knownDispatchers) {
+            if (knownDispatchers.hasDispatcher(path)) {
+                return knownDispatchers.lookup(path);
+            }
+            return knownDispatchers.defaultGlobalDispatcher();
+        }
+    }
+
+    public Dispatchers(final akka.dispatch.Dispatchers dispatchers) {
+        this.dispatchers = requireNonNull(dispatchers, "dispatchers should not be null");
+    }
+
+    public ExecutionContext getDispatcher(final DispatcherType dispatcherType) {
+        return dispatcherType.dispatcher(this.dispatchers);
+    }
+
+    public String getDispatcherPath(final DispatcherType dispatcherType) {
+        return dispatcherType.path(this.dispatchers);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/ExecuteInSelfActor.java b/java/org/opendaylight/controller/cluster/common/actor/ExecuteInSelfActor.java
new file mode 100644 (file)
index 0000000..fe0bbed
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.japi.Procedure;
+import com.google.common.annotations.Beta;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Interface implemented by Actors, who can schedule invocation of a {@link Procedure} in their context.
+ *
+ * @author Robert Varga
+ */
+@Beta
+public interface ExecuteInSelfActor {
+    /**
+     * Run a Runnable in the context of this actor.
+     *
+     * @param runnable Runnable to run
+     */
+    void executeInSelf(@NonNull Runnable runnable);
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/ExecuteInSelfMessage.java b/java/org/opendaylight/controller/cluster/common/actor/ExecuteInSelfMessage.java
new file mode 100644 (file)
index 0000000..1b14e48
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.dispatch.ControlMessage;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Message internal to {@link ExecuteInSelfActor} implementations in this package.
+ *
+ * @author Robert Varga
+ */
+final class ExecuteInSelfMessage implements ControlMessage {
+    private final Runnable runnable;
+
+    ExecuteInSelfMessage(final @NonNull Runnable runnable) {
+        this.runnable = requireNonNull(runnable);
+    }
+
+    void run() {
+        runnable.run();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/ExplicitAsk.java b/java/org/opendaylight/controller/cluster/common/actor/ExplicitAsk.java
new file mode 100644 (file)
index 0000000..e241d88
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.pattern.ExplicitAskSupport;
+import akka.util.Timeout;
+import com.google.common.annotations.Beta;
+import java.util.function.Function;
+import scala.Function1;
+import scala.concurrent.Future;
+import scala.runtime.AbstractFunction1;
+
+/**
+ * Unfortunately Akka's explicit ask pattern does not work with its Java API, as it fails to invoke passed message.
+ * In order to make this work for now, we tap directly into ExplicitAskSupport and use a Scala function instead
+ * of akka.japi.Function.
+ *
+ * @author Robert Varga
+ */
+@Beta
+public final class ExplicitAsk {
+    // Scala singleton for the akka.pattern.extended package object, which implements ExplicitAskSupport.
+    private static final ExplicitAskSupport ASK_SUPPORT = akka.pattern.extended.package$.MODULE$;
+
+    private ExplicitAsk() {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Wraps a Java {@link Function} in a Scala {@link Function1} so it can be passed to Akka's Scala API.
+     *
+     * @param function function mapping the ask-pattern's temporary sender to the message to send
+     * @return an equivalent Scala function
+     */
+    public static <T> Function1<ActorRef, T> toScala(final Function<ActorRef, T> function) {
+        return new AbstractFunction1<ActorRef, T>() {
+            @Override
+            public T apply(final ActorRef askSender) {
+                return function.apply(askSender);
+            }
+        };
+    }
+
+    /**
+     * Asks {@code actor}, constructing the message from the temporary ask sender via {@code function}.
+     */
+    @SuppressWarnings("unchecked")
+    public static Future<Object> ask(final ActorRef actor, final Function1<ActorRef, ?> function,
+            final Timeout timeout) {
+        return ASK_SUPPORT.ask(actor, (Function1<ActorRef, Object>)function, timeout);
+    }
+
+    /**
+     * Asks {@code actor}, constructing the message from the temporary ask sender via {@code function}.
+     */
+    @SuppressWarnings("unchecked")
+    public static Future<Object> ask(final ActorSelection actor, final Function1<ActorRef, ?> function,
+            final Timeout timeout) {
+        return ASK_SUPPORT.ask(actor, (Function1<ActorRef, Object>)function, timeout);
+    }
+
+    /**
+     * Convenience overload of {@link #ask(ActorRef, Function1, Timeout)} taking a Java function.
+     */
+    public static Future<Object> ask(final ActorRef actor, final Function<ActorRef, ?> function,
+            final Timeout timeout) {
+        return ask(actor, toScala(function), timeout);
+    }
+
+    /**
+     * Convenience overload of {@link #ask(ActorSelection, Function1, Timeout)} taking a Java function.
+     */
+    public static Future<Object> ask(final ActorSelection actor, final Function<ActorRef, ?> function,
+            final Timeout timeout) {
+        return ask(actor, toScala(function), timeout);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/FileAkkaConfigurationReader.java b/java/org/opendaylight/controller/cluster/common/actor/FileAkkaConfigurationReader.java
new file mode 100644 (file)
index 0000000..883dbd7
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import java.io.File;
+
+public class FileAkkaConfigurationReader implements AkkaConfigurationReader {
+    private static final String CUSTOM_AKKA_CONF_PATH = "./configuration/initial/akka.conf";
+    private static final String FACTORY_AKKA_CONF_PATH = "./configuration/factory/akka.conf";
+
+    @Override
+    public Config read() {
+        File customConfigFile = new File(CUSTOM_AKKA_CONF_PATH);
+        Preconditions.checkState(customConfigFile.exists(), "%s is missing", customConfigFile);
+
+        File factoryConfigFile = new File(FACTORY_AKKA_CONF_PATH);
+        if (factoryConfigFile.exists()) {
+            return ConfigFactory.parseFile(customConfigFile).withFallback(ConfigFactory.parseFile(factoryConfigFile));
+        }
+
+        return ConfigFactory.parseFile(customConfigFile);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/MessageTracker.java b/java/org/opendaylight/controller/cluster/common/actor/MessageTracker.java
new file mode 100644 (file)
index 0000000..1ba5ac6
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Stopwatch;
+import com.google.common.base.Ticker;
+import com.google.common.collect.ImmutableList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * MessageTracker is a diagnostic utility class to be used for figuring out why a certain message which was
+ * expected to arrive in a given time interval does not arrive. It attempts to keep track of all the messages that
+ * received between the arrival of two instances of the same message and the amount of time it took to process each
+ * of those messages.
+ * <br>
+ * Usage of the API is as follows,
+ * <pre>
+ *
+ *      // Track the Foo class, Here we expect to see a message of type Foo come in every 10 millis
+ *     MessageTracker tracker = new MessageTracker(Foo.class, 10);
+ *
+ *     // Begin the tracking process. If this is not called then calling received and done on the resultant Context
+ *     // will do nothing
+ *     tracker.begin();
+ *
+ *     .....
+ *
+ *     try (MessageTracker.Context context = tracker.received(message)) {
+ *
+ *         if (context.error().isPresent()){
+ *             LOG.error("{}", context.error().get());
+ *         }
+ *
+ *         // Some custom processing
+ *         process(message);
+ *     }
+ *
+ * </pre>
+ *
+ * <p>
+ * This class is NOT thread-safe.
+ */
+@Beta
+public final class MessageTracker {
+    public abstract static class Context implements AutoCloseable {
+        Context() {
+            // Hidden to prevent outside instantiation
+        }
+
+        public abstract Optional<Error> error();
+
+        @Override
+        public abstract void close();
+    }
+
+    public interface Error {
+        Object getLastExpectedMessage();
+
+        Object getCurrentExpectedMessage();
+
+        List<MessageProcessingTime> getMessageProcessingTimesSinceLastExpectedMessage();
+    }
+
+
+    public static final class MessageProcessingTime {
+        private final Class<?> messageClass;
+        private final long elapsedTimeInNanos;
+
+        MessageProcessingTime(final Class<?> messageClass, final long elapsedTimeInNanos) {
+            this.messageClass = requireNonNull(messageClass);
+            this.elapsedTimeInNanos = elapsedTimeInNanos;
+        }
+
+        @Override
+        public String toString() {
+            return "MessageProcessingTime [messageClass=" + messageClass + ", elapsedTimeInMillis="
+                   + NANOSECONDS.toMillis(elapsedTimeInNanos) + "]";
+        }
+
+
+        public Class<?> getMessageClass() {
+            return messageClass;
+        }
+
+        public long getElapsedTimeInNanos() {
+            return elapsedTimeInNanos;
+        }
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(MessageTracker.class);
+    private static final Context NO_OP_CONTEXT = new Context() {
+        @Override
+        public void close() {
+            // No-op
+        }
+
+        @Override
+        public Optional<Error> error() {
+            return Optional.empty();
+        }
+    };
+
+    private final List<MessageProcessingTime> messagesSinceLastExpectedMessage = new LinkedList<>();
+
+    private final CurrentMessageContext currentMessageContext;
+
+    private final Stopwatch expectedMessageWatch;
+
+    private final Class<?> expectedMessageClass;
+
+    private final long expectedArrivalInterval;
+
+    private final Ticker ticker;
+
+    private Object lastExpectedMessage;
+
+    @VisibleForTesting
+    MessageTracker(final Class<?> expectedMessageClass, final long expectedArrivalIntervalInMillis,
+            final Ticker ticker) {
+        checkArgument(expectedArrivalIntervalInMillis >= 0);
+        this.expectedMessageClass = requireNonNull(expectedMessageClass);
+        this.expectedArrivalInterval = MILLISECONDS.toNanos(expectedArrivalIntervalInMillis);
+        this.ticker = requireNonNull(ticker);
+        this.expectedMessageWatch = Stopwatch.createUnstarted(ticker);
+        this.currentMessageContext = new CurrentMessageContext();
+    }
+
+    /**
+     * Constructs an instance.
+     *
+     * @param expectedMessageClass the class of the message to track
+     * @param expectedArrivalIntervalInMillis the expected arrival interval between two instances of the expected
+     *                                        message
+     */
+    public MessageTracker(final Class<?> expectedMessageClass, final long expectedArrivalIntervalInMillis) {
+        this(expectedMessageClass, expectedArrivalIntervalInMillis, Ticker.systemTicker());
+    }
+
+    public void begin() {
+        if (!expectedMessageWatch.isRunning()) {
+            LOG.trace("Started tracking class {} timeout {}ns", expectedMessageClass, expectedArrivalInterval);
+            expectedMessageWatch.start();
+        }
+    }
+
+    public Context received(final Object message) {
+        if (!expectedMessageWatch.isRunning()) {
+            return NO_OP_CONTEXT;
+        }
+
+        if (expectedMessageClass.isInstance(message)) {
+            final long actualElapsedTime = expectedMessageWatch.elapsed(NANOSECONDS);
+            if (actualElapsedTime > expectedArrivalInterval) {
+                return new ErrorContext(message, new FailedExpectation(lastExpectedMessage, message,
+                        messagesSinceLastExpectedMessage, expectedArrivalInterval, actualElapsedTime));
+            }
+            lastExpectedMessage = message;
+            messagesSinceLastExpectedMessage.clear();
+            expectedMessageWatch.reset().start();
+        }
+
+        currentMessageContext.reset(message);
+        return currentMessageContext;
+    }
+
+    void processed(final Object message, final long messageElapseTimeInNanos) {
+        if (expectedMessageWatch.isRunning() && !expectedMessageClass.isInstance(message)) {
+            messagesSinceLastExpectedMessage.add(new MessageProcessingTime(message.getClass(),
+                messageElapseTimeInNanos));
+        }
+    }
+
+    public List<MessageProcessingTime> getMessagesSinceLastExpectedMessage() {
+        return ImmutableList.copyOf(messagesSinceLastExpectedMessage);
+    }
+
+    private static final class FailedExpectation implements Error {
+        private final Object lastExpectedMessage;
+        private final Object currentExpectedMessage;
+        private final List<MessageProcessingTime> messagesSinceLastExpectedMessage;
+        private final long expectedTimeInMillis;
+        private final long actualTimeInMillis;
+
+        FailedExpectation(final Object lastExpectedMessage, final Object message,
+                final List<MessageProcessingTime> messagesSinceLastExpectedMessage, final long expectedTimeNanos,
+                final long actualTimeNanos) {
+            this.lastExpectedMessage = lastExpectedMessage;
+            this.currentExpectedMessage = message;
+            this.messagesSinceLastExpectedMessage = ImmutableList.copyOf(messagesSinceLastExpectedMessage);
+            this.expectedTimeInMillis = NANOSECONDS.toMillis(expectedTimeNanos);
+            this.actualTimeInMillis = NANOSECONDS.toMillis(actualTimeNanos);
+        }
+
+        @Override
+        public Object getLastExpectedMessage() {
+            return lastExpectedMessage;
+        }
+
+        @Override
+        public Object getCurrentExpectedMessage() {
+            return currentExpectedMessage;
+        }
+
+        @Override
+        public List<MessageProcessingTime>  getMessageProcessingTimesSinceLastExpectedMessage() {
+            return messagesSinceLastExpectedMessage;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder builder = new StringBuilder()
+                    .append("\n> Last Expected Message = ").append(lastExpectedMessage)
+                    .append("\n> Current Expected Message = ").append(currentExpectedMessage)
+                    .append("\n> Expected time in between messages = ").append(expectedTimeInMillis)
+                    .append("\n> Actual time in between messages = ").append(actualTimeInMillis);
+            for (MessageProcessingTime time : messagesSinceLastExpectedMessage) {
+                builder.append("\n\t> ").append(time);
+            }
+            return builder.toString();
+        }
+    }
+
+    private abstract class AbstractTimedContext extends Context {
+        abstract Object message();
+
+        abstract Stopwatch stopTimer();
+
+        @Override
+        public final void close() {
+            processed(message(), stopTimer().elapsed(NANOSECONDS));
+        }
+    }
+
+    private final class CurrentMessageContext extends AbstractTimedContext {
+        private final Stopwatch stopwatch = Stopwatch.createUnstarted(ticker);
+        private Object message;
+
+        void reset(final Object newMessage) {
+            this.message = requireNonNull(newMessage);
+            checkState(!stopwatch.isRunning(), "Trying to reset a context that is not done (%s). currentMessage = %s",
+                this, newMessage);
+            stopwatch.start();
+        }
+
+        @Override
+        Object message() {
+            return message;
+        }
+
+        @Override
+        Stopwatch stopTimer() {
+            return stopwatch.stop();
+        }
+
+        @Override
+        public Optional<Error> error() {
+            return Optional.empty();
+        }
+    }
+
+    private final class ErrorContext extends AbstractTimedContext {
+        private final Stopwatch stopwatch = Stopwatch.createStarted(ticker);
+        private final Object message;
+        private final Error error;
+
+        ErrorContext(final Object message, final Error error) {
+            this.message = requireNonNull(message);
+            this.error = requireNonNull(error);
+        }
+
+        @Override
+        Object message() {
+            return message;
+        }
+
+        @Override
+        Stopwatch stopTimer() {
+            return stopwatch.stop();
+        }
+
+        @Override
+        public Optional<Error> error() {
+            return Optional.of(error);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailbox.java b/java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailbox.java
new file mode 100644 (file)
index 0000000..d234532
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.dispatch.BoundedDequeBasedMailbox;
+import akka.dispatch.MailboxType;
+import akka.dispatch.ProducesMessageQueue;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricRegistry;
+import com.typesafe.config.Config;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.duration.FiniteDuration;
+
+public class MeteredBoundedMailbox implements MailboxType,
+        ProducesMessageQueue<MeteredBoundedMailbox.MeteredMessageQueue> {
+    private static final Logger LOG = LoggerFactory.getLogger(MeteredBoundedMailbox.class);
+    private static final String QUEUE_SIZE = "q-size";
+
+    private final Integer capacity;
+    private final FiniteDuration pushTimeOut;
+
+    public MeteredBoundedMailbox(final ActorSystem.Settings settings, final Config config) {
+
+        CommonConfig commonConfig = new CommonConfig(settings.config());
+        this.capacity = commonConfig.getMailBoxCapacity();
+        this.pushTimeOut = commonConfig.getMailBoxPushTimeout();
+    }
+
+
+    @Override
+    public MeteredMessageQueue create(final scala.Option<ActorRef> owner, final scala.Option<ActorSystem> system) {
+        final MeteredMessageQueue queue = new MeteredMessageQueue(this.capacity, this.pushTimeOut);
+        monitorQueueSize(owner, queue);
+        return queue;
+    }
+
+    private static void monitorQueueSize(final scala.Option<ActorRef> owner, final MeteredMessageQueue monitoredQueue) {
+        registerMetric(owner, QUEUE_SIZE, getQueueSizeGuage(monitoredQueue));
+    }
+
+    private static Gauge<Integer> getQueueSizeGuage(final MeteredMessageQueue monitoredQueue) {
+        return monitoredQueue::size;
+    }
+
+    static <T extends Metric> void registerMetric(final scala.Option<ActorRef> owner, final String metricName,
+            final T metric) {
+        if (owner.isEmpty()) {
+           // there's no actor to monitor
+            return;
+        }
+
+        String actorName = owner.get().path().toStringWithoutAddress();
+        String fullName = MetricRegistry.name(actorName, metricName);
+
+        MetricRegistry registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry();
+
+        if (registry.getMetrics().containsKey(fullName)) {
+            // already registered
+            return;
+        }
+
+        try {
+            registry.register(fullName, metric);
+        } catch (IllegalArgumentException e) {
+            // already registered - shouldn't happen here since we check above...
+            LOG.debug("Unable to register '{}' in metrics registry", fullName);
+        }
+    }
+
+    public static class MeteredMessageQueue extends BoundedDequeBasedMailbox.MessageQueue {
+        private static final long serialVersionUID = 1L;
+
+        public MeteredMessageQueue(final int capacity, final FiniteDuration pushTimeOut) {
+            super(capacity, pushTimeOut);
+        }
+    }
+}
+
diff --git a/java/org/opendaylight/controller/cluster/common/actor/MeteringBehavior.java b/java/org/opendaylight/controller/cluster/common/actor/MeteringBehavior.java
new file mode 100644 (file)
index 0000000..a49bc30
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.AbstractActor;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+import scala.PartialFunction;
+import scala.runtime.AbstractPartialFunction;
+import scala.runtime.BoxedUnit;
+
+/**
+ * Represents behaviour that can be exhibited by actors of type {@link akka.actor.UntypedActor}
+ *
+ * <p>
+ * This behaviour meters actor's default behaviour. It captures 2 metrics:
+ * <ul>
+ *     <li>message processing rate of actor's receive block</li>
+ *     <li>message processing rate by message type</li>
+ * </ul>
+ * The information is reported to {@link org.opendaylight.controller.cluster.reporting.MetricsReporter}
+ */
+public class MeteringBehavior extends AbstractPartialFunction<Object, BoxedUnit> {
+    public static final String DOMAIN = "org.opendaylight.controller.actor.metric";
+
+    private static final String MSG_PROCESSING_RATE = "msg-rate";
+
+    private final MetricRegistry metricRegistry = MetricsReporter.getInstance(DOMAIN).getMetricsRegistry();
+    private final String actorQualifiedName;
+    private final Timer msgProcessingTimer;
+    private final PartialFunction<Object, BoxedUnit> receive;
+
+    private MeteringBehavior(final String actorName, final AbstractActor meteredActor) {
+        actorQualifiedName = meteredActor.getSelf().path().parent().toStringWithoutAddress() + "/" + actorName;
+        msgProcessingTimer = metricRegistry.timer(MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE));
+        receive = meteredActor.createReceive().onMessage();
+    }
+
+    /**
+     * Constructs an instance.
+     *
+     * @param actor whose behaviour needs to be metered
+     */
+    public MeteringBehavior(final AbstractUntypedActorWithMetering actor) {
+        this(actor.getActorNameOverride() != null ? actor.getActorNameOverride() : actor.getSelf().path().name(),
+                actor);
+    }
+
+    public MeteringBehavior(final AbstractActor actor) {
+        this(actor.getSelf().path().name(), actor);
+    }
+
+    @Override
+    public boolean isDefinedAt(final Object obj) {
+        return receive.isDefinedAt(obj);
+    }
+
+    /**
+     * Uses 2 timers to measure message processing rate. One for overall message processing rate and
+     * another to measure rate by message type. The timers are re-used if they were previously created.
+     *
+     * <p>
+     * {@link com.codahale.metrics.MetricRegistry} maintains a reservoir for different timers where
+     * collected timings are kept. It exposes various metrics for each timer based on collected
+     * data. Eg: count of messages, 99, 95, 50... percentiles, max, mean etc.
+     *
+     * <p>
+     * These metrics are exposed as JMX bean.
+     *
+     * @see <a href="http://dropwizard.github.io/metrics/manual/core/#timers">
+     *     http://dropwizard.github.io/metrics/manual/core/#timers</a>
+     *
+     * @param message the message to process
+     */
+    @Override
+    public BoxedUnit apply(Object message) {
+        final String messageType = message.getClass().getSimpleName();
+        final String msgProcessingTimeByMsgType =
+                MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE, messageType);
+        final Timer msgProcessingTimerByMsgType = metricRegistry.timer(msgProcessingTimeByMsgType);
+
+        //start timers
+        final Timer.Context context = msgProcessingTimer.time();
+        final Timer.Context contextByMsgType = msgProcessingTimerByMsgType.time();
+
+        try {
+            return receive.apply(message);
+        } finally {
+            //stop timers
+            contextByMsgType.stop();
+            context.stop();
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/Monitor.java b/java/org/opendaylight/controller/cluster/common/actor/Monitor.java
new file mode 100644 (file)
index 0000000..004d289
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.ActorRef;
+import java.io.Serializable;
+
+public class Monitor implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private final ActorRef actorRef;
+
+    public Monitor(ActorRef actorRef) {
+        this.actorRef = actorRef;
+    }
+
+    public ActorRef getActorRef() {
+        return actorRef;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java b/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java
new file mode 100644 (file)
index 0000000..52df6ab
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015 Huawei Technologies Co., Ltd. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.Props;
+import akka.actor.UntypedAbstractActor;
+import akka.japi.Effect;
+import akka.remote.ThisActorSystemQuarantinedEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class listens to Akka RemotingLifecycleEvent events to detect when this node has been
+ * quarantined by another. Once this node gets quarantined, restart the ActorSystem to allow this
+ * node to rejoin the cluster.
+ *
+ * @author Gary Wu gary.wu1@huawei.com
+ *
+ */
+public class QuarantinedMonitorActor extends UntypedAbstractActor {
+
+    private static final Logger LOG = LoggerFactory.getLogger(QuarantinedMonitorActor.class);
+
+    public static final String ADDRESS = "quarantined-monitor";
+
+    private final Effect callback;
+    private boolean quarantined;
+
+    protected QuarantinedMonitorActor(final Effect callback) {
+        this.callback = callback;
+
+        LOG.debug("Created QuarantinedMonitorActor");
+
+        getContext().system().eventStream().subscribe(getSelf(), ThisActorSystemQuarantinedEvent.class);
+    }
+
+    @Override
+    public void postStop() {
+        LOG.debug("Stopping QuarantinedMonitorActor");
+    }
+
+    @Override
+    public void onReceive(final Object message) throws Exception {
+        final String messageType = message.getClass().getSimpleName();
+        LOG.trace("onReceive {} {}", messageType, message);
+
+        // check to see if we got quarantined by another node
+        if (quarantined) {
+            return;
+        }
+
+        if (message instanceof ThisActorSystemQuarantinedEvent) {
+            final ThisActorSystemQuarantinedEvent event = (ThisActorSystemQuarantinedEvent) message;
+            LOG.warn("Got quarantined by {}", event.remoteAddress());
+            quarantined = true;
+
+            // execute the callback
+            callback.apply();
+        }
+    }
+
+    public static Props props(final Effect callback) {
+        return Props.create(QuarantinedMonitorActor.class, callback);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/UnboundedDequeBasedControlAwareMailbox.java b/java/org/opendaylight/controller/cluster/common/actor/UnboundedDequeBasedControlAwareMailbox.java
new file mode 100644 (file)
index 0000000..3093030
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.dispatch.ControlMessage;
+import akka.dispatch.DequeBasedMessageQueueSemantics;
+import akka.dispatch.Envelope;
+import akka.dispatch.MailboxType;
+import akka.dispatch.ProducesMessageQueue;
+import akka.dispatch.UnboundedControlAwareMailbox;
+import com.codahale.metrics.Gauge;
+import com.typesafe.config.Config;
+import java.util.Deque;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedDeque;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.Option;
+
+/**
+ * An unbounded ControlAwareMailbox that also supports DequeBasedMessageQueueSemantics so it can be used with
+ * persistent actors which use stashing.
+ *
+ * @author Thomas Pantelis
+ */
+public class UnboundedDequeBasedControlAwareMailbox implements MailboxType,
+        ProducesMessageQueue<UnboundedDequeBasedControlAwareMailbox.MessageQueue> {
+    private static final Logger LOG = LoggerFactory.getLogger(UnboundedDequeBasedControlAwareMailbox.class);
+    private static final String NORMAL_QUEUE_SIZE = "normal-q-size";
+    private static final String CONTROL_QUEUE_SIZE = "control-q-size";
+    private static final String TOTAL_QUEUE_SIZE = "total-q-size";
+
+    public UnboundedDequeBasedControlAwareMailbox(ActorSystem.Settings settings, Config config) {
+    }
+
+    @Override
+    public MessageQueue create(Option<ActorRef> owner, Option<ActorSystem> system) {
+        LOG.debug("Creating MessageQueue for {}", owner);
+
+        final MessageQueue queue = new MessageQueue();
+
+        MeteredBoundedMailbox.registerMetric(owner, NORMAL_QUEUE_SIZE, (Gauge<Integer>) () -> queue.queue().size());
+        MeteredBoundedMailbox.registerMetric(owner, CONTROL_QUEUE_SIZE,
+            (Gauge<Integer>) () -> queue.controlQueue().size());
+        MeteredBoundedMailbox.registerMetric(owner, TOTAL_QUEUE_SIZE, (Gauge<Integer>) queue::numberOfMessages);
+
+        return queue;
+    }
+
+    static class MessageQueue extends UnboundedControlAwareMailbox.MessageQueue
+            implements DequeBasedMessageQueueSemantics {
+        private static final long serialVersionUID = 1L;
+
+        private final Deque<Envelope> controlQueue = new ConcurrentLinkedDeque<>();
+        private final Deque<Envelope> queue = new ConcurrentLinkedDeque<>();
+
+        @Override
+        public Queue<Envelope> controlQueue() {
+            return controlQueue;
+        }
+
+        @Override
+        public Queue<Envelope> queue() {
+            return queue;
+        }
+
+        @Override
+        public void enqueueFirst(ActorRef actor, Envelope envelope) {
+            final Object message = envelope.message();
+            LOG.trace("enqueueFirst: actor {}, message type: {}", actor, message.getClass());
+            if (message instanceof ControlMessage) {
+                LOG.trace("Adding {} to the ControlMessage queue", message.getClass());
+                controlQueue.addFirst(envelope);
+            } else {
+                LOG.trace("Adding {} to the normal queue", message.getClass());
+                queue.addFirst(envelope);
+            }
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/common/actor/UnifiedConfig.java b/java/org/opendaylight/controller/cluster/common/actor/UnifiedConfig.java
new file mode 100644 (file)
index 0000000..d14f3a7
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import com.typesafe.config.Config;
+
+/**
+ * Represents a unified view of configuration.
+ *
+ * <p>
+ * It merges configuration from:
+ * <ul>
+ *     <li>Config subsystem</li>
+ *     <li>Akka configuration files</li>
+ * </ul>
+ * Configurations defined in config subsystem takes precedence.
+ */
public interface UnifiedConfig {

    /**
     * Returns an immutable instance of the unified configuration, merged from the config
     * subsystem and the Akka configuration files, with config-subsystem entries taking
     * precedence.
     *
     * @return a Config instance
     */
    Config get();
}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java
new file mode 100644 (file)
index 0000000..0bf1a2f
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils;
+
+import static java.util.Objects.requireNonNull;
+
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MixinNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
+
+/**
+ * NormalizedNodeNavigator walks a {@link NormalizedNodeVisitor} through the NormalizedNode.
+ */
/**
 * NormalizedNodeNavigator walks a {@link NormalizedNodeVisitor} through a NormalizedNode tree,
 * invoking the visitor with a depth level and a slash-separated parent path for every node.
 */
public class NormalizedNodeNavigator {

    private final NormalizedNodeVisitor visitor;

    public NormalizedNodeNavigator(final NormalizedNodeVisitor visitor) {
        this.visitor = requireNonNull(visitor, "visitor should not be null");
    }

    /**
     * Starts navigation at the given node.
     *
     * @param parentPath path prefix for the root node; null is treated as the empty string
     * @param normalizedNode the root of the tree to walk
     */
    public void navigate(String parentPath, final NormalizedNode<?, ?> normalizedNode) {
        if (parentPath == null) {
            parentPath = "";
        }
        navigateNormalizedNode(0, parentPath, normalizedNode);
    }

    // Visits a container node, then walks its children, routing mixin containers through the
    // mixin-specific walker.
    private void navigateDataContainerNode(final int level, final String parentPath,
            final DataContainerNode<?> dataContainerNode) {
        visitor.visitNode(level, parentPath, dataContainerNode);

        String newParentPath = parentPath + "/" + dataContainerNode.getIdentifier().toString();

        // NOTE(review): mixin children here descend at the same 'level', whereas
        // navigateNormalizedNodeContainerMixin descends nested mixins at 'level + 1' —
        // verify the intended depth semantics with the visitor implementations.
        for (NormalizedNode<?, ?> node : dataContainerNode.getValue()) {
            if (node instanceof MixinNode && node instanceof NormalizedNodeContainer) {
                navigateNormalizedNodeContainerMixin(level, newParentPath, (NormalizedNodeContainer<?, ?, ?>) node);
            } else {
                navigateNormalizedNode(level, newParentPath, node);
            }
        }

    }

    // Visits a mixin container node, recursing into nested mixin containers one level deeper
    // and dispatching other children through navigateNormalizedNode.
    private void navigateNormalizedNodeContainerMixin(final int level, final String parentPath,
            final NormalizedNodeContainer<?, ?, ?> node) {
        visitor.visitNode(level, parentPath, node);

        String newParentPath = parentPath + "/" + node.getIdentifier().toString();

        for (NormalizedNode<?, ?> normalizedNode : node.getValue()) {
            if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer) {
                navigateNormalizedNodeContainerMixin(level + 1, newParentPath,
                        (NormalizedNodeContainer<?, ?, ?>) normalizedNode);
            } else {
                navigateNormalizedNode(level, newParentPath, normalizedNode);
            }
        }

    }

    // Dispatches a node: containers are walked recursively; leaves are visited directly.
    // Both paths increment the level by one relative to the caller.
    private void navigateNormalizedNode(final int level, final String parentPath,
            final NormalizedNode<?, ?> normalizedNode) {
        if (normalizedNode instanceof DataContainerNode) {

            final DataContainerNode<?> dataContainerNode = (DataContainerNode<?>) normalizedNode;

            navigateDataContainerNode(level + 1, parentPath, dataContainerNode);
        } else {
            visitor.visitNode(level + 1, parentPath, normalizedNode);
        }
    }
}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeVisitor.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeVisitor.java
new file mode 100644 (file)
index 0000000..c04f0ce
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils;
+
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
/**
 * Callback invoked by {@link NormalizedNodeNavigator} for each node encountered while walking
 * a NormalizedNode tree.
 */
public interface NormalizedNodeVisitor {
    /**
     * Visits a single node.
     *
     * @param level depth of the node in the walk (see NormalizedNodeNavigator for semantics)
     * @param parentPath slash-separated path of the node's parent
     * @param normalizedNode the node being visited
     */
    void visitNode(int level, String parentPath, NormalizedNode<?, ?> normalizedNode);
}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactory.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactory.java
new file mode 100644 (file)
index 0000000..a0d8a31
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import java.net.URI;
+import java.util.Objects;
+import java.util.concurrent.ExecutionException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.common.Revision;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+
/**
 * Factory for interned {@link QName}, {@link QNameModule} and {@link NodeIdentifier} instances,
 * backed by bounded weak-value caches so equal lookups return the same canonical object.
 */
public final class QNameFactory {
    // Cache key combining localName/namespace/revision strings; revision may be null.
    private static final class StringQName implements Immutable {
        private final @NonNull String localName;
        private final @NonNull String namespace;
        private final @Nullable String revision;

        StringQName(final String localName, final String namespace, final String revision) {
            this.localName = requireNonNull(localName);
            this.namespace = requireNonNull(namespace);
            this.revision = revision;
        }

        @Override
        public int hashCode() {
            return Objects.hash(localName, namespace, revision);
        }

        @Override
        public boolean equals(final Object obj) {
            if (this == obj) {
                return true;
            }
            if (!(obj instanceof StringQName)) {
                return false;
            }
            final StringQName other = (StringQName) obj;
            return localName.equals(other.localName) && namespace.equals(other.namespace)
                    && Objects.equals(revision, other.revision);
        }

        // Materializes the QName this key denotes; branch handles the optional revision.
        QName toQName() {
            return revision != null ? QName.create(namespace, revision, localName) : QName.create(namespace, localName);
        }
    }

    // Cache key combining an already-resolved module with a local name.
    private static final class ModuleQName implements Immutable {
        private final @NonNull QNameModule module;
        private final @NonNull String localName;

        ModuleQName(final QNameModule module, final String localName) {
            this.module = requireNonNull(module);
            this.localName = requireNonNull(localName);
        }

        @Override
        public int hashCode() {
            return 31 * module.hashCode() + localName.hashCode();
        }

        @Override
        public boolean equals(final Object obj) {
            if (this == obj) {
                return true;
            }
            if (!(obj instanceof ModuleQName)) {
                return false;
            }
            final ModuleQName other = (ModuleQName) obj;
            return localName.equals(other.localName) && module.equals(other.module);
        }

        QName toQName() {
            return QName.create(module, localName);
        }
    }

    // Cache key for a module identified by namespace/revision strings; revision may be null.
    private static final class StringModule implements Immutable {
        private final @NonNull String namespace;
        private final @Nullable String revision;

        StringModule(final String namespace, final String revision) {
            this.namespace = requireNonNull(namespace);
            this.revision = revision;
        }

        @Override
        public int hashCode() {
            return 31 * namespace.hashCode() + Objects.hashCode(revision);
        }

        @Override
        public boolean equals(final Object obj) {
            if (this == obj) {
                return true;
            }
            if (!(obj instanceof StringModule)) {
                return false;
            }
            final StringModule other = (StringModule) obj;
            return namespace.equals(other.namespace) && Objects.equals(revision, other.revision);
        }

        QNameModule toQNameModule() {
            return QNameModule.create(URI.create(namespace), Revision.ofNullable(revision));
        }
    }

    // Cache size caps, overridable via system properties.
    private static final int MAX_QNAME_CACHE_SIZE = Integer.getInteger(
        "org.opendaylight.controller.cluster.datastore.node.utils.qname-cache.max-size", 10000);
    private static final int MAX_MODULE_CACHE_SIZE = Integer.getInteger(
        "org.opendaylight.controller.cluster.datastore.node.utils.module-cache.max-size", 2000);

    // All caches use weak values: entries are collected once the interned object is unreachable.
    // Cache for the deprecated single-string QName form.
    private static final LoadingCache<String, QName> LEGACY_CACHE = CacheBuilder.newBuilder()
            .maximumSize(MAX_QNAME_CACHE_SIZE).weakValues().build(new CacheLoader<String, QName>() {
                @Override
                public QName load(final String key) {
                    return QName.create(key).intern();
                }
            });
    private static final LoadingCache<StringQName, QName> STRING_CACHE = CacheBuilder.newBuilder()
            .maximumSize(MAX_QNAME_CACHE_SIZE).weakValues().build(new CacheLoader<StringQName, QName>() {
                @Override
                public QName load(final StringQName key) {
                    return key.toQName().intern();
                }
            });
    private static final LoadingCache<ModuleQName, QName> QNAME_CACHE = CacheBuilder.newBuilder()
            .maximumSize(MAX_QNAME_CACHE_SIZE).weakValues().build(new CacheLoader<ModuleQName, QName>() {
                @Override
                public QName load(final ModuleQName key) {
                    return key.toQName().intern();
                }
            });
    private static final LoadingCache<StringModule, QNameModule> MODULE_CACHE = CacheBuilder.newBuilder()
            .maximumSize(MAX_MODULE_CACHE_SIZE).weakValues().build(new CacheLoader<StringModule, QNameModule>() {
                @Override
                public QNameModule load(final StringModule key) {
                    return key.toQNameModule().intern();
                }
            });
    // NodeIdentifier cache builds on QNAME_CACHE so both share the same interned QName.
    private static final LoadingCache<ModuleQName, NodeIdentifier> NODEID_CACHE = CacheBuilder.newBuilder()
            .maximumSize(MAX_QNAME_CACHE_SIZE).weakValues().build(new CacheLoader<ModuleQName, NodeIdentifier>() {
                @Override
                public NodeIdentifier load(final ModuleQName key) throws ExecutionException {
                    return NodeIdentifier.create(QNAME_CACHE.get(key));
                }
            });

    private QNameFactory() {

    }

    /**
     * Returns an interned QName parsed from its legacy single-string form.
     *
     * @deprecated prefer the structured overloads taking explicit components
     */
    @Deprecated
    public static QName create(final String name) {
        return LEGACY_CACHE.getUnchecked(name);
    }

    /**
     * Returns an interned QName for the given components; revision may be null.
     */
    public static QName create(final String localName, final String namespace, final @Nullable String revision) {
        return STRING_CACHE.getUnchecked(new StringQName(localName, namespace, revision));
    }

    /**
     * Returns an interned QName for a local name within an already-resolved module.
     */
    public static QName create(final QNameModule module, final String localName) {
        return QNAME_CACHE.getUnchecked(new ModuleQName(module, localName));
    }

    /**
     * Returns an interned QNameModule for the given namespace/revision; revision may be null.
     */
    public static QNameModule createModule(final String namespace, final @Nullable String revision) {
        return MODULE_CACHE.getUnchecked(new StringModule(namespace, revision));
    }

    /**
     * Returns a cached NodeIdentifier wrapping the interned QName for the given module/local name.
     *
     * @throws ExecutionException if the underlying cache load fails
     */
    public static @NonNull NodeIdentifier getNodeIdentifier(final QNameModule module, final String localName)
            throws ExecutionException {
        return NODEID_CACHE.get(new ModuleQName(module, localName));
    }
}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractLithiumDataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractLithiumDataInput.java
new file mode 100644 (file)
index 0000000..d921076
--- /dev/null
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+import com.google.common.collect.Sets;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.StringReader;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.transform.dom.DOMSource;
+import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
+import org.opendaylight.yangtools.util.ImmutableOffsetMapTemplate;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Element;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;
+
+/**
+ * Abstract base class for readers of the Lithium byte stream. It reads the stream and reconstructs a normalized node
+ * including its child nodes. This proceeds recursively, where each node-type byte signifies the start of a node,
+ * except END_NODE. If a node can have children, that node's end is determined by the appearance of END_NODE.
+ */
+abstract class AbstractLithiumDataInput extends AbstractNormalizedNodeDataInput {
+
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractLithiumDataInput.class);
+
+    private final List<String> codedStringMap = new ArrayList<>();
+
+    private QName lastLeafSetQName;
+
+    AbstractLithiumDataInput(final DataInput input) {
+        super(input);
+    }
+
+    @Override
+    public final void streamNormalizedNode(final NormalizedNodeStreamWriter writer) throws IOException {
+        streamNormalizedNode(requireNonNull(writer), input.readByte());
+    }
+
+    private void streamNormalizedNode(final NormalizedNodeStreamWriter writer, final byte nodeType) throws IOException {
+        switch (nodeType) {
+            case LithiumNode.ANY_XML_NODE:
+                streamAnyxml(writer);
+                break;
+            case LithiumNode.AUGMENTATION_NODE:
+                streamAugmentation(writer);
+                break;
+            case LithiumNode.CHOICE_NODE:
+                streamChoice(writer);
+                break;
+            case LithiumNode.CONTAINER_NODE:
+                streamContainer(writer);
+                break;
+            case LithiumNode.LEAF_NODE:
+                streamLeaf(writer);
+                break;
+            case LithiumNode.LEAF_SET:
+                streamLeafSet(writer);
+                break;
+            case LithiumNode.ORDERED_LEAF_SET:
+                streamOrderedLeafSet(writer);
+                break;
+            case LithiumNode.LEAF_SET_ENTRY_NODE:
+                streamLeafSetEntry(writer);
+                break;
+            case LithiumNode.MAP_ENTRY_NODE:
+                streamMapEntry(writer);
+                break;
+            case LithiumNode.MAP_NODE:
+                streamMap(writer);
+                break;
+            case LithiumNode.ORDERED_MAP_NODE:
+                streamOrderedMap(writer);
+                break;
+            case LithiumNode.UNKEYED_LIST:
+                streamUnkeyedList(writer);
+                break;
+            case LithiumNode.UNKEYED_LIST_ITEM:
+                streamUnkeyedListItem(writer);
+                break;
+            default:
+                throw new InvalidNormalizedNodeStreamException("Unexpected node " + nodeType);
+        }
+    }
+
+    private void streamAnyxml(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming anyxml node {}", identifier);
+
+        final DOMSource value = readDOMSource();
+        if (writer.startAnyxmlNode(identifier, DOMSource.class)) {
+            writer.domSourceValue(value);
+            writer.endNode();
+        }
+    }
+
+    private void streamAugmentation(final NormalizedNodeStreamWriter writer) throws IOException {
+        final AugmentationIdentifier augIdentifier = readAugmentationIdentifier();
+        LOG.trace("Streaming augmentation node {}", augIdentifier);
+        writer.startAugmentationNode(augIdentifier);
+        commonStreamContainer(writer);
+    }
+
+    private void streamChoice(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming choice node {}", identifier);
+        writer.startChoiceNode(identifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+        commonStreamContainer(writer);
+    }
+
+    private void streamContainer(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming container node {}", identifier);
+        writer.startContainerNode(identifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+        commonStreamContainer(writer);
+    }
+
+    private void streamLeaf(final NormalizedNodeStreamWriter writer) throws IOException {
+        startLeaf(writer);
+        endLeaf(writer, readObject());
+    }
+
+    // Leaf inside a MapEntryNode, it can potentially be a key leaf, in which case we want to de-duplicate values.
+    private void streamLeaf(final NormalizedNodeStreamWriter writer, final NodeIdentifierWithPredicates entryId)
+            throws IOException {
+        final NodeIdentifier identifier = startLeaf(writer);
+        final Object value = readObject();
+        final Object entryValue = entryId.getValue(identifier.getNodeType());
+        endLeaf(writer, entryValue == null ? value : entryValue);
+    }
+
+    private NodeIdentifier startLeaf(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming leaf node {}", identifier);
+        writer.startLeafNode(identifier);
+        return identifier;
+    }
+
+    private static void endLeaf(final NormalizedNodeStreamWriter writer, final Object value) throws IOException {
+        writer.scalarValue(value);
+        writer.endNode();
+    }
+
+    private void streamLeafSet(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming leaf set node {}", identifier);
+        writer.startLeafSet(identifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+        commonStreamLeafSet(writer, identifier);
+    }
+
+    private void streamOrderedLeafSet(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming ordered leaf set node {}", identifier);
+        writer.startOrderedLeafSet(identifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+        commonStreamLeafSet(writer, identifier);
+    }
+
+    private void commonStreamLeafSet(final NormalizedNodeStreamWriter writer, final NodeIdentifier identifier)
+            throws IOException {
+        lastLeafSetQName = identifier.getNodeType();
+        try {
+            commonStreamContainer(writer);
+        } finally {
+            // Make sure we never leak this
+            lastLeafSetQName = null;
+        }
+    }
+
+    private void streamLeafSetEntry(final NormalizedNodeStreamWriter writer) throws IOException {
+        final QName name = lastLeafSetQName != null ? lastLeafSetQName : readQName();
+        final Object value = readObject();
+        final NodeWithValue<Object> leafIdentifier = new NodeWithValue<>(name, value);
+        LOG.trace("Streaming leaf set entry node {}, value {}", leafIdentifier, value);
+        writer.startLeafSetEntryNode(leafIdentifier);
+        writer.scalarValue(value);
+        writer.endNode();
+    }
+
+    private void streamMap(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming map node {}", identifier);
+        writer.startMapNode(identifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+        commonStreamContainer(writer);
+    }
+
+    private void streamOrderedMap(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming ordered map node {}", identifier);
+        writer.startOrderedMapNode(identifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+        commonStreamContainer(writer);
+    }
+
+    private void streamMapEntry(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifierWithPredicates entryIdentifier = readNormalizedNodeWithPredicates();
+        LOG.trace("Streaming map entry node {}", entryIdentifier);
+        writer.startMapEntryNode(entryIdentifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+
+        // Same loop as commonStreamContainer(), but ...
+        for (byte nodeType = input.readByte(); nodeType != LithiumNode.END_NODE; nodeType = input.readByte()) {
+            if (nodeType == LithiumNode.LEAF_NODE) {
+                // ... leaf nodes may need de-duplication
+                streamLeaf(writer, entryIdentifier);
+            } else {
+                streamNormalizedNode(writer, nodeType);
+            }
+        }
+        writer.endNode();
+    }
+
+    private void streamUnkeyedList(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming unkeyed list node {}", identifier);
+        writer.startUnkeyedList(identifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+        commonStreamContainer(writer);
+    }
+
+    private void streamUnkeyedListItem(final NormalizedNodeStreamWriter writer) throws IOException {
+        final NodeIdentifier identifier = readNodeIdentifier();
+        LOG.trace("Streaming unkeyed list item node {}", identifier);
+        writer.startUnkeyedListItem(identifier, NormalizedNodeStreamWriter.UNKNOWN_SIZE);
+        commonStreamContainer(writer);
+    }
+
+    private void commonStreamContainer(final NormalizedNodeStreamWriter writer) throws IOException {
+        for (byte nodeType = input.readByte(); nodeType != LithiumNode.END_NODE; nodeType = input.readByte()) {
+            streamNormalizedNode(writer, nodeType);
+        }
+        writer.endNode();
+    }
+
+    private DOMSource readDOMSource() throws IOException {
+        String xml = readObject().toString();
+        try {
+            DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+            factory.setNamespaceAware(true);
+            Element node = factory.newDocumentBuilder().parse(
+                    new InputSource(new StringReader(xml))).getDocumentElement();
+            return new DOMSource(node);
+        } catch (SAXException | ParserConfigurationException e) {
+            throw new IOException("Error parsing XML: " + xml, e);
+        }
+    }
+
+    final QName defaultReadQName() throws IOException {
+        // Read in the same sequence of writing
+        String localName = readCodedString();
+        String namespace = readCodedString();
+        String revision = Strings.emptyToNull(readCodedString());
+
+        return QNameFactory.create(localName, namespace, revision);
+    }
+
+    final String readCodedString() throws IOException {
+        final byte valueType = input.readByte();
+        switch (valueType) {
+            case LithiumTokens.IS_NULL_VALUE:
+                return null;
+            case LithiumTokens.IS_CODE_VALUE:
+                final int code = input.readInt();
+                try {
+                    return codedStringMap.get(code);
+                } catch (IndexOutOfBoundsException e) {
+                    throw new IOException("String code " + code + " was not found", e);
+                }
+            case LithiumTokens.IS_STRING_VALUE:
+                final String value = input.readUTF().intern();
+                codedStringMap.add(value);
+                return value;
+            default:
+                throw new IOException("Unhandled string value type " + valueType);
+        }
+    }
+
+    private Set<QName> readQNameSet() throws IOException {
+        // Read the children count
+        final int count = input.readInt();
+        final Set<QName> children = Sets.newHashSetWithExpectedSize(count);
+        for (int i = 0; i < count; i++) {
+            children.add(readQName());
+        }
+        return children;
+    }
+
+    abstract AugmentationIdentifier readAugmentationIdentifier() throws IOException;
+
+    abstract NodeIdentifier readNodeIdentifier() throws IOException;
+
+    final AugmentationIdentifier defaultReadAugmentationIdentifier() throws IOException {
+        return AugmentationIdentifier.create(readQNameSet());
+    }
+
+    private NodeIdentifierWithPredicates readNormalizedNodeWithPredicates() throws IOException {
+        final QName qname = readQName();
+        final int count = input.readInt();
+        switch (count) {
+            case 0:
+                return NodeIdentifierWithPredicates.of(qname);
+            case 1:
+                return NodeIdentifierWithPredicates.of(qname, readQName(), readObject());
+            default:
+                // ImmutableList is used by ImmutableOffsetMapTemplate for lookups, hence we use that.
+                final Builder<QName> keys = ImmutableList.builderWithExpectedSize(count);
+                final Object[] values = new Object[count];
+                for (int i = 0; i < count; i++) {
+                    keys.add(readQName());
+                    values[i] = readObject();
+                }
+
+                return NodeIdentifierWithPredicates.of(qname, ImmutableOffsetMapTemplate.ordered(keys.build())
+                    .instantiateWithValues(values));
+        }
+    }
+
+    private Object readObject() throws IOException {
+        byte objectType = input.readByte();
+        switch (objectType) {
+            case LithiumValue.BITS_TYPE:
+                return readObjSet();
+
+            case LithiumValue.BOOL_TYPE:
+                return input.readBoolean();
+
+            case LithiumValue.BYTE_TYPE:
+                return input.readByte();
+
+            case LithiumValue.INT_TYPE:
+                return input.readInt();
+
+            case LithiumValue.LONG_TYPE:
+                return input.readLong();
+
+            case LithiumValue.QNAME_TYPE:
+                return readQName();
+
+            case LithiumValue.SHORT_TYPE:
+                return input.readShort();
+
+            case LithiumValue.STRING_TYPE:
+                return input.readUTF();
+
+            case LithiumValue.STRING_BYTES_TYPE:
+                return readStringBytes();
+
+            case LithiumValue.BIG_DECIMAL_TYPE:
+                return new BigDecimal(input.readUTF());
+
+            case LithiumValue.BIG_INTEGER_TYPE:
+                return new BigInteger(input.readUTF());
+
+            case LithiumValue.BINARY_TYPE:
+                byte[] bytes = new byte[input.readInt()];
+                input.readFully(bytes);
+                return bytes;
+
+            case LithiumValue.YANG_IDENTIFIER_TYPE:
+                return readYangInstanceIdentifierInternal();
+
+            case LithiumValue.EMPTY_TYPE:
+            // Leaf nodes no longer allow null values and thus we no longer emit null values. Previously, the "empty"
+            // yang type was represented as null so we translate an incoming null value to Empty. It was possible for
+            // a BI user to set a string leaf to null and we're rolling the dice here but the chances for that are
+            // very low. We'd have to know the yang type but, even if we did, we can't let a null value pass upstream
+            // so we'd have to drop the leaf which might cause other issues.
+            case LithiumValue.NULL_TYPE:
+                return Empty.getInstance();
+
+            default:
+                return null;
+        }
+    }
+
+    private String readStringBytes() throws IOException {
+        byte[] bytes = new byte[input.readInt()];
+        input.readFully(bytes);
+        return new String(bytes, StandardCharsets.UTF_8);
+    }
+
+    @Override
+    public final YangInstanceIdentifier readYangInstanceIdentifier() throws IOException {
+        return readYangInstanceIdentifierInternal();
+    }
+
+    private YangInstanceIdentifier readYangInstanceIdentifierInternal() throws IOException {
+        int size = input.readInt();
+        final Builder<PathArgument> pathArguments = ImmutableList.builderWithExpectedSize(size);
+        for (int i = 0; i < size; i++) {
+            pathArguments.add(readPathArgument());
+        }
+        return YangInstanceIdentifier.create(pathArguments.build());
+    }
+
+    private Set<String> readObjSet() throws IOException {
+        int count = input.readInt();
+        Set<String> children = new HashSet<>(count);
+        for (int i = 0; i < count; i++) {
+            children.add(readCodedString());
+        }
+        return children;
+    }
+
+    @Override
+    public final PathArgument readPathArgument() throws IOException {
+        // read Type
+        int type = input.readByte();
+
+        switch (type) {
+            case LithiumPathArgument.AUGMENTATION_IDENTIFIER:
+                return readAugmentationIdentifier();
+            case LithiumPathArgument.NODE_IDENTIFIER:
+                return readNodeIdentifier();
+            case LithiumPathArgument.NODE_IDENTIFIER_WITH_PREDICATES:
+                return readNormalizedNodeWithPredicates();
+            case LithiumPathArgument.NODE_IDENTIFIER_WITH_VALUE:
+                return new NodeWithValue<>(readQName(), readObject());
+            default:
+                // FIXME: throw hard error
+                return null;
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractLithiumDataOutput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractLithiumDataOutput.java
new file mode 100644 (file)
index 0000000..25e14b1
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.common.Revision;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * "original" type mapping. Baseline is Lithium but it really was introduced in Oxygen, where {@code type empty} was
+ * remapped from null.
+ *
+ * <p>
+ * {@code uint8}, {@code uint16}, {@code uint32} use java.lang types with widening, hence their value types overlap with
+ * mapping of {@code int16}, {@code int32} and {@code int64}, making that difference indiscernible without YANG schema
+ * knowledge.
+ */
+abstract class AbstractLithiumDataOutput extends AbstractNormalizedNodeDataOutput {
    private static final Logger LOG = LoggerFactory.getLogger(AbstractLithiumDataOutput.class);
    // Shared factory for serializing anyxml DOMSource payloads to text.
    // NOTE(review): TransformerFactory is not specified as thread-safe -- confirm writers are
    // confined to a single thread, or that this factory is otherwise safely published.
    private static final TransformerFactory TF = TransformerFactory.newInstance();
    // Maps a value's Java class to the Lithium wire-type discriminator byte used by writeObject().
    private static final ImmutableMap<Class<?>, Byte> KNOWN_TYPES = ImmutableMap.<Class<?>, Byte>builder()
            .put(String.class, LithiumValue.STRING_TYPE)
            .put(Byte.class, LithiumValue.BYTE_TYPE)
            .put(Integer.class, LithiumValue.INT_TYPE)
            .put(Long.class, LithiumValue.LONG_TYPE)
            .put(Boolean.class, LithiumValue.BOOL_TYPE)
            .put(QName.class, LithiumValue.QNAME_TYPE)
            .put(Short.class, LithiumValue.SHORT_TYPE)
            .put(BigInteger.class, LithiumValue.BIG_INTEGER_TYPE)
            .put(BigDecimal.class, LithiumValue.BIG_DECIMAL_TYPE)
            .put(byte[].class, LithiumValue.BINARY_TYPE)
            .put(Empty.class, LithiumValue.EMPTY_TYPE)
            .build();

    // Encoder side of the Lithium string table: string -> assigned integer code.
    private final Map<String, Integer> stringCodeMap = new HashMap<>();

    // QName of the leaf set currently open, so its entries need not repeat it on the wire.
    private QName lastLeafSetQName;
    // True while inside a simple (childless) node; endNode() then suppresses the END_NODE marker.
    private boolean inSimple;

    AbstractLithiumDataOutput(final DataOutput output) {
        super(output);
    }
+
    @Override
    public final void startLeafNode(final NodeIdentifier name) throws IOException {
        LOG.trace("Starting a new leaf node");
        startNode(name, LithiumNode.LEAF_NODE);
        // Leaves have no children: endNode() must not emit an END_NODE marker for them.
        inSimple = true;
    }

    @Override
    public final void startLeafSet(final NodeIdentifier name, final int childSizeHint) throws IOException {
        LOG.trace("Starting a new leaf set");
        commonStartLeafSet(name, LithiumNode.LEAF_SET);
    }

    @Override
    public final void startOrderedLeafSet(final NodeIdentifier name, final int childSizeHint) throws IOException {
        LOG.trace("Starting a new ordered leaf set");
        commonStartLeafSet(name, LithiumNode.ORDERED_LEAF_SET);
    }

    // Shared tail of both leaf-set variants: record the set's QName so entries can omit theirs.
    private void commonStartLeafSet(final NodeIdentifier name, final byte nodeType) throws IOException {
        startNode(name, nodeType);
        lastLeafSetQName = name.getNodeType();
    }

    @Override
    public final void startLeafSetEntryNode(final NodeWithValue<?> name) throws IOException {
        LOG.trace("Starting a new leaf set entry node");

        output.writeByte(LithiumNode.LEAF_SET_ENTRY_NODE);

        // lastLeafSetQName is set if the parent LeafSetNode was previously written. Otherwise this is a
        // stand alone LeafSetEntryNode so write out it's name here.
        if (lastLeafSetQName == null) {
            writeQNameInternal(name.getNodeType());
        }
        inSimple = true;
    }

    @Override
    public final void startContainerNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
        LOG.trace("Starting a new container node");
        startNode(name, LithiumNode.CONTAINER_NODE);
    }
+
    @Override
    public final void startYangModeledAnyXmlNode(final NodeIdentifier name, final int childSizeHint)
            throws IOException {
        LOG.trace("Starting a new yang modeled anyXml node");
        startNode(name, LithiumNode.YANG_MODELED_ANY_XML_NODE);
    }

    @Override
    public final void startUnkeyedList(final NodeIdentifier name, final int childSizeHint) throws IOException {
        LOG.trace("Starting a new unkeyed list");
        startNode(name, LithiumNode.UNKEYED_LIST);
    }

    @Override
    public final void startUnkeyedListItem(final NodeIdentifier name, final int childSizeHint) throws IOException {
        LOG.trace("Starting a new unkeyed list item");
        startNode(name, LithiumNode.UNKEYED_LIST_ITEM);
    }

    @Override
    public final void startMapNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
        LOG.trace("Starting a new map node");
        startNode(name, LithiumNode.MAP_NODE);
    }

    @Override
    public final void startMapEntryNode(final NodeIdentifierWithPredicates identifier, final int childSizeHint)
            throws IOException {
        LOG.trace("Starting a new map entry node");
        startNode(identifier, LithiumNode.MAP_ENTRY_NODE);
        // Map entries additionally carry their key predicates right after the start tag.
        writeKeyValueMap(identifier.entrySet());
    }

    @Override
    public final void startOrderedMapNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
        LOG.trace("Starting a new ordered map node");
        startNode(name, LithiumNode.ORDERED_MAP_NODE);
    }

    @Override
    public final void startChoiceNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
        LOG.trace("Starting a new choice node");
        startNode(name, LithiumNode.CHOICE_NODE);
    }
+
    @Override
    public final void startAugmentationNode(final AugmentationIdentifier identifier) throws IOException {
        requireNonNull(identifier, "Node identifier should not be null");
        LOG.trace("Starting a new augmentation node");

        // Augmentations have no QName of their own; the identifier is written instead of a start tag.
        output.writeByte(LithiumNode.AUGMENTATION_NODE);
        writeAugmentationIdentifier(identifier);
    }

    @Override
    public final boolean startAnyxmlNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
        // Only the DOMSource object model is supported by this stream format; reject others
        // so the caller can fall back to a different representation.
        if (DOMSource.class.isAssignableFrom(objectModel)) {
            LOG.trace("Starting anyxml node");
            startNode(name, LithiumNode.ANY_XML_NODE);
            inSimple = true;
            return true;
        }
        return false;
    }
+
    @Override
    public final void scalarValue(final Object value) throws IOException {
        writeObject(value);
    }

    @Override
    public final void domSourceValue(final DOMSource value) throws IOException {
        // Serialize the DOM to text and ship it as an ordinary string object.
        final StringWriter writer = new StringWriter();
        try {
            TF.newTransformer().transform(value, new StreamResult(writer));
        } catch (TransformerException e) {
            throw new IOException("Error writing anyXml", e);
        }
        writeObject(writer.toString());
    }

    @Override
    public final void endNode() throws IOException {
        LOG.trace("Ending the node");
        // Simple nodes (leaf, leaf-set entry, anyxml) are self-delimiting: no END_NODE marker.
        if (!inSimple) {
            lastLeafSetQName = null;
            output.writeByte(LithiumNode.END_NODE);
        }
        inSimple = false;
    }
+
+    @Override
+    @SuppressFBWarnings(value = "BC_UNCONFIRMED_CAST",
+            justification = "The casts in the switch clauses are indirectly confirmed via the determination of 'type'.")
+    final void writePathArgumentInternal(final PathArgument pathArgument) throws IOException {
+        final byte type = LithiumPathArgument.getSerializablePathArgumentType(pathArgument);
+        output.writeByte(type);
+
+        switch (type) {
+            case LithiumPathArgument.NODE_IDENTIFIER:
+                NodeIdentifier nodeIdentifier = (NodeIdentifier) pathArgument;
+                writeQNameInternal(nodeIdentifier.getNodeType());
+                break;
+            case LithiumPathArgument.NODE_IDENTIFIER_WITH_PREDICATES:
+                NodeIdentifierWithPredicates nodeIdentifierWithPredicates =
+                    (NodeIdentifierWithPredicates) pathArgument;
+                writeQNameInternal(nodeIdentifierWithPredicates.getNodeType());
+                writeKeyValueMap(nodeIdentifierWithPredicates.entrySet());
+                break;
+            case LithiumPathArgument.NODE_IDENTIFIER_WITH_VALUE:
+                NodeWithValue<?> nodeWithValue = (NodeWithValue<?>) pathArgument;
+                writeQNameInternal(nodeWithValue.getNodeType());
+                writeObject(nodeWithValue.getValue());
+                break;
+            case LithiumPathArgument.AUGMENTATION_IDENTIFIER:
+                // No Qname in augmentation identifier
+                writeAugmentationIdentifier((AugmentationIdentifier) pathArgument);
+                break;
+            default:
+                throw new IllegalStateException("Unknown node identifier type is found : "
+                        + pathArgument.getClass().toString());
+        }
+    }
+
+    @Override
+    final void writeYangInstanceIdentifierInternal(final YangInstanceIdentifier identifier) throws IOException {
+        List<PathArgument> pathArguments = identifier.getPathArguments();
+        output.writeInt(pathArguments.size());
+
+        for (PathArgument pathArgument : pathArguments) {
+            writePathArgumentInternal(pathArgument);
+        }
+    }
+
    /**
     * Write an augmentation identifier in the default (Lithium) layout: a count followed by
     * each possible child QName.
     *
     * @param aid identifier to serialize
     * @throws IOException when the underlying output fails
     */
    final void defaultWriteAugmentationIdentifier(final @NonNull AugmentationIdentifier aid) throws IOException {
        final Set<QName> qnames = aid.getPossibleChildNames();
        // Write each child's qname separately, if list is empty send count as 0
        if (!qnames.isEmpty()) {
            output.writeInt(qnames.size());
            for (QName qname : qnames) {
                writeQNameInternal(qname);
            }
        } else {
            LOG.debug("augmentation node does not have any child");
            output.writeInt(0);
        }
    }

    // Default QName layout: local name first, then the module (namespace + optional revision).
    final void defaultWriteQName(final QName qname) throws IOException {
        writeString(qname.getLocalName());
        writeModule(qname.getModule());
    }

    // Default module layout: namespace, then either the revision string or an explicit null token.
    final void defaultWriteModule(final QNameModule module) throws IOException {
        writeString(module.getNamespace().toString());
        final Optional<Revision> revision = module.getRevision();
        if (revision.isPresent()) {
            writeString(revision.get().toString());
        } else {
            writeByte(LithiumTokens.IS_NULL_VALUE);
        }
    }
+
    // Subclass hook: serialize a module reference (versioned streams may encode it differently).
    abstract void writeModule(QNameModule module) throws IOException;

    // Subclass hook: serialize an augmentation identifier.
    abstract void writeAugmentationIdentifier(@NonNull AugmentationIdentifier aid) throws IOException;
+
+    /**
+     * Begin a new node entry: emit its type byte followed by its QName.
+     *
+     * @throws IllegalStateException if invoked while inside a simple (leaf-like) node
+     */
+    private void startNode(final PathArgument arg, final byte nodeType) throws IOException {
+        requireNonNull(arg, "Node identifier should not be null");
+        checkState(!inSimple, "Attempted to start a child in a simple node");
+
+        // First write the type of node
+        output.writeByte(nodeType);
+        // Write Start Tag
+        writeQNameInternal(arg.getNodeType());
+    }
+
+    // Serialize a bits value: a size prefix followed by each bit name as a coded string.
+    private void writeObjSet(final Set<?> set) throws IOException {
+        output.writeInt(set.size());
+        for (Object obj : set) {
+            checkArgument(obj instanceof String, "Expected value type to be String but was %s (%s)", obj.getClass(),
+                obj);
+            writeString((String) obj);
+        }
+    }
+
+    /**
+     * Serialize a single leaf value: its type discriminator byte first, then a type-specific
+     * payload. Readers depend on this exact write order, so do not reorder the writes.
+     */
+    private void writeObject(final Object value) throws IOException {
+        byte type = getSerializableType(value);
+        // Write object type first
+        output.writeByte(type);
+
+        switch (type) {
+            case LithiumValue.BOOL_TYPE:
+                output.writeBoolean((Boolean) value);
+                break;
+            case LithiumValue.QNAME_TYPE:
+                writeQNameInternal((QName) value);
+                break;
+            case LithiumValue.INT_TYPE:
+                output.writeInt((Integer) value);
+                break;
+            case LithiumValue.BYTE_TYPE:
+                output.writeByte((Byte) value);
+                break;
+            case LithiumValue.LONG_TYPE:
+                output.writeLong((Long) value);
+                break;
+            case LithiumValue.SHORT_TYPE:
+                output.writeShort((Short) value);
+                break;
+            case LithiumValue.BITS_TYPE:
+                writeObjSet((Set<?>) value);
+                break;
+            case LithiumValue.BINARY_TYPE:
+                // Raw bytes are length-prefixed
+                byte[] bytes = (byte[]) value;
+                output.writeInt(bytes.length);
+                output.write(bytes);
+                break;
+            case LithiumValue.YANG_IDENTIFIER_TYPE:
+                writeYangInstanceIdentifierInternal((YangInstanceIdentifier) value);
+                break;
+            case LithiumValue.EMPTY_TYPE:
+                // 'empty' carries no payload beyond its type byte
+                break;
+            case LithiumValue.STRING_BYTES_TYPE:
+                // Long strings are emitted as length-prefixed UTF-8 bytes instead of writeUTF()
+                final byte[] valueBytes = value.toString().getBytes(StandardCharsets.UTF_8);
+                output.writeInt(valueBytes.length);
+                output.write(valueBytes);
+                break;
+            default:
+                // Everything else (including short strings) falls back to modified-UTF-8
+                output.writeUTF(value.toString());
+                break;
+        }
+    }
+
+    /**
+     * Serialize map-entry predicates as a size-prefixed sequence of (QName, value) pairs.
+     *
+     * <p>The previous empty-set special case was redundant: an empty set writes the same 0 size
+     * and the loop simply does not execute, so the branch has been removed.
+     */
+    private void writeKeyValueMap(final Set<Entry<QName, Object>> entrySet) throws IOException {
+        output.writeInt(entrySet.size());
+        for (Entry<QName, Object> entry : entrySet) {
+            writeQNameInternal(entry.getKey());
+            writeObject(entry.getValue());
+        }
+    }
+
+    /**
+     * Write a string using dictionary coding: the first occurrence is written inline (tagged
+     * IS_STRING_VALUE) and assigned the next available code; repeat occurrences are written as
+     * their integer code (tagged IS_CODE_VALUE). The reader rebuilds the same table in lock-step.
+     */
+    private void writeString(final @NonNull String string) throws IOException {
+        final Integer value = stringCodeMap.get(verifyNotNull(string));
+        if (value == null) {
+            // First occurrence: record its code, then emit the literal
+            stringCodeMap.put(string, stringCodeMap.size());
+            writeByte(LithiumTokens.IS_STRING_VALUE);
+            writeUTF(string);
+        } else {
+            writeByte(LithiumTokens.IS_CODE_VALUE);
+            writeInt(value);
+        }
+    }
+
+    @VisibleForTesting
+    static final byte getSerializableType(final Object node) {
+        final Byte type = KNOWN_TYPES.get(requireNonNull(node).getClass());
+        if (type != null) {
+            if (type == LithiumValue.STRING_TYPE
+                    && ((String) node).length() >= LithiumValue.STRING_BYTES_LENGTH_THRESHOLD) {
+                return LithiumValue.STRING_BYTES_TYPE;
+            }
+            return type;
+        }
+
+        if (node instanceof Set) {
+            return LithiumValue.BITS_TYPE;
+        }
+
+        if (node instanceof YangInstanceIdentifier) {
+            return LithiumValue.YANG_IDENTIFIER_TYPE;
+        }
+
+        throw new IllegalArgumentException("Unknown value type " + node.getClass().getSimpleName());
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractMagnesiumDataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractMagnesiumDataInput.java
new file mode 100644 (file)
index 0000000..a51b121
--- /dev/null
@@ -0,0 +1,856 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+import static org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter.UNKNOWN_SIZE;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.UncheckedExecutionException;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.StringReader;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import javax.xml.transform.dom.DOMSource;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
+import org.opendaylight.yangtools.rfc8528.data.api.MountPointIdentifier;
+import org.opendaylight.yangtools.util.xml.UntrustedXML;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.opendaylight.yangtools.yang.common.Uint64;
+import org.opendaylight.yangtools.yang.common.Uint8;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;
+
+/**
+ * Abstract base class for NormalizedNodeDataInput based on {@link MagnesiumNode}, {@link MagnesiumPathArgument} and
+ * {@link MagnesiumValue}.
+ */
+abstract class AbstractMagnesiumDataInput extends AbstractNormalizedNodeDataInput {
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractMagnesiumDataInput.class);
+
+    // Known singleton objects
+    private static final @NonNull Byte INT8_0 = 0;
+    private static final @NonNull Short INT16_0 = 0;
+    private static final @NonNull Integer INT32_0 = 0;
+    private static final @NonNull Long INT64_0 = 0L;
+    private static final byte @NonNull[] BINARY_0 = new byte[0];
+    private static final @NonNull AugmentationIdentifier EMPTY_AID = AugmentationIdentifier.create(ImmutableSet.of());
+
+    // FIXME: these should be available as constants
+    private static final @NonNull Uint8 UINT8_0 = Uint8.valueOf(0);
+    private static final @NonNull Uint16 UINT16_0 = Uint16.valueOf(0);
+    private static final @NonNull Uint32 UINT32_0 = Uint32.valueOf(0);
+    private static final @NonNull Uint64 UINT64_0 = Uint64.valueOf(0);
+
+    // Decoding dictionaries, appended to as definitions are read; back-references in the stream
+    // index into these lists. They must grow in exactly the same order the writer assigned codes.
+    private final List<AugmentationIdentifier> codedAugments = new ArrayList<>();
+    private final List<NodeIdentifier> codedNodeIdentifiers = new ArrayList<>();
+    private final List<QNameModule> codedModules = new ArrayList<>();
+    private final List<String> codedStrings = new ArrayList<>();
+
+    AbstractMagnesiumDataInput(final DataInput input) {
+        super(input);
+    }
+
+    @Override
+    public final void streamNormalizedNode(final NormalizedNodeStreamWriter writer) throws IOException {
+        // Top-level entry: no parent context, first header byte comes straight off the stream
+        streamNormalizedNode(requireNonNull(writer), null, input.readByte());
+    }
+
+    /**
+     * Dispatch a single node to its type-specific streamer based on the TYPE_MASK bits of the
+     * already-consumed header byte. {@code parent} provides context for entry nodes which may
+     * back-reference their parent's identifier or predicate values.
+     */
+    private void streamNormalizedNode(final NormalizedNodeStreamWriter writer, final PathArgument parent,
+            final byte nodeHeader) throws IOException {
+        switch (nodeHeader & MagnesiumNode.TYPE_MASK) {
+            case MagnesiumNode.NODE_LEAF:
+                streamLeaf(writer, parent, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_CONTAINER:
+                streamContainer(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_LIST:
+                streamList(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_MAP:
+                streamMap(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_MAP_ORDERED:
+                streamMapOrdered(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_LEAFSET:
+                streamLeafset(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_LEAFSET_ORDERED:
+                streamLeafsetOrdered(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_CHOICE:
+                streamChoice(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_AUGMENTATION:
+                streamAugmentation(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_ANYXML:
+                streamAnyxml(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_ANYXML_MODELED:
+                streamAnyxmlModeled(writer, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_LIST_ENTRY:
+                streamListEntry(writer, parent, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_LEAFSET_ENTRY:
+                streamLeafsetEntry(writer, parent, nodeHeader);
+                break;
+            case MagnesiumNode.NODE_MAP_ENTRY:
+                streamMapEntry(writer, parent, nodeHeader);
+                break;
+            default:
+                throw new InvalidNormalizedNodeStreamException("Unexpected node header " + nodeHeader);
+        }
+    }
+
+    private void streamAnyxml(final NormalizedNodeStreamWriter writer, final byte nodeHeader) throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        LOG.trace("Streaming anyxml node {}", identifier);
+
+        // NOTE: the DOMSource is always consumed from the stream, even if the writer declines the
+        // node below -- skipping the read would desynchronize the input.
+        final DOMSource value = readDOMSource();
+        if (writer.startAnyxmlNode(identifier, DOMSource.class)) {
+            writer.domSourceValue(value);
+            writer.endNode();
+        }
+    }
+
+    private void streamAnyxmlModeled(final NormalizedNodeStreamWriter writer, final byte nodeHeader)
+            throws IOException {
+        // TODO: decide how to deal with these
+        throw new UnsupportedOperationException("Reading YANG-modeled anyxml was never supported");
+    }
+
+    private void streamAugmentation(final NormalizedNodeStreamWriter writer, final byte nodeHeader) throws IOException {
+        final AugmentationIdentifier augIdentifier = decodeAugmentationIdentifier(nodeHeader);
+        LOG.trace("Streaming augmentation node {}", augIdentifier);
+        writer.startAugmentationNode(augIdentifier);
+        commonStreamContainer(writer, augIdentifier);
+    }
+
+    // Choice node: identifier followed by child nodes until NODE_END.
+    private void streamChoice(final NormalizedNodeStreamWriter writer, final byte nodeHeader) throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        LOG.trace("Streaming choice node {}", identifier);
+        writer.startChoiceNode(identifier, UNKNOWN_SIZE);
+        commonStreamContainer(writer, identifier);
+    }
+
+    // Container node: identifier followed by child nodes until NODE_END.
+    private void streamContainer(final NormalizedNodeStreamWriter writer, final byte nodeHeader) throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        LOG.trace("Streaming container node {}", identifier);
+        writer.startContainerNode(identifier, UNKNOWN_SIZE);
+        commonStreamContainer(writer, identifier);
+    }
+
+    /**
+     * Stream a leaf node. If the header's PREDICATE_ONE bit is set, the value is not present in
+     * the stream at all: it is back-referenced from the enclosing map entry's predicate map,
+     * avoiding double-encoding of key leaf values.
+     */
+    private void streamLeaf(final NormalizedNodeStreamWriter writer, final PathArgument parent, final byte nodeHeader)
+            throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        LOG.trace("Streaming leaf node {}", identifier);
+        writer.startLeafNode(identifier);
+
+        final Object value;
+        if ((nodeHeader & MagnesiumNode.PREDICATE_ONE) == MagnesiumNode.PREDICATE_ONE) {
+            // Value lives in the parent NodeIdentifierWithPredicates, keyed by this leaf's QName
+            if (!(parent instanceof NodeIdentifierWithPredicates)) {
+                throw new InvalidNormalizedNodeStreamException("Invalid predicate leaf " + identifier + " in parent "
+                        + parent);
+            }
+
+            value = ((NodeIdentifierWithPredicates) parent).getValue(identifier.getNodeType());
+            if (value == null) {
+                throw new InvalidNormalizedNodeStreamException("Failed to find predicate leaf " + identifier
+                    + " in parent " + parent);
+            }
+        } else {
+            // Ordinary leaf: value is encoded inline
+            value = readLeafValue();
+        }
+
+        writer.scalarValue(value);
+        writer.endNode();
+    }
+
+    // Unordered leaf-list: identifier followed by leaf-set entries until NODE_END.
+    private void streamLeafset(final NormalizedNodeStreamWriter writer, final byte nodeHeader) throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        LOG.trace("Streaming leaf set node {}", identifier);
+        writer.startLeafSet(identifier, UNKNOWN_SIZE);
+        commonStreamContainer(writer, identifier);
+    }
+
+    // Ordered (user-ordered) leaf-list variant of streamLeafset().
+    private void streamLeafsetOrdered(final NormalizedNodeStreamWriter writer, final byte nodeHeader)
+            throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        LOG.trace("Streaming ordered leaf set node {}", identifier);
+        writer.startOrderedLeafSet(identifier, UNKNOWN_SIZE);
+
+        commonStreamContainer(writer, identifier);
+    }
+
+    // Leaf-set entry: its NodeWithValue identifier embeds the value, which is read inline.
+    private void streamLeafsetEntry(final NormalizedNodeStreamWriter writer, final PathArgument parent,
+            final byte nodeHeader) throws IOException {
+        final NodeIdentifier nodeId = decodeNodeIdentifier(nodeHeader, parent);
+        final Object value = readLeafValue();
+        final NodeWithValue<Object> leafIdentifier = new NodeWithValue<>(nodeId.getNodeType(), value);
+        LOG.trace("Streaming leaf set entry node {}", leafIdentifier);
+        writer.startLeafSetEntryNode(leafIdentifier);
+        writer.scalarValue(value);
+        writer.endNode();
+    }
+
+    private void streamList(final NormalizedNodeStreamWriter writer, final byte nodeHeader) throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        writer.startUnkeyedList(identifier, UNKNOWN_SIZE);
+        commonStreamContainer(writer, identifier);
+    }
+
+    // Unkeyed list item; may back-reference the parent's identifier via ADDR_PARENT.
+    private void streamListEntry(final NormalizedNodeStreamWriter writer, final PathArgument parent,
+            final byte nodeHeader) throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader, parent);
+        LOG.trace("Streaming unkeyed list item node {}", identifier);
+        writer.startUnkeyedListItem(identifier, UNKNOWN_SIZE);
+        commonStreamContainer(writer, identifier);
+    }
+
+    // Unordered map (keyed list): identifier followed by map entries until NODE_END.
+    private void streamMap(final NormalizedNodeStreamWriter writer, final byte nodeHeader) throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        LOG.trace("Streaming map node {}", identifier);
+        writer.startMapNode(identifier, UNKNOWN_SIZE);
+        commonStreamContainer(writer, identifier);
+    }
+
+    // Ordered (user-ordered) variant of streamMap().
+    private void streamMapOrdered(final NormalizedNodeStreamWriter writer, final byte nodeHeader) throws IOException {
+        final NodeIdentifier identifier = decodeNodeIdentifier(nodeHeader);
+        LOG.trace("Streaming ordered map node {}", identifier);
+        writer.startOrderedMapNode(identifier, UNKNOWN_SIZE);
+        commonStreamContainer(writer, identifier);
+    }
+
+    /**
+     * Stream a map entry. The header's PREDICATE_MASK bits select how the predicate count is
+     * encoded: inline (0 or 1) or as an explicit 1-byte/4-byte count following the header.
+     */
+    private void streamMapEntry(final NormalizedNodeStreamWriter writer, final PathArgument parent,
+            final byte nodeHeader) throws IOException {
+        final NodeIdentifier nodeId = decodeNodeIdentifier(nodeHeader, parent);
+
+        final int size;
+        switch (mask(nodeHeader, MagnesiumNode.PREDICATE_MASK)) {
+            case MagnesiumNode.PREDICATE_ZERO:
+                size = 0;
+                break;
+            case MagnesiumNode.PREDICATE_ONE:
+                size = 1;
+                break;
+            case MagnesiumNode.PREDICATE_1B:
+                size = input.readUnsignedByte();
+                break;
+            case MagnesiumNode.PREDICATE_4B:
+                size = input.readInt();
+                break;
+            default:
+                // ISE on purpose: this should never ever happen
+                throw new IllegalStateException("Failed to decode NodeIdentifierWithPredicates size from header "
+                        + nodeHeader);
+        }
+
+        final NodeIdentifierWithPredicates identifier = readNodeIdentifierWithPredicates(nodeId.getNodeType(), size);
+        LOG.trace("Streaming map entry node {}", identifier);
+        writer.startMapEntryNode(identifier, UNKNOWN_SIZE);
+        commonStreamContainer(writer, identifier);
+    }
+
+    // Stream child nodes until the NODE_END sentinel byte, then close the current node.
+    private void commonStreamContainer(final NormalizedNodeStreamWriter writer, final PathArgument parent)
+            throws IOException {
+        byte nodeType = input.readByte();
+        while (nodeType != MagnesiumNode.NODE_END) {
+            streamNormalizedNode(writer, parent, nodeType);
+            nodeType = input.readByte();
+        }
+        writer.endNode();
+    }
+
+    /**
+     * Read a NodeIdentifier definition (module + local name) and append it to the decoding
+     * dictionary so later stream bytes can reference it by index.
+     */
+    private @NonNull NodeIdentifier decodeNodeIdentifier() throws IOException {
+        final QNameModule module = decodeQNameModule();
+        final String localName = readRefString();
+        final NodeIdentifier nodeId;
+        try {
+            nodeId = QNameFactory.getNodeIdentifier(module, localName);
+        } catch (ExecutionException e) {
+            throw new InvalidNormalizedNodeStreamException("Illegal QName module=" + module + " localName="
+                    + localName, e);
+        }
+
+        // Register for subsequent ADDR_LOOKUP_* back-references
+        codedNodeIdentifiers.add(nodeId);
+        return nodeId;
+    }
+
+    // Convenience overload for contexts without a parent to back-reference.
+    private NodeIdentifier decodeNodeIdentifier(final byte nodeHeader) throws IOException {
+        return decodeNodeIdentifier(nodeHeader, null);
+    }
+
+    /**
+     * Resolve a NodeIdentifier according to the header's ADDR_MASK bits: an inline definition,
+     * a 1-byte or 4-byte dictionary lookup, or a reference to the parent's own identifier.
+     */
+    private NodeIdentifier decodeNodeIdentifier(final byte nodeHeader, final PathArgument parent) throws IOException {
+        final int index;
+        switch (nodeHeader & MagnesiumNode.ADDR_MASK) {
+            case MagnesiumNode.ADDR_DEFINE:
+                return readNodeIdentifier();
+            case MagnesiumNode.ADDR_LOOKUP_1B:
+                index = input.readUnsignedByte();
+                break;
+            case MagnesiumNode.ADDR_LOOKUP_4B:
+                index = input.readInt();
+                break;
+            case MagnesiumNode.ADDR_PARENT:
+                // Reuse the parent's identifier -- only valid when it actually is a NodeIdentifier
+                if (parent instanceof NodeIdentifier) {
+                    return (NodeIdentifier) parent;
+                }
+                throw new InvalidNormalizedNodeStreamException("Invalid node identifier reference to parent " + parent);
+            default:
+                throw new InvalidNormalizedNodeStreamException("Unexpected node identifier addressing in header "
+                        + nodeHeader);
+        }
+
+        try {
+            return codedNodeIdentifiers.get(index);
+        } catch (IndexOutOfBoundsException e) {
+            throw new InvalidNormalizedNodeStreamException("Invalid QName reference " + index, e);
+        }
+    }
+
+    /**
+     * Resolve an AugmentationIdentifier according to the header's ADDR_MASK bits: an inline
+     * definition or a 1-byte/4-byte dictionary lookup. Unlike node identifiers, there is no
+     * parent-reference form.
+     */
+    private AugmentationIdentifier decodeAugmentationIdentifier(final byte nodeHeader) throws IOException {
+        final int index;
+        switch (nodeHeader & MagnesiumNode.ADDR_MASK) {
+            case MagnesiumNode.ADDR_DEFINE:
+                return readAugmentationIdentifier();
+            case MagnesiumNode.ADDR_LOOKUP_1B:
+                index = input.readUnsignedByte();
+                break;
+            case MagnesiumNode.ADDR_LOOKUP_4B:
+                index = input.readInt();
+                break;
+            default:
+                throw new InvalidNormalizedNodeStreamException(
+                    "Unexpected augmentation identifier addressing in header " + nodeHeader);
+        }
+
+        try {
+            return codedAugments.get(index);
+        } catch (IndexOutOfBoundsException e) {
+            throw new InvalidNormalizedNodeStreamException("Invalid augmentation identifier reference " + index, e);
+        }
+    }
+
+    @Override
+    public final YangInstanceIdentifier readYangInstanceIdentifier() throws IOException {
+        final byte type = input.readByte();
+        if (type == MagnesiumValue.YIID) {
+            // General form: explicit 4-byte argument count follows
+            return readYangInstanceIdentifier(input.readInt());
+        } else if (type >= MagnesiumValue.YIID_0) {
+            // Compact form: small sizes are folded into the type byte itself.
+            // Note 'byte' is range limited, so it is always '&& type <= MagnesiumValue.YIID_31'
+            return readYangInstanceIdentifier(type - MagnesiumValue.YIID_0);
+        } else {
+            throw new InvalidNormalizedNodeStreamException("Unexpected YangInstanceIdentifier type " + type);
+        }
+    }
+
+    private @NonNull YangInstanceIdentifier readYangInstanceIdentifier(final int size) throws IOException {
+        if (size > 0) {
+            final Builder<PathArgument> builder = ImmutableList.builderWithExpectedSize(size);
+            for (int i = 0; i < size; ++i) {
+                builder.add(readPathArgument());
+            }
+            return YangInstanceIdentifier.create(builder.build());
+        } else if (size == 0) {
+            return YangInstanceIdentifier.empty();
+        } else {
+            throw new InvalidNormalizedNodeStreamException("Invalid YangInstanceIdentifier size " + size);
+        }
+    }
+
+    @Override
+    public final QName readQName() throws IOException {
+        // A QName is either defined inline or referenced via a 1/2/4-byte dictionary index
+        final byte type = input.readByte();
+        switch (type) {
+            case MagnesiumValue.QNAME:
+                return decodeQName();
+            case MagnesiumValue.QNAME_REF_1B:
+                return decodeQNameRef1();
+            case MagnesiumValue.QNAME_REF_2B:
+                return decodeQNameRef2();
+            case MagnesiumValue.QNAME_REF_4B:
+                return decodeQNameRef4();
+            default:
+                throw new InvalidNormalizedNodeStreamException("Unexpected QName type " + type);
+        }
+    }
+
+    @Override
+    public final PathArgument readPathArgument() throws IOException {
+        // Dispatch on the TYPE_MASK bits of the path argument header byte
+        final byte header = input.readByte();
+        switch (header & MagnesiumPathArgument.TYPE_MASK) {
+            case MagnesiumPathArgument.AUGMENTATION_IDENTIFIER:
+                return readAugmentationIdentifier(header);
+            case MagnesiumPathArgument.NODE_IDENTIFIER:
+                // Simple arguments must not carry SIZE bits
+                verifyPathIdentifierOnly(header);
+                return readNodeIdentifier(header);
+            case MagnesiumPathArgument.NODE_IDENTIFIER_WITH_PREDICATES:
+                return readNodeIdentifierWithPredicates(header);
+            case MagnesiumPathArgument.NODE_WITH_VALUE:
+                verifyPathIdentifierOnly(header);
+                return readNodeWithValue(header);
+            case MagnesiumPathArgument.MOUNTPOINT_IDENTIFIER:
+                verifyPathIdentifierOnly(header);
+                return MountPointIdentifier.create(readNodeIdentifier(header).getNodeType());
+            default:
+                throw new InvalidNormalizedNodeStreamException("Unexpected PathArgument header " + header);
+        }
+    }
+
+    // Inline definition path (ADDR_DEFINE): explicit 4-byte count, result is registered in the
+    // dictionary for later back-references.
+    private AugmentationIdentifier readAugmentationIdentifier() throws IOException {
+        final AugmentationIdentifier result = readAugmentationIdentifier(input.readInt());
+        codedAugments.add(result);
+        return result;
+    }
+
+    /**
+     * Read an AugmentationIdentifier path argument, with the child-name count either embedded in
+     * the header's AID_COUNT bits (default branch) or following as a 1/2/4-byte integer.
+     */
+    private AugmentationIdentifier readAugmentationIdentifier(final byte header) throws IOException {
+        final byte count = mask(header, MagnesiumPathArgument.AID_COUNT_MASK);
+        switch (count) {
+            case MagnesiumPathArgument.AID_COUNT_1B:
+                return readAugmentationIdentifier(input.readUnsignedByte());
+            case MagnesiumPathArgument.AID_COUNT_2B:
+                return readAugmentationIdentifier(input.readUnsignedShort());
+            case MagnesiumPathArgument.AID_COUNT_4B:
+                return readAugmentationIdentifier(input.readInt());
+            default:
+                // Small counts are encoded directly in the header bits
+                return readAugmentationIdentifier(rshift(count, MagnesiumPathArgument.AID_COUNT_SHIFT));
+        }
+    }
+
+    private AugmentationIdentifier readAugmentationIdentifier(final int size) throws IOException {
+        if (size > 0) {
+            final List<QName> qnames = new ArrayList<>(size);
+            for (int i = 0; i < size; ++i) {
+                qnames.add(readQName());
+            }
+            return AugmentationIdentifier.create(ImmutableSet.copyOf(qnames));
+        } else if (size == 0) {
+            return EMPTY_AID;
+        } else {
+            throw new InvalidNormalizedNodeStreamException("Invalid augmentation identifier size " + size);
+        }
+    }
+
+    // Inline definition path (ADDR_DEFINE) for node identifiers.
+    private NodeIdentifier readNodeIdentifier() throws IOException {
+        return decodeNodeIdentifier();
+    }
+
+    /**
+     * Resolve a NodeIdentifier path argument via the header's QNAME_MASK bits: inline definition
+     * or a 1/2/4-byte dictionary reference.
+     */
+    private NodeIdentifier readNodeIdentifier(final byte header) throws IOException {
+        switch (header & MagnesiumPathArgument.QNAME_MASK) {
+            case MagnesiumPathArgument.QNAME_DEF:
+                return decodeNodeIdentifier();
+            case MagnesiumPathArgument.QNAME_REF_1B:
+                return decodeNodeIdentifierRef1();
+            case MagnesiumPathArgument.QNAME_REF_2B:
+                return decodeNodeIdentifierRef2();
+            case MagnesiumPathArgument.QNAME_REF_4B:
+                return decodeNodeIdentifierRef4();
+            default:
+                throw new InvalidNormalizedNodeStreamException("Invalid QName coding in " + header);
+        }
+    }
+
+    /**
+     * Read a NodeIdentifierWithPredicates path argument. The predicate count is either embedded
+     * in the header's SIZE bits (default branch) or follows as a 1/2/4-byte integer.
+     */
+    private NodeIdentifierWithPredicates readNodeIdentifierWithPredicates(final byte header) throws IOException {
+        final QName qname = readNodeIdentifier(header).getNodeType();
+        switch (mask(header, MagnesiumPathArgument.SIZE_MASK)) {
+            case MagnesiumPathArgument.SIZE_1B:
+                return readNodeIdentifierWithPredicates(qname, input.readUnsignedByte());
+            case MagnesiumPathArgument.SIZE_2B:
+                return readNodeIdentifierWithPredicates(qname, input.readUnsignedShort());
+            case MagnesiumPathArgument.SIZE_4B:
+                return readNodeIdentifierWithPredicates(qname, input.readInt());
+            default:
+                return readNodeIdentifierWithPredicates(qname, rshift(header, MagnesiumPathArgument.SIZE_SHIFT));
+        }
+    }
+
+    /**
+     * Read 'size' (QName, value) predicate pairs and build the identifier. The single-predicate
+     * case avoids the map-builder allocation.
+     */
+    private NodeIdentifierWithPredicates readNodeIdentifierWithPredicates(final QName qname, final int size)
+            throws IOException {
+        if (size == 1) {
+            return NodeIdentifierWithPredicates.of(qname, readQName(), readLeafValue());
+        } else if (size > 1) {
+            final ImmutableMap.Builder<QName, Object> builder = ImmutableMap.builderWithExpectedSize(size);
+            for (int i = 0; i < size; ++i) {
+                builder.put(readQName(), readLeafValue());
+            }
+            return NodeIdentifierWithPredicates.of(qname, builder.build());
+        } else if (size == 0) {
+            return NodeIdentifierWithPredicates.of(qname);
+        } else {
+            throw new InvalidNormalizedNodeStreamException("Invalid predicate count " + size);
+        }
+    }
+
+    private NodeWithValue<?> readNodeWithValue(final byte header) throws IOException {
+        final QName qname = readNodeIdentifier(header).getNodeType();
+        return new NodeWithValue<>(qname, readLeafValue());
+    }
+
+    // Guard: path argument types without predicates must not have any SIZE bits set.
+    private static void verifyPathIdentifierOnly(final byte header) throws InvalidNormalizedNodeStreamException {
+        if (mask(header, MagnesiumPathArgument.SIZE_MASK) != 0) {
+            throw new InvalidNormalizedNodeStreamException("Invalid path argument header " + header);
+        }
+    }
+
+    // Dictionary lookups. The 2-byte forms add 256 because indices 0-255 are reachable through
+    // the shorter 1-byte form, extending the effective 2-byte range to 256-65791.
+    private @NonNull NodeIdentifier decodeNodeIdentifierRef1() throws IOException {
+        return lookupNodeIdentifier(input.readUnsignedByte());
+    }
+
+    private @NonNull NodeIdentifier decodeNodeIdentifierRef2() throws IOException {
+        return lookupNodeIdentifier(input.readUnsignedShort() + 256);
+    }
+
+    private @NonNull NodeIdentifier decodeNodeIdentifierRef4() throws IOException {
+        return lookupNodeIdentifier(input.readInt());
+    }
+
+    // A QName definition is stored as a NodeIdentifier; extract its QName.
+    private @NonNull QName decodeQName() throws IOException {
+        return decodeNodeIdentifier().getNodeType();
+    }
+
+    private @NonNull QName decodeQNameRef1() throws IOException {
+        return lookupQName(input.readUnsignedByte());
+    }
+
+    private @NonNull QName decodeQNameRef2() throws IOException {
+        return lookupQName(input.readUnsignedShort() + 256);
+    }
+
+    private @NonNull QName decodeQNameRef4() throws IOException {
+        return lookupQName(input.readInt());
+    }
+
+    /**
+     * Resolve a QNameModule: either a 1/2/4-byte dictionary reference (MODREF_*) or an inline
+     * definition handled by {@link #decodeQNameModuleDef(byte)}. The 2-byte reference is offset
+     * by 256, matching the string/QName reference scheme.
+     */
+    private @NonNull QNameModule decodeQNameModule() throws IOException {
+        final byte type = input.readByte();
+        final int index;
+        switch (type) {
+            case MagnesiumValue.MODREF_1B:
+                index = input.readUnsignedByte();
+                break;
+            case MagnesiumValue.MODREF_2B:
+                index = input.readUnsignedShort() + 256;
+                break;
+            case MagnesiumValue.MODREF_4B:
+                index = input.readInt();
+                break;
+            default:
+                // Not a reference: 'type' starts an inline module definition
+                return decodeQNameModuleDef(type);
+        }
+
+        try {
+            return codedModules.get(index);
+        } catch (IndexOutOfBoundsException e) {
+            throw new InvalidNormalizedNodeStreamException("Invalid QNameModule reference " + index, e);
+        }
+    }
+
+    // QNameModule definition, i.e. two encoded strings: the namespace and an optional revision,
+    // where STRING_EMPTY marks a missing revision. The decoded module is appended to the
+    // dictionary so later MODREF_* bytes can reference it.
+    private @NonNull QNameModule decodeQNameModuleDef(final byte type) throws IOException {
+        final String namespace = readRefString(type);
+
+        final byte refType = input.readByte();
+        final String revision = refType == MagnesiumValue.STRING_EMPTY ? null : readRefString(refType);
+        final QNameModule module;
+        try {
+            module = QNameFactory.createModule(namespace, revision);
+        } catch (UncheckedExecutionException e) {
+            throw new InvalidNormalizedNodeStreamException("Illegal QNameModule ns=" + namespace + " rev=" + revision,
+                e);
+        }
+
+        codedModules.add(module);
+        return module;
+    }
+
+    private @NonNull String readRefString() throws IOException {
+        return readRefString(input.readByte());
+    }
+
+    /**
+     * Read a dictionary-coded string. Reference forms (STRING_REF_*) return a previously-defined
+     * string without adding to the dictionary; definition forms (STRING_2B/4B/CHARS/UTF) read the
+     * literal and register it for later references. STRING_EMPTY is never registered, matching
+     * the writer's handling of "".
+     */
+    private @NonNull String readRefString(final byte type) throws IOException {
+        final String str;
+        switch (type) {
+            case MagnesiumValue.STRING_REF_1B:
+                return lookupString(input.readUnsignedByte());
+            case MagnesiumValue.STRING_REF_2B:
+                // 2-byte references are offset by 256, same scheme as QName/module references
+                return lookupString(input.readUnsignedShort() + 256);
+            case MagnesiumValue.STRING_REF_4B:
+                return lookupString(input.readInt());
+            case MagnesiumValue.STRING_EMPTY:
+                return "";
+            case MagnesiumValue.STRING_2B:
+                str = readString2();
+                break;
+            case MagnesiumValue.STRING_4B:
+                str = readString4();
+                break;
+            case MagnesiumValue.STRING_CHARS:
+                str = readCharsString();
+                break;
+            case MagnesiumValue.STRING_UTF:
+                str = input.readUTF();
+                break;
+            default:
+                throw new InvalidNormalizedNodeStreamException("Unexpected String type " + type);
+        }
+
+        // TODO: consider interning Strings -- that would help with bits, but otherwise it's probably not worth it
+        codedStrings.add(verifyNotNull(str));
+        return str;
+    }
+
+    // Reads a plain (non-reference-counted) string: only inline encodings are valid here,
+    // back-references are rejected and the result is not registered in codedStrings.
+    private @NonNull String readString() throws IOException {
+        final byte type = input.readByte();
+        switch (type) {
+            case MagnesiumValue.STRING_EMPTY:
+                return "";
+            case MagnesiumValue.STRING_UTF:
+                return input.readUTF();
+            case MagnesiumValue.STRING_2B:
+                return readString2();
+            case MagnesiumValue.STRING_4B:
+                return readString4();
+            case MagnesiumValue.STRING_CHARS:
+                return readCharsString();
+            default:
+                throw new InvalidNormalizedNodeStreamException("Unexpected String type " + type);
+        }
+    }
+
+    // STRING_2B body: UTF-8 bytes with an unsigned 2-byte length prefix.
+    private @NonNull String readString2() throws IOException {
+        return readByteString(input.readUnsignedShort());
+    }
+
+    // STRING_4B body: UTF-8 bytes with a signed 4-byte length prefix.
+    private @NonNull String readString4() throws IOException {
+        return readByteString(input.readInt());
+    }
+
+    // Reads 'size' raw bytes and decodes them as UTF-8. A negative size (possible only via the
+    // 4-byte form) indicates stream corruption.
+    private @NonNull String readByteString(final int size) throws IOException {
+        if (size > 0) {
+            final byte[] bytes = new byte[size];
+            input.readFully(bytes);
+            return new String(bytes, StandardCharsets.UTF_8);
+        } else if (size == 0) {
+            return "";
+        } else {
+            throw new InvalidNormalizedNodeStreamException("Invalid String bytes length " + size);
+        }
+    }
+
+    // STRING_CHARS body: a 4-byte char count followed by that many UTF-16 chars. Used by the
+    // writer for strings too long for the other encodings.
+    private @NonNull String readCharsString() throws IOException {
+        final int size = input.readInt();
+        if (size > 0) {
+            final char[] chars = new char[size];
+            for (int i = 0; i < size; ++i) {
+                chars[i] = input.readChar();
+            }
+            return String.valueOf(chars);
+        } else if (size == 0) {
+            return "";
+        } else {
+            throw new InvalidNormalizedNodeStreamException("Invalid String chars length " + size);
+        }
+    }
+
+    // Resolves a NodeIdentifier back-reference against the definition-order table, turning an
+    // out-of-range index into a stream-corruption error.
+    private @NonNull NodeIdentifier lookupNodeIdentifier(final int index) throws InvalidNormalizedNodeStreamException {
+        try {
+            return codedNodeIdentifiers.get(index);
+        } catch (IndexOutOfBoundsException e) {
+            throw new InvalidNormalizedNodeStreamException("Invalid QName reference " + index, e);
+        }
+    }
+
+    // QName references share the NodeIdentifier table; the QName is extracted from the identifier.
+    private @NonNull QName lookupQName(final int index) throws InvalidNormalizedNodeStreamException {
+        return lookupNodeIdentifier(index).getNodeType();
+    }
+
+    // Resolves a String back-reference registered by readRefString().
+    private @NonNull String lookupString(final int index) throws InvalidNormalizedNodeStreamException {
+        try {
+            return codedStrings.get(index);
+        } catch (IndexOutOfBoundsException e) {
+            throw new InvalidNormalizedNodeStreamException("Invalid String reference " + index, e);
+        }
+    }
+
+    // Reads an anyxml payload: a plain string containing an XML document, parsed with a hardened
+    // (UntrustedXML) parser since the stream contents are not trusted.
+    private @NonNull DOMSource readDOMSource() throws IOException {
+        final String str = readString();
+        try {
+            return new DOMSource(UntrustedXML.newDocumentBuilder().parse(new InputSource(new StringReader(str)))
+                .getDocumentElement());
+        } catch (SAXException e) {
+            throw new IOException("Error parsing XML: " + str, e);
+        }
+    }
+
+    // Reads a single scalar value. The type byte selects both the logical type and, for several
+    // types, a compact encoding variant (zero shortcut, narrower width, or an inline small size).
+    private @NonNull Object readLeafValue() throws IOException {
+        final byte type = input.readByte();
+        switch (type) {
+            case MagnesiumValue.BOOLEAN_FALSE:
+                return Boolean.FALSE;
+            case MagnesiumValue.BOOLEAN_TRUE:
+                return Boolean.TRUE;
+            case MagnesiumValue.EMPTY:
+                return Empty.getInstance();
+            // Signed integers: *_0 variants carry no payload, shared boxed constants are returned
+            case MagnesiumValue.INT8:
+                return input.readByte();
+            case MagnesiumValue.INT8_0:
+                return INT8_0;
+            case MagnesiumValue.INT16:
+                return input.readShort();
+            case MagnesiumValue.INT16_0:
+                return INT16_0;
+            case MagnesiumValue.INT32:
+                return input.readInt();
+            case MagnesiumValue.INT32_0:
+                return INT32_0;
+            case MagnesiumValue.INT32_2B:
+                // Narrow encoding: int value known to fit in 16 bits, stored unsigned
+                return input.readShort() & 0xFFFF;
+            case MagnesiumValue.INT64:
+                return input.readLong();
+            case MagnesiumValue.INT64_0:
+                return INT64_0;
+            case MagnesiumValue.INT64_4B:
+                // Narrow encoding: long value known to fit in 32 bits, stored unsigned
+                return input.readInt() & 0xFFFFFFFFL;
+            // Unsigned integers follow the same pattern via the Uint* bit-pattern factories
+            case MagnesiumValue.UINT8:
+                return Uint8.fromByteBits(input.readByte());
+            case MagnesiumValue.UINT8_0:
+                return UINT8_0;
+            case MagnesiumValue.UINT16:
+                return Uint16.fromShortBits(input.readShort());
+            case MagnesiumValue.UINT16_0:
+                return UINT16_0;
+            case MagnesiumValue.UINT32:
+                return Uint32.fromIntBits(input.readInt());
+            case MagnesiumValue.UINT32_0:
+                return UINT32_0;
+            case MagnesiumValue.UINT32_2B:
+                return Uint32.fromIntBits(input.readShort() & 0xFFFF);
+            case MagnesiumValue.UINT64:
+                return Uint64.fromLongBits(input.readLong());
+            case MagnesiumValue.UINT64_0:
+                return UINT64_0;
+            case MagnesiumValue.UINT64_4B:
+                return Uint64.fromLongBits(input.readInt() & 0xFFFFFFFFL);
+            case MagnesiumValue.BIGDECIMAL:
+                // FIXME: use string -> BigDecimal cache
+                return new BigDecimal(input.readUTF());
+            case MagnesiumValue.BIGINTEGER:
+                return readBigInteger();
+            case MagnesiumValue.STRING_EMPTY:
+                return "";
+            case MagnesiumValue.STRING_UTF:
+                return input.readUTF();
+            case MagnesiumValue.STRING_2B:
+                return readString2();
+            case MagnesiumValue.STRING_4B:
+                return readString4();
+            case MagnesiumValue.STRING_CHARS:
+                return readCharsString();
+            // Binary: explicit length forms are biased by the range covered by smaller forms
+            case MagnesiumValue.BINARY_0:
+                return BINARY_0;
+            case MagnesiumValue.BINARY_1B:
+                return readBinary(128 + input.readUnsignedByte());
+            case MagnesiumValue.BINARY_2B:
+                return readBinary(384 + input.readUnsignedShort());
+            case MagnesiumValue.BINARY_4B:
+                return readBinary(input.readInt());
+            case MagnesiumValue.YIID_0:
+                return YangInstanceIdentifier.empty();
+            case MagnesiumValue.YIID:
+                return readYangInstanceIdentifier(input.readInt());
+            case MagnesiumValue.QNAME:
+                return decodeQName();
+            case MagnesiumValue.QNAME_REF_1B:
+                return decodeQNameRef1();
+            case MagnesiumValue.QNAME_REF_2B:
+                return decodeQNameRef2();
+            case MagnesiumValue.QNAME_REF_4B:
+                return decodeQNameRef4();
+            // Bits: biases mirror the writer (1B form starts at 29, 2B at 285)
+            case MagnesiumValue.BITS_0:
+                return ImmutableSet.of();
+            case MagnesiumValue.BITS_1B:
+                return readBits(input.readUnsignedByte() + 29);
+            case MagnesiumValue.BITS_2B:
+                return readBits(input.readUnsignedShort() + 285);
+            case MagnesiumValue.BITS_4B:
+                return readBits(input.readInt());
+
+            default:
+                // Remaining type bytes encode a small size directly in the type value itself
+                if (type > MagnesiumValue.BINARY_0 && type <= MagnesiumValue.BINARY_127) {
+                    return readBinary(type - MagnesiumValue.BINARY_0);
+                } else if (type > MagnesiumValue.BITS_0 && type < MagnesiumValue.BITS_1B) {
+                    return readBits(type - MagnesiumValue.BITS_0);
+                } else if (type > MagnesiumValue.YIID_0) {
+                    // Note 'byte' is range limited, so it is always '&& type <= MagnesiumValue.YIID_31'
+                    return readYangInstanceIdentifier(type - MagnesiumValue.YIID_0);
+                } else {
+                    throw new InvalidNormalizedNodeStreamException("Invalid value type " + type);
+                }
+        }
+    }
+
+    // BigInteger encoding differs between stream versions, so subclasses supply the decoder.
+    abstract @NonNull BigInteger readBigInteger() throws IOException;
+
+    // Reads 'size' raw bytes. Zero size returns the shared empty array; a negative size
+    // (possible only via BINARY_4B) indicates stream corruption.
+    private byte @NonNull [] readBinary(final int size) throws IOException {
+        if (size > 0) {
+            final byte[] ret = new byte[size];
+            input.readFully(ret);
+            return ret;
+        } else if (size == 0) {
+            return BINARY_0;
+        } else {
+            throw new InvalidNormalizedNodeStreamException("Invalid binary length " + size);
+        }
+    }
+
+    // Reads a bits value as a set of 'size' strings; the individual bit names use the shared
+    // string-reference table since they repeat across values.
+    private @NonNull ImmutableSet<String> readBits(final int size) throws IOException {
+        if (size > 0) {
+            final ImmutableSet.Builder<String> builder = ImmutableSet.builder();
+            for (int i = 0; i < size; ++i) {
+                builder.add(readRefString());
+            }
+            return builder.build();
+        } else if (size == 0) {
+            return ImmutableSet.of();
+        } else {
+            throw new InvalidNormalizedNodeStreamException("Invalid bits length " + size);
+        }
+    }
+
+    // Extracts the masked bits of a header byte, keeping byte width.
+    private static byte mask(final byte header, final byte mask) {
+        return (byte) (header & mask);
+    }
+
+    // Logical (zero-fill) right shift of a header byte, widening to a non-negative int first.
+    private static int rshift(final byte header, final byte shift) {
+        return (header & 0xFF) >>> shift;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractMagnesiumDataOutput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractMagnesiumDataOutput.java
new file mode 100644 (file)
index 0000000..3f28f06
--- /dev/null
@@ -0,0 +1,694 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.Set;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.rfc8528.data.api.MountPointIdentifier;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.common.Revision;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.opendaylight.yangtools.yang.common.Uint64;
+import org.opendaylight.yangtools.yang.common.Uint8;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Abstract base class for NormalizedNodeDataOutput based on {@link MagnesiumNode}, {@link MagnesiumPathArgument} and
+ * {@link MagnesiumValue}.
+ */
+abstract class AbstractMagnesiumDataOutput extends AbstractNormalizedNodeDataOutput {
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractMagnesiumDataOutput.class);
+
+    // Marker for encoding state when we have entered startLeafNode() within a startMapEntry() and that leaf corresponds
+    // to a key carried within NodeIdentifierWithPredicates.
+    private static final Object KEY_LEAF_STATE = new Object();
+    // Marker for nodes which have simple content and do not use END_NODE marker to terminate
+    private static final Object NO_ENDNODE_STATE = new Object();
+
+    private static final TransformerFactory TF = TransformerFactory.newInstance();
+
+    /**
+     * Stack tracking encoding state. In general we track the node identifier of the currently-open element, but there
+     * are a few other circumstances where we push other objects. See {@link #KEY_LEAF_STATE} and
+     * {@link #NO_ENDNODE_STATE}.
+     */
+    private final Deque<Object> stack = new ArrayDeque<>();
+
+    // Coding maps
+    private final Map<AugmentationIdentifier, Integer> aidCodeMap = new HashMap<>();
+    private final Map<QNameModule, Integer> moduleCodeMap = new HashMap<>();
+    private final Map<String, Integer> stringCodeMap = new HashMap<>();
+    private final Map<QName, Integer> qnameCodeMap = new HashMap<>();
+
+    // Package-private: instantiated only by version-specific subclasses over the target output.
+    AbstractMagnesiumDataOutput(final DataOutput output) {
+        super(output);
+    }
+
+    @Override
+    public final void startLeafNode(final NodeIdentifier name) throws IOException {
+        final Object current = stack.peek();
+        if (current instanceof NodeIdentifierWithPredicates) {
+            final QName qname = name.getNodeType();
+            // Key leaves inside a map entry: mark the node so scalarValue() can skip emitting the
+            // value, which is already carried by the entry's NodeIdentifierWithPredicates.
+            if (((NodeIdentifierWithPredicates) current).containsKey(qname)) {
+                writeQNameNode(MagnesiumNode.NODE_LEAF | MagnesiumNode.PREDICATE_ONE, qname);
+                stack.push(KEY_LEAF_STATE);
+                return;
+            }
+        }
+
+        startSimpleNode(MagnesiumNode.NODE_LEAF, name);
+    }
+
+    // The following start* events map directly to their node-type byte; childSizeHint is not
+    // encoded, as the format terminates containers with an explicit END_NODE marker instead.
+    @Override
+    public final void startLeafSet(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        startQNameNode(MagnesiumNode.NODE_LEAFSET, name);
+    }
+
+    @Override
+    public final void startOrderedLeafSet(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        startQNameNode(MagnesiumNode.NODE_LEAFSET_ORDERED, name);
+    }
+
+    @Override
+    public final void startLeafSetEntryNode(final NodeWithValue<?> name) throws IOException {
+        // When the entry's QName matches its parent leaf-set, it need not be re-encoded and the
+        // entry does not take an END_NODE marker.
+        if (matchesParentQName(name.getNodeType())) {
+            output.writeByte(MagnesiumNode.NODE_LEAFSET_ENTRY);
+            stack.push(NO_ENDNODE_STATE);
+        } else {
+            startSimpleNode(MagnesiumNode.NODE_LEAFSET_ENTRY, name);
+        }
+    }
+
+    @Override
+    public final void startContainerNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        startQNameNode(MagnesiumNode.NODE_CONTAINER, name);
+    }
+
+    @Override
+    public final void startUnkeyedList(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        startQNameNode(MagnesiumNode.NODE_LIST, name);
+    }
+
+    @Override
+    public final void startUnkeyedListItem(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        // List entries inherit their QName from the enclosing list
+        startInheritedNode(MagnesiumNode.NODE_LIST_ENTRY, name);
+    }
+
+    @Override
+    public final void startMapNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        startQNameNode(MagnesiumNode.NODE_MAP, name);
+    }
+
+    @Override
+    public final void startMapEntryNode(final NodeIdentifierWithPredicates identifier, final int childSizeHint)
+            throws IOException {
+        // The predicate count is folded into the header byte for the common 0/1 cases, otherwise
+        // encoded as an explicit 1- or 4-byte size following the header.
+        final int size = identifier.size();
+        if (size == 1) {
+            startInheritedNode((byte) (MagnesiumNode.NODE_MAP_ENTRY | MagnesiumNode.PREDICATE_ONE), identifier);
+        } else if (size == 0) {
+            startInheritedNode((byte) (MagnesiumNode.NODE_MAP_ENTRY | MagnesiumNode.PREDICATE_ZERO), identifier);
+        } else if (size < 256) {
+            startInheritedNode((byte) (MagnesiumNode.NODE_MAP_ENTRY | MagnesiumNode.PREDICATE_1B), identifier);
+            output.writeByte(size);
+        } else {
+            startInheritedNode((byte) (MagnesiumNode.NODE_MAP_ENTRY | MagnesiumNode.PREDICATE_4B), identifier);
+            output.writeInt(size);
+        }
+
+        writePredicates(identifier);
+    }
+
+    @Override
+    public final void startOrderedMapNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        startQNameNode(MagnesiumNode.NODE_MAP_ORDERED, name);
+    }
+
+    @Override
+    public final void startChoiceNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        startQNameNode(MagnesiumNode.NODE_CHOICE, name);
+    }
+
+    @Override
+    public final void startAugmentationNode(final AugmentationIdentifier identifier) throws IOException {
+        // AugmentationIdentifiers have their own coding table: the first occurrence is defined
+        // inline (child QName list), subsequent occurrences are emitted as a numeric reference.
+        final Integer code = aidCodeMap.get(identifier);
+        if (code == null) {
+            // Definition-order index; the reader reconstructs the same table implicitly
+            aidCodeMap.put(identifier, aidCodeMap.size());
+            output.writeByte(MagnesiumNode.NODE_AUGMENTATION | MagnesiumNode.ADDR_DEFINE);
+            final Set<QName> qnames = identifier.getPossibleChildNames();
+            output.writeInt(qnames.size());
+            for (QName qname : qnames) {
+                writeQNameInternal(qname);
+            }
+        } else {
+            writeNodeType(MagnesiumNode.NODE_AUGMENTATION, code);
+        }
+        stack.push(identifier);
+    }
+
+    @Override
+    public final boolean startAnyxmlNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
+        // Only the DOMSource object model is supported; returning false lets the caller fall back
+        // to another representation.
+        if (DOMSource.class.isAssignableFrom(objectModel)) {
+            startSimpleNode(MagnesiumNode.NODE_ANYXML, name);
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public final void domSourceValue(final DOMSource value) throws IOException {
+        // Serialize the DOM tree to an XML string and emit it as a regular string value; the
+        // reader side re-parses it in readDOMSource().
+        final StringWriter writer = new StringWriter();
+        try {
+            TF.newTransformer().transform(value, new StreamResult(writer));
+        } catch (TransformerException e) {
+            throw new IOException("Error writing anyXml", e);
+        }
+        writeValue(writer.toString());
+    }
+
+    @Override
+    public final void startYangModeledAnyXmlNode(final NodeIdentifier name, final int childSizeHint)
+            throws IOException {
+        // FIXME: implement this
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public final void endNode() throws IOException {
+        // Only real path-argument frames take an END_NODE marker; KEY_LEAF_STATE and
+        // NO_ENDNODE_STATE markers are popped silently.
+        if (stack.pop() instanceof PathArgument) {
+            output.writeByte(MagnesiumNode.NODE_END);
+        }
+    }
+
+    @Override
+    public final void scalarValue(final Object value) throws IOException {
+        // Values of map-entry key leaves are already encoded in the entry's predicates; skip them.
+        if (KEY_LEAF_STATE.equals(stack.peek())) {
+            LOG.trace("Inside a map entry key leaf, not emitting value {}", value);
+        } else {
+            writeObject(value);
+        }
+    }
+
+    @Override
+    final void writeQNameInternal(final QName qname) throws IOException {
+        // First occurrence is defined inline (and registered by encodeQName); later occurrences
+        // are emitted as compact numeric references.
+        final Integer code = qnameCodeMap.get(qname);
+        if (code == null) {
+            output.writeByte(MagnesiumValue.QNAME);
+            encodeQName(qname);
+        } else {
+            writeQNameRef(code);
+        }
+    }
+
+    @Override
+    final void writePathArgumentInternal(final PathArgument pathArgument) throws IOException {
+        // Dispatch on the concrete PathArgument subtype; note NodeIdentifierWithPredicates must be
+        // tested before plain NodeIdentifier handling would match it.
+        if (pathArgument instanceof NodeIdentifier) {
+            writeNodeIdentifier((NodeIdentifier) pathArgument);
+        } else if (pathArgument instanceof NodeIdentifierWithPredicates) {
+            writeNodeIdentifierWithPredicates((NodeIdentifierWithPredicates) pathArgument);
+        } else if (pathArgument instanceof AugmentationIdentifier) {
+            writeAugmentationIdentifier((AugmentationIdentifier) pathArgument);
+        } else if (pathArgument instanceof NodeWithValue) {
+            writeNodeWithValue((NodeWithValue<?>) pathArgument);
+        } else if (pathArgument instanceof MountPointIdentifier) {
+            writeMountPointIdentifier((MountPointIdentifier) pathArgument);
+        } else {
+            throw new IOException("Unhandled PathArgument " + pathArgument);
+        }
+    }
+
+    private void writeAugmentationIdentifier(final AugmentationIdentifier identifier) throws IOException {
+        final Set<QName> qnames = identifier.getPossibleChildNames();
+        final int size = qnames.size();
+        // Small counts (< 29) are folded into the header byte; larger counts use an explicit
+        // 1-, 2- or 4-byte size field.
+        if (size < 29) {
+            output.writeByte(MagnesiumPathArgument.AUGMENTATION_IDENTIFIER
+                | size << MagnesiumPathArgument.AID_COUNT_SHIFT);
+        } else if (size < 256) {
+            output.writeByte(MagnesiumPathArgument.AUGMENTATION_IDENTIFIER | MagnesiumPathArgument.AID_COUNT_1B);
+            output.writeByte(size);
+        } else if (size < 65536) {
+            output.writeByte(MagnesiumPathArgument.AUGMENTATION_IDENTIFIER | MagnesiumPathArgument.AID_COUNT_2B);
+            output.writeShort(size);
+        } else {
+            output.writeByte(MagnesiumPathArgument.AUGMENTATION_IDENTIFIER | MagnesiumPathArgument.AID_COUNT_4B);
+            output.writeInt(size);
+        }
+
+        for (QName qname : qnames) {
+            writeQNameInternal(qname);
+        }
+    }
+
+    // Simple path arguments: a type header plus the (possibly referenced) QName.
+    private void writeNodeIdentifier(final NodeIdentifier identifier) throws IOException {
+        writePathArgumentQName(identifier.getNodeType(), MagnesiumPathArgument.NODE_IDENTIFIER);
+    }
+
+    private void writeMountPointIdentifier(final MountPointIdentifier identifier) throws IOException {
+        writePathArgumentQName(identifier.getNodeType(), MagnesiumPathArgument.MOUNTPOINT_IDENTIFIER);
+    }
+
+    private void writeNodeIdentifierWithPredicates(final NodeIdentifierWithPredicates identifier) throws IOException {
+        final int size = identifier.size();
+        // Predicate counts below 5 are folded into the header byte; larger counts use an explicit
+        // 1-, 2- or 4-byte size field following the QName.
+        if (size < 5) {
+            writePathArgumentQName(identifier.getNodeType(),
+                (byte) (MagnesiumPathArgument.NODE_IDENTIFIER_WITH_PREDICATES
+                        | size << MagnesiumPathArgument.SIZE_SHIFT));
+        } else if (size < 256) {
+            writePathArgumentQName(identifier.getNodeType(),
+                (byte) (MagnesiumPathArgument.NODE_IDENTIFIER_WITH_PREDICATES | MagnesiumPathArgument.SIZE_1B));
+            output.writeByte(size);
+        } else if (size < 65536) {
+            writePathArgumentQName(identifier.getNodeType(),
+                (byte) (MagnesiumPathArgument.NODE_IDENTIFIER_WITH_PREDICATES | MagnesiumPathArgument.SIZE_2B));
+            output.writeShort(size);
+        } else {
+            writePathArgumentQName(identifier.getNodeType(),
+                (byte) (MagnesiumPathArgument.NODE_IDENTIFIER_WITH_PREDICATES | MagnesiumPathArgument.SIZE_4B));
+            output.writeInt(size);
+        }
+
+        writePredicates(identifier);
+    }
+
+    // Emits each predicate as a QName/value pair, in the identifier's iteration order.
+    private void writePredicates(final NodeIdentifierWithPredicates identifier) throws IOException {
+        for (Entry<QName, Object> e : identifier.entrySet()) {
+            writeQNameInternal(e.getKey());
+            writeObject(e.getValue());
+        }
+    }
+
+    private void writeNodeWithValue(final NodeWithValue<?> identifier) throws IOException {
+        writePathArgumentQName(identifier.getNodeType(), MagnesiumPathArgument.NODE_WITH_VALUE);
+        writeObject(identifier.getValue());
+    }
+
+    // Emits a path-argument header byte combined with the QName, as a back-reference when the
+    // QName has already been defined in this stream, or as an inline definition otherwise.
+    private void writePathArgumentQName(final QName qname, final byte typeHeader) throws IOException {
+        final Integer code = qnameCodeMap.get(qname);
+        if (code != null) {
+            final int val = code;
+            if (val < 256) {
+                output.writeByte(typeHeader | MagnesiumPathArgument.QNAME_REF_1B);
+                output.writeByte(val);
+            } else if (val < 65792) {
+                // 2-byte form covers 256..65791, stored biased by 256
+                output.writeByte(typeHeader | MagnesiumPathArgument.QNAME_REF_2B);
+                output.writeShort(val - 256);
+            } else {
+                output.writeByte(typeHeader | MagnesiumPathArgument.QNAME_REF_4B);
+                output.writeInt(val);
+            }
+        } else {
+            // implied '| MagnesiumPathArgument.QNAME_DEF'
+            output.writeByte(typeHeader);
+            encodeQName(qname);
+        }
+    }
+
+    @Override
+    final void writeYangInstanceIdentifierInternal(final YangInstanceIdentifier identifier) throws IOException {
+        writeValue(identifier);
+    }
+
+    // Value dispatch: selects the writeValue() overload matching the runtime type. Exact-type
+    // checks come before broader ones (e.g. QName before YangInstanceIdentifier).
+    private void writeObject(final @NonNull Object value) throws IOException {
+        if (value instanceof String) {
+            writeValue((String) value);
+        } else if (value instanceof Boolean) {
+            writeValue((Boolean) value);
+        } else if (value instanceof Byte) {
+            writeValue((Byte) value);
+        } else if (value instanceof Short) {
+            writeValue((Short) value);
+        } else if (value instanceof Integer) {
+            writeValue((Integer) value);
+        } else if (value instanceof Long) {
+            writeValue((Long) value);
+        } else if (value instanceof Uint8) {
+            writeValue((Uint8) value);
+        } else if (value instanceof Uint16) {
+            writeValue((Uint16) value);
+        } else if (value instanceof Uint32) {
+            writeValue((Uint32) value);
+        } else if (value instanceof Uint64) {
+            writeValue((Uint64) value);
+        } else if (value instanceof QName) {
+            writeQNameInternal((QName) value);
+        } else if (value instanceof YangInstanceIdentifier) {
+            writeValue((YangInstanceIdentifier) value);
+        } else if (value instanceof byte[]) {
+            writeValue((byte[]) value);
+        } else if (value instanceof Empty) {
+            // Empty carries no payload beyond its type byte
+            output.writeByte(MagnesiumValue.EMPTY);
+        } else if (value instanceof Set) {
+            // Sets of strings model YANG 'bits' values
+            writeValue((Set<?>) value);
+        } else if (value instanceof BigDecimal) {
+            writeValue((BigDecimal) value);
+        } else if (value instanceof BigInteger) {
+            writeValue((BigInteger) value);
+        } else {
+            throw new IOException("Unhandled value type " + value.getClass());
+        }
+    }
+
+    // Signed primitives: zero has a dedicated payload-free type byte, and int/long values that fit
+    // in half the width use a narrower *_2B/*_4B encoding.
+    private void writeValue(final boolean value) throws IOException {
+        output.writeByte(value ? MagnesiumValue.BOOLEAN_TRUE : MagnesiumValue.BOOLEAN_FALSE);
+    }
+
+    private void writeValue(final byte value) throws IOException {
+        if (value != 0) {
+            output.writeByte(MagnesiumValue.INT8);
+            output.writeByte(value);
+        } else {
+            output.writeByte(MagnesiumValue.INT8_0);
+        }
+    }
+
+    private void writeValue(final short value) throws IOException {
+        if (value != 0) {
+            output.writeByte(MagnesiumValue.INT16);
+            output.writeShort(value);
+        } else {
+            output.writeByte(MagnesiumValue.INT16_0);
+        }
+    }
+
+    private void writeValue(final int value) throws IOException {
+        // High 16 bits set (including all negatives) force the full 4-byte form
+        if ((value & 0xFFFF0000) != 0) {
+            output.writeByte(MagnesiumValue.INT32);
+            output.writeInt(value);
+        } else if (value != 0) {
+            output.writeByte(MagnesiumValue.INT32_2B);
+            output.writeShort(value);
+        } else {
+            output.writeByte(MagnesiumValue.INT32_0);
+        }
+    }
+
+    private void writeValue(final long value) throws IOException {
+        // High 32 bits set (including all negatives) force the full 8-byte form
+        if ((value & 0xFFFFFFFF00000000L) != 0) {
+            output.writeByte(MagnesiumValue.INT64);
+            output.writeLong(value);
+        } else if (value != 0) {
+            output.writeByte(MagnesiumValue.INT64_4B);
+            output.writeInt((int) value);
+        } else {
+            output.writeByte(MagnesiumValue.INT64_0);
+        }
+    }
+
+    // Unsigned types mirror the signed encodings, operating on the raw bit patterns.
+    private void writeValue(final Uint8 value) throws IOException {
+        final byte b = value.byteValue();
+        if (b != 0) {
+            output.writeByte(MagnesiumValue.UINT8);
+            output.writeByte(b);
+        } else {
+            output.writeByte(MagnesiumValue.UINT8_0);
+        }
+    }
+
+    private void writeValue(final Uint16 value) throws IOException {
+        final short s = value.shortValue();
+        if (s != 0) {
+            output.writeByte(MagnesiumValue.UINT16);
+            output.writeShort(s);
+        } else {
+            output.writeByte(MagnesiumValue.UINT16_0);
+        }
+    }
+
+    private void writeValue(final Uint32 value) throws IOException {
+        final int i = value.intValue();
+        if ((i & 0xFFFF0000) != 0) {
+            output.writeByte(MagnesiumValue.UINT32);
+            output.writeInt(i);
+        } else if (i != 0) {
+            output.writeByte(MagnesiumValue.UINT32_2B);
+            output.writeShort(i);
+        } else {
+            output.writeByte(MagnesiumValue.UINT32_0);
+        }
+    }
+
+    private void writeValue(final Uint64 value) throws IOException {
+        final long l = value.longValue();
+        if ((l & 0xFFFFFFFF00000000L) != 0) {
+            output.writeByte(MagnesiumValue.UINT64);
+            output.writeLong(l);
+        } else if (l != 0) {
+            output.writeByte(MagnesiumValue.UINT64_4B);
+            output.writeInt((int) l);
+        } else {
+            output.writeByte(MagnesiumValue.UINT64_0);
+        }
+    }
+
+    // BigDecimal is written via its canonical string form; the reader parses it back.
+    private void writeValue(final BigDecimal value) throws IOException {
+        output.writeByte(MagnesiumValue.BIGDECIMAL);
+        output.writeUTF(value.toString());
+    }
+
+    // BigInteger encoding differs between stream versions, so subclasses supply the encoder.
+    abstract void writeValue(BigInteger value) throws IOException;
+
+    private void writeValue(final String value) throws IOException {
+        if (value.isEmpty()) {
+            output.writeByte(MagnesiumValue.STRING_EMPTY);
+        } else if (value.length() <= Short.MAX_VALUE / 2) {
+            // Conservatively within writeUTF()'s 65535-byte limit even if every char takes multiple
+            // UTF-8 bytes
+            output.writeByte(MagnesiumValue.STRING_UTF);
+            output.writeUTF(value);
+        } else if (value.length() <= 1048576) {
+            // Mid-size strings: explicit UTF-8 byte array with a 2- or 4-byte length prefix
+            final byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
+            if (bytes.length < 65536) {
+                output.writeByte(MagnesiumValue.STRING_2B);
+                output.writeShort(bytes.length);
+            } else {
+                output.writeByte(MagnesiumValue.STRING_4B);
+                output.writeInt(bytes.length);
+            }
+            output.write(bytes);
+        } else {
+            // Very large strings: raw UTF-16 chars, avoiding a potentially huge byte[] copy
+            output.writeByte(MagnesiumValue.STRING_CHARS);
+            output.writeInt(value.length());
+            output.writeChars(value);
+        }
+    }
+
+    // Emit a binary value. Lengths 0-127 are folded into the marker byte itself; longer arrays use a biased
+    // 1-byte (128-383) or 2-byte (384-65919) length, falling back to a full 4-byte length.
+    private void writeValue(final byte[] value) throws IOException {
+        if (value.length < 128) {
+            output.writeByte(MagnesiumValue.BINARY_0 + value.length);
+        } else if (value.length < 384) {
+            // Length biased by 128 so the single byte covers 128-383
+            output.writeByte(MagnesiumValue.BINARY_1B);
+            output.writeByte(value.length - 128);
+        } else if (value.length < 65920) {
+            // Length biased by 384 so the short covers 384-65919
+            output.writeByte(MagnesiumValue.BINARY_2B);
+            output.writeShort(value.length - 384);
+        } else {
+            output.writeByte(MagnesiumValue.BINARY_4B);
+            output.writeInt(value.length);
+        }
+        output.write(value);
+    }
+
+    // Emit a YangInstanceIdentifier as its path argument count followed by each argument. Sizes 0-31 are
+    // folded into the marker byte; larger identifiers carry an explicit 4-byte count.
+    private void writeValue(final YangInstanceIdentifier value) throws IOException {
+        final List<PathArgument> args = value.getPathArguments();
+        final int size = args.size();
+        if (size > 31) {
+            output.writeByte(MagnesiumValue.YIID);
+            output.writeInt(size);
+        } else {
+            output.writeByte(MagnesiumValue.YIID_0 + size);
+        }
+        for (PathArgument arg : args) {
+            writePathArgumentInternal(arg);
+        }
+    }
+
+    // Emit a bits value (a Set of bit names). The element count uses the same folding/biasing scheme as
+    // binary values: 0-28 in the marker byte, then biased 1-byte (29-284) and 2-byte (285-65820) forms,
+    // falling back to a full 4-byte count. Each element must be a String and is emitted via the string table.
+    private void writeValue(final Set<?> value) throws IOException {
+        final int size = value.size();
+        if (size < 29) {
+            output.writeByte(MagnesiumValue.BITS_0 + size);
+        } else if (size < 285) {
+            // Count biased by 29
+            output.writeByte(MagnesiumValue.BITS_1B);
+            output.writeByte(size - 29);
+        } else if (size < 65821) {
+            // Count biased by 285
+            output.writeByte(MagnesiumValue.BITS_2B);
+            output.writeShort(size - 285);
+        } else {
+            output.writeByte(MagnesiumValue.BITS_4B);
+            output.writeInt(size);
+        }
+
+        for (Object bit : value) {
+            checkArgument(bit instanceof String, "Expected value type to be String but was %s", bit);
+            encodeString((String) bit);
+        }
+    }
+
+    // Check if the proposed QName matches the parent. This is only effective if the parent is identified by
+    // NodeIdentifier -- which is typically true
+    private boolean matchesParentQName(final QName qname) {
+        // A non-NodeIdentifier parent (or an empty stack) never matches
+        final Object current = stack.peek();
+        return current instanceof NodeIdentifier && qname.equals(((NodeIdentifier) current).getNodeType());
+    }
+
+    // Start an END_NODE-terminated node, which typically has a QName matching the parent. If that is the case we emit
+    // a parent reference instead of an explicit QName reference -- saving at least one byte
+    private void startInheritedNode(final byte type, final PathArgument name) throws IOException {
+        final QName qname = name.getNodeType();
+        if (matchesParentQName(qname)) {
+            // Same QName as the parent: a bare node-type byte implies "inherit parent's QName"
+            output.write(type);
+        } else {
+            writeQNameNode(type, qname);
+        }
+        stack.push(name);
+    }
+
+    // Start an END_NODE-terminated node, which needs its QName encoded. The argument is pushed so children
+    // can be matched against it by matchesParentQName().
+    private void startQNameNode(final byte type, final PathArgument name) throws IOException {
+        writeQNameNode(type, name.getNodeType());
+        stack.push(name);
+    }
+
+    // Start a simple node, which is not terminated through END_NODE and encode its QName. The sentinel pushed
+    // here marks a node whose end does not produce an END_NODE token.
+    private void startSimpleNode(final byte type, final PathArgument name) throws IOException {
+        writeQNameNode(type, name.getNodeType());
+        stack.push(NO_ENDNODE_STATE);
+    }
+
+    // Encode a QName-based (i.e. NodeIdentifier*) node with a particular QName. This will either result in a QName
+    // definition, or a reference, where this is encoded along with the node type.
+    private void writeQNameNode(final int type, final @NonNull QName qname) throws IOException {
+        final Integer code = qnameCodeMap.get(qname);
+        if (code == null) {
+            // First occurrence: define the QName inline; encodeQName() assigns it a table code
+            output.writeByte(type | MagnesiumNode.ADDR_DEFINE);
+            encodeQName(qname);
+        } else {
+            writeNodeType(type, code);
+        }
+    }
+
+    // Write a node type + lookup: codes up to 255 fit into a single-byte reference, larger ones take 4 bytes
+    private void writeNodeType(final int type, final int code) throws IOException {
+        if (code <= 255) {
+            output.writeByte(type | MagnesiumNode.ADDR_LOOKUP_1B);
+            output.writeByte(code);
+        } else {
+            output.writeByte(type | MagnesiumNode.ADDR_LOOKUP_4B);
+            output.writeInt(code);
+        }
+    }
+
+    // Encode a QName using lookup tables, resuling either in a reference to an existing entry, or emitting two
+    // String values.
+    private void encodeQName(final @NonNull QName qname) throws IOException {
+        final Integer prev = qnameCodeMap.put(qname, qnameCodeMap.size());
+        if (prev != null) {
+            throw new IOException("Internal coding error: attempted to re-encode " + qname + "%s already encoded as "
+                    + prev);
+        }
+
+        final QNameModule module = qname.getModule();
+        final Integer code = moduleCodeMap.get(module);
+        if (code == null) {
+            moduleCodeMap.put(module, moduleCodeMap.size());
+            encodeString(module.getNamespace().toString());
+            final Optional<Revision> rev = module.getRevision();
+            if (rev.isPresent()) {
+                encodeString(rev.get().toString());
+            } else {
+                output.writeByte(MagnesiumValue.STRING_EMPTY);
+            }
+        } else {
+            writeModuleRef(code);
+        }
+        encodeString(qname.getLocalName());
+    }
+
+    // Encode a String using lookup tables, resulting either in a reference to an existing entry, or emitting as
+    // a literal value
+    private void encodeString(final @NonNull String str) throws IOException {
+        final Integer code = stringCodeMap.get(str);
+        if (code != null) {
+            writeRef(code);
+        } else {
+            // Memoize before writing so later occurrences can reference this entry
+            stringCodeMap.put(str, stringCodeMap.size());
+            writeValue(str);
+        }
+    }
+
+    // Write a QName with a lookup table reference. This is a combination of asserting the value is a QName plus
+    // the effects of writeRef(). Codes below 256 take one byte, codes below 65792 take a biased two-byte form,
+    // larger codes take a full four bytes.
+    private void writeQNameRef(final int code) throws IOException {
+        final int val = code;
+        if (val < 256) {
+            output.writeByte(MagnesiumValue.QNAME_REF_1B);
+            output.writeByte(val);
+        } else if (val < 65792) {
+            // Biased by 256 so the short covers 256-65791
+            output.writeByte(MagnesiumValue.QNAME_REF_2B);
+            output.writeShort(val - 256);
+        } else {
+            output.writeByte(MagnesiumValue.QNAME_REF_4B);
+            output.writeInt(val);
+        }
+    }
+
+    // Write a lookup table reference, which table is being referenced is implied by the caller. Uses the same
+    // 1-byte / biased 2-byte / 4-byte size ladder as writeQNameRef().
+    private void writeRef(final int code) throws IOException {
+        final int val = code;
+        if (val < 256) {
+            output.writeByte(MagnesiumValue.STRING_REF_1B);
+            output.writeByte(val);
+        } else if (val < 65792) {
+            // Biased by 256 so the short covers 256-65791
+            output.writeByte(MagnesiumValue.STRING_REF_2B);
+            output.writeShort(val - 256);
+        } else {
+            output.writeByte(MagnesiumValue.STRING_REF_4B);
+            output.writeInt(val);
+        }
+    }
+
+    // Write a lookup module table reference, which table is being referenced is implied by the caller. Uses the
+    // same 1-byte / biased 2-byte / 4-byte size ladder as writeRef().
+    private void writeModuleRef(final int code) throws IOException {
+        final int val = code;
+        if (val < 256) {
+            output.writeByte(MagnesiumValue.MODREF_1B);
+            output.writeByte(val);
+        } else if (val < 65792) {
+            // Biased by 256 so the short covers 256-65791
+            output.writeByte(MagnesiumValue.MODREF_2B);
+            output.writeShort(val - 256);
+        } else {
+            output.writeByte(MagnesiumValue.MODREF_4B);
+            output.writeInt(val);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataInput.java
new file mode 100644 (file)
index 0000000..f6ab760
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+import java.io.DataInput;
+import java.io.IOException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Base class for {@link NormalizedNodeDataInput} implementations, forwarding raw DataInput operations to a
+ * backing stream and providing a shared {@link #readSchemaPath()} implementation on top of {@link #readQName()}.
+ */
+abstract class AbstractNormalizedNodeDataInput extends ForwardingDataInput implements NormalizedNodeDataInput {
+    // Visible for subclasses
+    final @NonNull DataInput input;
+
+    AbstractNormalizedNodeDataInput(final DataInput input) {
+        this.input = requireNonNull(input);
+    }
+
+    @Override
+    final DataInput delegate() {
+        return input;
+    }
+
+    @Override
+    public final SchemaPath readSchemaPath() throws IOException {
+        // Wire layout: absolute flag, qname count, then the qnames themselves
+        final boolean absolute = input.readBoolean();
+        final int size = input.readInt();
+
+        final Builder<QName> qnames = ImmutableList.builderWithExpectedSize(size);
+        for (int i = 0; i < size; ++i) {
+            qnames.add(readQName());
+        }
+        return SchemaPath.create(qnames.build(), absolute);
+    }
+
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataOutput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataOutput.java
new file mode 100755 (executable)
index 0000000..d8e021f
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Abstract base class for implementing {@link NormalizedNodeDataOutput} contract. This class uses
+ * {@link NormalizedNodeStreamWriter} as an internal interface for performing the actual NormalizedNode writeout,
+ * i.e. it will defer to a {@link NormalizedNodeWriter} instance.
+ *
+ * <p>
+ * As such, this is an implementation detail not exposed from this package, hence implementations can rely on the
+ * stream being initialized with a header and version.
+ */
+abstract class AbstractNormalizedNodeDataOutput implements NormalizedNodeDataOutput, NormalizedNodeStreamWriter {
+    // Visible for subclasses
+    final DataOutput output;
+
+    // Created lazily on first writeNormalizedNode() call
+    private NormalizedNodeWriter normalizedNodeWriter;
+    // Becomes true once the signature marker and version have been emitted
+    private boolean headerWritten;
+
+    AbstractNormalizedNodeDataOutput(final DataOutput output) {
+        this.output = requireNonNull(output);
+    }
+
+
+    // Lazily emit the stream header (signature marker + version) exactly once, before any payload byte
+    private void ensureHeaderWritten() throws IOException {
+        if (!headerWritten) {
+            output.writeByte(TokenTypes.SIGNATURE_MARKER);
+            output.writeShort(streamVersion());
+            headerWritten = true;
+        }
+    }
+
+    // Each DataOutput method first ensures the header has been written, then forwards to the backing output
+    @Override
+    public final void write(final int value) throws IOException {
+        ensureHeaderWritten();
+        output.write(value);
+    }
+
+    @Override
+    public final void write(final byte[] bytes) throws IOException {
+        ensureHeaderWritten();
+        output.write(bytes);
+    }
+
+    @Override
+    public final void write(final byte[] bytes, final int off, final int len) throws IOException {
+        ensureHeaderWritten();
+        output.write(bytes, off, len);
+    }
+
+    @Override
+    public final void writeBoolean(final boolean value) throws IOException {
+        ensureHeaderWritten();
+        output.writeBoolean(value);
+    }
+
+    @Override
+    public final void writeByte(final int value) throws IOException {
+        ensureHeaderWritten();
+        output.writeByte(value);
+    }
+
+    @Override
+    public final void writeShort(final int value) throws IOException {
+        ensureHeaderWritten();
+        output.writeShort(value);
+    }
+
+    @Override
+    public final void writeChar(final int value) throws IOException {
+        ensureHeaderWritten();
+        output.writeChar(value);
+    }
+
+    @Override
+    public final void writeInt(final int value) throws IOException {
+        ensureHeaderWritten();
+        output.writeInt(value);
+    }
+
+    @Override
+    public final void writeLong(final long value) throws IOException {
+        ensureHeaderWritten();
+        output.writeLong(value);
+    }
+
+    @Override
+    public final void writeFloat(final float value) throws IOException {
+        ensureHeaderWritten();
+        output.writeFloat(value);
+    }
+
+    @Override
+    public final void writeDouble(final double value) throws IOException {
+        ensureHeaderWritten();
+        output.writeDouble(value);
+    }
+
+    @Override
+    public final void writeBytes(final String str) throws IOException {
+        ensureHeaderWritten();
+        output.writeBytes(str);
+    }
+
+    @Override
+    public final void writeChars(final String str) throws IOException {
+        ensureHeaderWritten();
+        output.writeChars(str);
+    }
+
+    @Override
+    public final void writeUTF(final String str) throws IOException {
+        ensureHeaderWritten();
+        output.writeUTF(str);
+    }
+
+    @Override
+    public final void writeQName(final QName qname) throws IOException {
+        ensureHeaderWritten();
+        writeQNameInternal(qname);
+    }
+
+    @Override
+    public final void writeNormalizedNode(final NormalizedNode<?, ?> node) throws IOException {
+        ensureHeaderWritten();
+        if (normalizedNodeWriter == null) {
+            // This class is itself the NormalizedNodeStreamWriter the writer emits into
+            normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(this);
+        }
+        normalizedNodeWriter.write(node);
+    }
+
+    @Override
+    public final void writePathArgument(final PathArgument pathArgument) throws IOException {
+        ensureHeaderWritten();
+        writePathArgumentInternal(pathArgument);
+    }
+
+    @Override
+    public final void writeYangInstanceIdentifier(final YangInstanceIdentifier identifier) throws IOException {
+        ensureHeaderWritten();
+        writeYangInstanceIdentifierInternal(identifier);
+    }
+
+    @Override
+    public final void writeSchemaPath(final SchemaPath path) throws IOException {
+        ensureHeaderWritten();
+
+        // Wire layout: absolute flag, qname count, then the qnames themselves
+        output.writeBoolean(path.isAbsolute());
+        final List<QName> qnames = path.getPath();
+        output.writeInt(qnames.size());
+        for (QName qname : qnames) {
+            writeQNameInternal(qname);
+        }
+    }
+
+    @Override
+    public final void close() throws IOException {
+        // Note: intentionally does not close the underlying output, only flushes it
+        flush();
+    }
+
+    @Override
+    public void flush() throws IOException {
+        // DataOutput itself has no flush(); only forward when the backing object is an OutputStream
+        if (output instanceof OutputStream) {
+            ((OutputStream)output).flush();
+        }
+    }
+
+    @Override
+    public final boolean startAnydataNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
+        // FIXME: We do not support anydata nodes of any kind, yet
+        return false;
+    }
+
+    // Version short emitted in the stream header
+    abstract short streamVersion();
+
+    // Version-specific encodings, implemented by concrete subclasses
+    abstract void writeQNameInternal(@NonNull QName qname) throws IOException;
+
+    abstract void writePathArgumentInternal(PathArgument pathArgument) throws IOException;
+
+    abstract void writeYangInstanceIdentifierInternal(YangInstanceIdentifier identifier) throws IOException;
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ForwardingDataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ForwardingDataInput.java
new file mode 100644 (file)
index 0000000..0ec7fbe
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataInput;
+import java.io.IOException;
+import org.eclipse.jdt.annotation.NonNull;
+
+// Not a ForwardingObject because delegate() can legally throw and we do not want redirect toString()
+abstract class ForwardingDataInput implements DataInput {
+
+    // Unlike Guava's ForwardingObject, the delegate may legally fail to materialize with an IOException
+    abstract @NonNull DataInput delegate() throws IOException;
+
+    @Override
+    @SuppressWarnings("checkstyle:parameterName")
+    public final void readFully(final byte[] b) throws IOException {
+        delegate().readFully(b);
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:parameterName")
+    public final void readFully(final byte[] b, final int off, final int len) throws IOException {
+        delegate().readFully(b, off, len);
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:parameterName")
+    public final int skipBytes(final int n) throws IOException {
+        return delegate().skipBytes(n);
+    }
+
+    @Override
+    public final boolean readBoolean() throws IOException {
+        return delegate().readBoolean();
+    }
+
+    @Override
+    public final byte readByte() throws IOException {
+        return delegate().readByte();
+    }
+
+    @Override
+    public final int readUnsignedByte() throws IOException {
+        return delegate().readUnsignedByte();
+    }
+
+    @Override
+    public final short readShort() throws IOException {
+        return delegate().readShort();
+    }
+
+    @Override
+    public final int readUnsignedShort() throws IOException {
+        return delegate().readUnsignedShort();
+    }
+
+    @Override
+    public final char readChar() throws IOException {
+        return delegate().readChar();
+    }
+
+    @Override
+    public final int readInt() throws IOException {
+        return delegate().readInt();
+    }
+
+    @Override
+    public final long readLong() throws IOException {
+        return delegate().readLong();
+    }
+
+    @Override
+    public final float readFloat() throws IOException {
+        return delegate().readFloat();
+    }
+
+    @Override
+    public final double readDouble() throws IOException {
+        return delegate().readDouble();
+    }
+
+    @Override
+    public final String readLine() throws IOException {
+        return delegate().readLine();
+    }
+
+    @Override
+    public final String readUTF() throws IOException {
+        return delegate().readUTF();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ForwardingNormalizedNodeDataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ForwardingNormalizedNodeDataInput.java
new file mode 100644 (file)
index 0000000..72073c6
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.IOException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+// Forwards all NormalizedNodeDataInput operations (on top of ForwardingDataInput's raw DataInput forwarding)
+// to a delegate which may be resolved lazily and may fail with an IOException.
+abstract class ForwardingNormalizedNodeDataInput extends ForwardingDataInput implements NormalizedNodeDataInput {
+
+    @Override
+    abstract @NonNull NormalizedNodeDataInput delegate() throws IOException;
+
+    @Override
+    public final void streamNormalizedNode(final NormalizedNodeStreamWriter writer) throws IOException {
+        delegate().streamNormalizedNode(writer);
+    }
+
+    @Override
+    public final NormalizedNode<?, ?> readNormalizedNode() throws IOException {
+        return delegate().readNormalizedNode();
+    }
+
+    @Override
+    public final NormalizedNode<?, ?> readNormalizedNode(final ReusableStreamReceiver receiver) throws IOException {
+        return delegate().readNormalizedNode(receiver);
+    }
+
+    @Override
+    public final YangInstanceIdentifier readYangInstanceIdentifier() throws IOException {
+        return delegate().readYangInstanceIdentifier();
+    }
+
+    @Override
+    public final QName readQName() throws IOException {
+        return delegate().readQName();
+    }
+
+    @Override
+    public final PathArgument readPathArgument() throws IOException {
+        return delegate().readPathArgument();
+    }
+
+    @Override
+    public final SchemaPath readSchemaPath() throws IOException {
+        return delegate().readSchemaPath();
+    }
+
+    @Override
+    public final NormalizedNodeStreamVersion getVersion() throws IOException {
+        return delegate().getVersion();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/InvalidNormalizedNodeStreamException.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/InvalidNormalizedNodeStreamException.java
new file mode 100644 (file)
index 0000000..2099e3d
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown from NormalizedNodeInputStreamReader when the input stream does not contain
+ * valid serialized data.
+ *
+ * @author Thomas Pantelis
+ */
+public class InvalidNormalizedNodeStreamException extends IOException {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Create a new exception with the specified detail message.
+     *
+     * @param message detail message describing the invalid stream content
+     */
+    public InvalidNormalizedNodeStreamException(final String message) {
+        super(message);
+    }
+
+    /**
+     * Create a new exception with the specified detail message and cause.
+     *
+     * @param message detail message describing the invalid stream content
+     * @param cause underlying cause of this exception
+     */
+    public InvalidNormalizedNodeStreamException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNode.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNode.java
new file mode 100644 (file)
index 0000000..fbc2fe8
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+/**
+ * Stream constants identifying individual node types.
+ */
+final class LithiumNode {
+    // NOTE: these byte values are part of the Lithium wire format; they must not be renumbered
+    static final byte LEAF_NODE = 1;
+    static final byte LEAF_SET = 2;
+    static final byte LEAF_SET_ENTRY_NODE = 3;
+    static final byte CONTAINER_NODE = 4;
+    static final byte UNKEYED_LIST = 5;
+    static final byte UNKEYED_LIST_ITEM = 6;
+    static final byte MAP_NODE = 7;
+    static final byte MAP_ENTRY_NODE = 8;
+    static final byte ORDERED_MAP_NODE = 9;
+    static final byte CHOICE_NODE = 10;
+    static final byte AUGMENTATION_NODE = 11;
+    static final byte ANY_XML_NODE = 12;
+    static final byte END_NODE = 13;
+    static final byte ORDERED_LEAF_SET = 14;
+    static final byte YANG_MODELED_ANY_XML_NODE = 15;
+
+    private LithiumNode() {
+        // Utility class, never instantiated
+        throw new UnsupportedOperationException("utility class");
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNormalizedNodeInputStreamReader.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNormalizedNodeInputStreamReader.java
new file mode 100755 (executable)
index 0000000..871f371
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.base.Strings;
+import java.io.DataInput;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+
+/**
+ * Lithium (or Oxygen really) specialization of AbstractLithiumDataInput.
+ */
+final class LithiumNormalizedNodeInputStreamReader extends AbstractLithiumDataInput {
+    LithiumNormalizedNodeInputStreamReader(final DataInput input) {
+        super(input);
+    }
+
+    @Override
+    public NormalizedNodeStreamVersion getVersion() {
+        return NormalizedNodeStreamVersion.LITHIUM;
+    }
+
+    @Override
+    public QName readQName() throws IOException {
+        // Read in the same sequence of writing
+        String localName = readCodedString();
+        String namespace = readCodedString();
+        // An empty revision string on the wire means "no revision"
+        String revision = Strings.emptyToNull(readCodedString());
+
+        return QNameFactory.create(localName, namespace, revision);
+    }
+
+    @Override
+    AugmentationIdentifier readAugmentationIdentifier() throws IOException {
+        // No version-specific handling needed; use the shared default decoding
+        return defaultReadAugmentationIdentifier();
+    }
+
+    @Override
+    NodeIdentifier readNodeIdentifier() throws IOException {
+        return new NodeIdentifier(readQName());
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNormalizedNodeOutputStreamWriter.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumNormalizedNodeOutputStreamWriter.java
new file mode 100644 (file)
index 0000000..b14bc19
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+
+/**
+ * NormalizedNodeOutputStreamWriter will be used by distributed datastore to send normalized node in
+ * a stream.
+ * A stream writer wrapper around this class will write node objects to stream in recursive manner.
+ * for example - If you have a ContainerNode which has a two LeafNode as children, then
+ * you will first call
+ * {@link #startContainerNode(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier, int)},
+ * then will call
+ * {@link #leafNode(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier, Object)} twice
+ * and then, {@link #endNode()} to end container node.
+ *
+ * <p>Based on the each node, the node type is also written to the stream, that helps in reconstructing the object,
+ * while reading.
+ */
+final class LithiumNormalizedNodeOutputStreamWriter extends AbstractLithiumDataOutput {
+    LithiumNormalizedNodeOutputStreamWriter(final DataOutput output) {
+        super(output);
+    }
+
+    @Override
+    short streamVersion() {
+        return TokenTypes.LITHIUM_VERSION;
+    }
+
+    // No version-specific encodings are needed at this level; defer to the shared default implementations
+    @Override
+    void writeQNameInternal(final QName qname) throws IOException {
+        defaultWriteQName(qname);
+    }
+
+    @Override
+    void writeModule(final QNameModule module) throws IOException {
+        defaultWriteModule(module);
+    }
+
+    @Override
+    void writeAugmentationIdentifier(final AugmentationIdentifier aid) throws IOException {
+        defaultWriteAugmentationIdentifier(aid);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumPathArgument.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumPathArgument.java
new file mode 100644 (file)
index 0000000..660e0a7
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+
+// Wire-format type codes for the four PathArgument flavours used by the Lithium encoding, plus the
+// PathArgument-to-code mapping used when serializing.
+final class LithiumPathArgument {
+    // NOTE: these byte values are part of the Lithium wire format; they must not be renumbered
+    static final byte AUGMENTATION_IDENTIFIER = 1;
+    static final byte NODE_IDENTIFIER = 2;
+    static final byte NODE_IDENTIFIER_WITH_VALUE = 3;
+    static final byte NODE_IDENTIFIER_WITH_PREDICATES = 4;
+
+    private LithiumPathArgument() {
+        throw new UnsupportedOperationException("Utility class");
+    }
+
+    // Map a concrete PathArgument implementation to its wire-format type code, rejecting unknown subclasses
+    static byte getSerializablePathArgumentType(final PathArgument pathArgument) {
+        if (pathArgument instanceof NodeIdentifier) {
+            return NODE_IDENTIFIER;
+        } else if (pathArgument instanceof NodeIdentifierWithPredicates) {
+            return NODE_IDENTIFIER_WITH_PREDICATES;
+        } else if (pathArgument instanceof AugmentationIdentifier) {
+            return AUGMENTATION_IDENTIFIER;
+        } else if (pathArgument instanceof NodeWithValue) {
+            return NODE_IDENTIFIER_WITH_VALUE;
+        } else {
+            throw new IllegalArgumentException("Unknown type of PathArgument = " + pathArgument);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumTokens.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumTokens.java
new file mode 100644 (file)
index 0000000..27241e9
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+/**
+ * Tokens related to Lithium/NeonSR2 encoding.
+ */
+final class LithiumTokens {
+    /**
+     * The value is a reference to a previously-defined entity, typically through {@link #IS_STRING_VALUE}.
+     */
+    static final byte IS_CODE_VALUE = 1;
+    /**
+     * The value is a String, which needs to be kept memoized for the purposes for being referenced by
+     * {@link #IS_CODE_VALUE}.
+     */
+    static final byte IS_STRING_VALUE = 2;
+    /**
+     * The value is an explicit null.
+     */
+    static final byte IS_NULL_VALUE = 3;
+
+    private LithiumTokens() {
+
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumValue.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/LithiumValue.java
new file mode 100644 (file)
index 0000000..cd118ea
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+/**
+ * Value type tokens used by the Lithium stream encoding. Each constant identifies the type of a single serialized
+ * value within the stream.
+ */
+final class LithiumValue {
+    // The String length threshold beyond which a String should be encoded as bytes
+    static final int STRING_BYTES_LENGTH_THRESHOLD = Short.MAX_VALUE / 4;
+
+    static final byte SHORT_TYPE = 1;
+    static final byte BYTE_TYPE = 2;
+    static final byte INT_TYPE = 3;
+    static final byte LONG_TYPE = 4;
+    static final byte BOOL_TYPE = 5;
+    static final byte QNAME_TYPE = 6;
+    static final byte BITS_TYPE = 7;
+    static final byte YANG_IDENTIFIER_TYPE = 8;
+    static final byte STRING_TYPE = 9;
+    static final byte BIG_INTEGER_TYPE = 10;
+    static final byte BIG_DECIMAL_TYPE = 11;
+    static final byte BINARY_TYPE = 12;
+    // Leaf nodes no longer allow null values. The "empty" type is now represented as
+    // org.opendaylight.yangtools.yang.common.Empty. This is kept for backwards compatibility.
+    @Deprecated
+    static final byte NULL_TYPE = 13;
+    static final byte STRING_BYTES_TYPE = 14;
+    static final byte EMPTY_TYPE = 15;
+
+    private LithiumValue() {
+        throw new UnsupportedOperationException("Utility class");
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumDataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumDataInput.java
new file mode 100644 (file)
index 0000000..74c8a69
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.math.BigInteger;
+
+/**
+ * Magnesium-format specialization of AbstractMagnesiumDataInput. The Magnesium stream format does not carry
+ * BigInteger values, hence {@link #readBigInteger()} always reports a malformed stream.
+ */
+final class MagnesiumDataInput extends AbstractMagnesiumDataInput {
+    MagnesiumDataInput(final DataInput input) {
+        super(input);
+    }
+
+    @Override
+    public NormalizedNodeStreamVersion getVersion() {
+        return NormalizedNodeStreamVersion.MAGNESIUM;
+    }
+
+    @Override
+    BigInteger readBigInteger() throws IOException {
+        // A conforming Magnesium stream can never contain a BigInteger, so encountering one is a stream error
+        throw new InvalidNormalizedNodeStreamException("BigInteger coding is not supported");
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumDataOutput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumDataOutput.java
new file mode 100644 (file)
index 0000000..a438268
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.math.BigInteger;
+
+/**
+ * Magnesium-format specialization of AbstractMagnesiumDataOutput. The Magnesium stream format does not carry
+ * BigInteger values, hence {@link #writeValue(BigInteger)} always fails.
+ */
+final class MagnesiumDataOutput extends AbstractMagnesiumDataOutput {
+    MagnesiumDataOutput(final DataOutput output) {
+        super(output);
+    }
+
+    @Override
+    short streamVersion() {
+        return TokenTypes.MAGNESIUM_VERSION;
+    }
+
+    @Override
+    void writeValue(final BigInteger value) throws IOException {
+        // Callers are expected to down-convert BigInteger before writing to a Magnesium stream
+        throw new IOException("BigInteger values are not supported");
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumNode.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumNode.java
new file mode 100644 (file)
index 0000000..30a35ab
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+/**
+ * Magnesium encoding Node types. Encoded as a single byte, split as follows:
+ * <pre>
+ *   7 6 5 4 3 2 1 0
+ *  +-+-+-+-+-+-+-+-+
+ *  | P | A |  Type |
+ *  +-+-+-+-+-+-+-+-+
+ * </pre>
+ * The fields being:
+ * <ul>
+ *   <li>Bits 7 and 6 (most significant): predicate presence. Only valid for NODE_MAP_ENTRY and NODE_LEAF</li>
+ *   <li>Bits 5 and 4: addressing mode</li>
+ *   <li>Bits 3-0 (least significant) node type</li>
+ * </ul>
+ */
+// TODO: restructure this into some concrete examples
+//- a leaf referencing a previously-encoded NodeIdentifier would take
+//6 bytes:
+//  (byte)    NodeTypes.LEAF_NODE
+//  (byte)    TokenTypes.IS_QNAME_CODE
+//  (int)     code value
+//where as new tokens can do that in as few as 2 bytes:
+//  (byte)    NodeType.(NODE_LEAF | ADDR_LOOKUP_1B)
+//  (byte)    code value
+//with worst-case being 5 bytes:
+//  (byte)    NodeType.(NODE_LEAF | ADDR_LOOKUP_4B)
+//  (int)     code value
+//- a map entry node referencing previously-encoded QNames and a single
+//predicate would take a base of 15 bytes (not counting value object):
+//  (byte)    NodeTypes.MAP_ENTRY_NODE
+//  (byte)    TokenTypes.IS_QNAME_CODE
+//  (int)     code value
+//  (int)     size of predicates
+//  (byte)    TokenTypes.IS_QNAME_CODE
+//  (int)     code value
+//whereas new tokens can do that in as few as 3 bytes:
+//  (byte)    NodeType.(NODE_MAP_ENTRY | ADDR_LOOKUP_1B | PREDICATE_ONE)
+//  (byte)    code value
+//  (byte)    code value
+//this ability is maintained for up to 255 predicates with:
+//  (byte)    NodeType.(NODE_MAP_ENTRY | ADDR_LOOKUP_1B | PREDICATE_1B)
+//  (byte)    code value
+//  (byte)    size of predicates
+//  (byte)    code value [0-255]
+//- a leaf representing a key inside a map entry has the ability to skip
+//value encoding by being as simple as:
+//  (byte)    NodeType.(NODE_LEAF | ADDR_LOOKUP_1B | PREDICATE_ONE)
+//  (byte)    code value
+//
+final class MagnesiumNode {
+    /**
+     * End of node marker. Does not support addressing modes.
+     */
+    static final byte NODE_END             = 0x00; // N/A
+    /**
+     * A leaf node. Encoding can specify {@link #PREDICATE_ONE}, which indicates the value is skipped as the encoder
+     * has emitted a parent MapNode, whose identifier contains the value.
+     */
+    static final byte NODE_LEAF            = 0x01;
+    static final byte NODE_CONTAINER       = 0x02;
+    static final byte NODE_LIST            = 0x03;
+    static final byte NODE_MAP             = 0x04;
+    static final byte NODE_MAP_ORDERED     = 0x05;
+    static final byte NODE_LEAFSET         = 0x06;
+    static final byte NODE_LEAFSET_ORDERED = 0x07;
+    static final byte NODE_CHOICE          = 0x08;
+    static final byte NODE_AUGMENTATION    = 0x09;
+    static final byte NODE_ANYXML          = 0x0A;
+    static final byte NODE_LIST_ENTRY      = 0x0B;
+    static final byte NODE_LEAFSET_ENTRY   = 0x0C;
+    static final byte NODE_MAP_ENTRY       = 0x0D;
+
+    // TODO: either implement or remove this coding. While Lithium has emit code, it lacks the code do read such nodes,
+    //       which most probably means we do not need to bother ...
+    static final byte NODE_ANYXML_MODELED  = 0x0E;
+    // 0x0F reserved for anydata
+    static final byte TYPE_MASK            = 0x0F;
+
+
+    /**
+     * Inherit identifier from parent. This addressing mode is applicable in:
+     * <ul>
+     *   <li>{@link #NODE_END}, where an identifier is not applicable
+     *   <li>{@link #NODE_LIST_ENTRY}, where the NodeIdentifier is inherited from parent {@link #NODE_LIST}</li>
+     *   <li>{@link #NODE_MAP_ENTRY}, where the NodeIdentifier is inherited from parent {@link #NODE_MAP} or
+     *       {@link #NODE_MAP_ORDERED}</li>
+     *   <li>{@link #NODE_LEAFSET_ENTRY}, where the QName inherited from parent and the value is inferred from the
+     *       next {@link MagnesiumValue} encoded</li>
+     * </ul>
+     */
+    static final byte ADDR_PARENT     = 0x00;
+    /**
+     * Define a new QName-based identifier constant. For {@link #NODE_AUGMENTATION} this is a set of QNames. Assign
+     * a new linear key to this constant.
+     */
+    static final byte ADDR_DEFINE     = 0x10;
+    /**
+     * Reference a previously {@link #ADDR_DEFINE}d identifier constant. This node byte is followed by an unsigned
+     * byte, which holds the linear key previously defined (i.e. 0-255).
+     */
+    static final byte ADDR_LOOKUP_1B  = 0x20;
+    /**
+     * Reference a previously {@link #ADDR_DEFINE}d identifier constant. This node byte is followed by a signed int,
+     * which holds the linear key previously defined.
+     */
+    static final byte ADDR_LOOKUP_4B  = 0x30;
+    static final byte ADDR_MASK       = ADDR_LOOKUP_4B;
+
+    /**
+     * Predicate encoding: no predicates are present in a {@link #NODE_MAP_ENTRY}.
+     */
+    static final byte PREDICATE_ZERO = 0x00;
+
+    /**
+     * Predicate encoding: a single predicate is present in a {@link #NODE_MAP_ENTRY}. In case of {@link #NODE_LEAF}
+     * encoded as part of a {@link #NODE_MAP_ENTRY} this bit indicates the <strong>value</strong> is not encoded and
+     * should be looked up from the map entry's predicates.
+     *
+     * <p>
+     * The predicate is encoded as a {@link #ADDR_DEFINE} or {@link #ADDR_LOOKUP_1B}/{@link #ADDR_LOOKUP_4B},
+     * followed by an encoded {@link MagnesiumValue}.
+     */
+    static final byte PREDICATE_ONE   = 0x40;
+
+    /**
+     * Predicate encoding: 0-255 predicates are present, as specified by the following {@code unsigned byte}. This
+     * encoding is expected to be exceedingly rare. This should not be used to encode 0 or 1 predicate, those cases
+     * should be encoded as:
+     * <ul>
+     *   <li>no PREDICATE_* set when there are no predicates (probably not valid anyway)</li>
+     *   <li><{@link #PREDICATE_ONE} if there is only one predicate</li>
+     * </ul>
+     */
+    static final byte PREDICATE_1B    = (byte) 0x80;
+
+    /**
+     * Predicate encoding 0 - {@link Integer#MAX_VALUE} predicates are present, as specified by the following
+     * {@code int}. This should not be used where 0-255 predicates are present.
+     */
+    static final byte PREDICATE_4B    = (byte) (PREDICATE_ONE | PREDICATE_1B);
+    static final byte PREDICATE_MASK  = PREDICATE_4B;
+
+    private MagnesiumNode() {
+
+    }
+}
\ No newline at end of file
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumPathArgument.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumPathArgument.java
new file mode 100644 (file)
index 0000000..e1022e0
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+/**
+ * Path Argument types used in Magnesium encoding. These are encoded as a single byte, three bits of which are reserved
+ * for PathArgument type itself:
+ * <pre>
+ *   7 6 5 4 3 2 1 0
+ *  +-+-+-+-+-+-+-+-+
+ *  |         | Type|
+ *  +-+-+-+-+-+-+-+-+
+ * </pre>
+ * There are five types defined:
+ * <ul>
+ *   <li>{@link #AUGMENTATION_IDENTIFIER}, which additionally holds the number of QName elements encoded:
+ *     <pre>
+ *        7 6 5 4 3 2 1 0
+ *       +-+-+-+-+-+-+-+-+
+ *       |  Count  |0 0 0|
+ *       +-+-+-+-+-+-+-+-+
+ *     </pre>
+ *     Where count is coded as an unsigned integer, with {@link #AID_COUNT_1B} and {@link #AID_COUNT_2B} and
+ *     {@link #AID_COUNT_4B} indicating extended coding with up to 4 additional bytes. This byte is followed by
+ *     {@code count} {@link MagnesiumValue} QNames.
+ *     <pre>
+ *       7 6 5 4 3 2 1 0
+ *      +-+-+-+-+-+-+-+-+
+ *      |0 0 0| Q |0 0 1|
+ *      +-+-+-+-+-+-+-+-+
+ *     </pre>
+ *     Where QName coding is achieved via {@link #QNAME_DEF}, {@link #QNAME_REF_1B}, {@link #QNAME_REF_2B} and
+ *     {@link #QNAME_REF_4B}.
+ *   </li>
+ *   <li>{@link #NODE_IDENTIFIER_WITH_PREDICATES}, which encodes a QName same way NodeIdentifier does:
+ *     <pre>
+ *       7 6 5 4 3 2 1 0
+ *      +-+-+-+-+-+-+-+-+
+ *      | Size| Q |0 1 0|
+ *      +-+-+-+-+-+-+-+-+
+ *      </pre>
+ *      but additionally encodes number of predicates contained using {@link #SIZE_0} through {@link #SIZE_4}. If that
+ *      number cannot be expressed, {@link #SIZE_1B}, {@link #SIZE_2B} and {@link #SIZE_4B} indicate number and format
+ *      of additional bytes that hold number of predicates.
+ *
+ *      <p>
+ *      This is then followed by the specified number of QName/Object key/value pairs based on {@link MagnesiumValue}
+ *      encoding.
+ *   </li>
+ *   <li>{@link #NODE_WITH_VALUE}, which encodes a QName same way NodeIdentifier does:
+ *     <pre>
+ *       7 6 5 4 3 2 1 0
+ *      +-+-+-+-+-+-+-+-+
+ *      |0 0 0| Q |0 1 1|
+ *      +-+-+-+-+-+-+-+-+
+ *     </pre>
+ *     but is additionally followed by a single encoded value, as per {@link MagnesiumValue}.
+ *   </li>
+ *   <li>{@link #MOUNTPOINT_IDENTIFIER}, which encodes a QName same way NodeIdentifier does:
+ *     <pre>
+ *       7 6 5 4 3 2 1 0
+ *      +-+-+-+-+-+-+-+-+
+ *      |0 0 0| Q |1 0 0|
+ *      +-+-+-+-+-+-+-+-+
+ *     </pre>
+ *   </li>
+ * </ul>
+ */
+final class MagnesiumPathArgument {
+    // 3 bits reserved for type...
+    static final byte AUGMENTATION_IDENTIFIER         = 0x00;
+    static final byte NODE_IDENTIFIER                 = 0x01;
+    static final byte NODE_IDENTIFIER_WITH_PREDICATES = 0x02;
+    static final byte NODE_WITH_VALUE                 = 0x03;
+    static final byte MOUNTPOINT_IDENTIFIER           = 0x04;
+
+    // ... leaving three values currently unused
+    // 0x05 reserved
+    // 0x06 reserved
+    // 0x07 reserved
+
+    static final byte TYPE_MASK                       = 0x07;
+
+    // In case of AUGMENTATION_IDENTIFIER, top 5 bits are used to encode the number of path arguments, except last three
+    // values. This means that up to AugmentationIdentifiers with up to 28 components have this length encoded inline,
+    // otherwise we encode them in following 1 (unsigned), 2 (unsigned) or 4 (signed) bytes
+    static final byte AID_COUNT_1B                    = (byte) 0xE8;
+    static final byte AID_COUNT_2B                    = (byte) 0xF0;
+    static final byte AID_COUNT_4B                    = (byte) 0xF8;
+    static final byte AID_COUNT_MASK                  = AID_COUNT_4B;
+    static final byte AID_COUNT_SHIFT                 = 3;
+
+    // For normal path path arguments we can either define a QName reference or follow a 1-4 byte reference.
+    static final byte QNAME_DEF                       = 0x00;
+    static final byte QNAME_REF_1B                    = 0x08; // Unsigned
+    static final byte QNAME_REF_2B                    = 0x10; // Unsigned
+    static final byte QNAME_REF_4B                    = 0x18; // Signed
+    static final byte QNAME_MASK                      = QNAME_REF_4B;
+
+    // For NodeIdentifierWithPredicates we also carry the number of subsequent path arguments. The case of 0-4 arguments
+    // is indicated directly, otherwise there is 1-4 bytes carrying the reference.
+    static final byte SIZE_0                          = 0x00;
+    static final byte SIZE_1                          = 0x20;
+    static final byte SIZE_2                          = 0x40;
+    static final byte SIZE_3                          = 0x60;
+    static final byte SIZE_4                          = (byte) 0x80;
+    static final byte SIZE_1B                         = (byte) 0xA0;
+    static final byte SIZE_2B                         = (byte) 0xC0;
+    static final byte SIZE_4B                         = (byte) 0xE0;
+    static final byte SIZE_MASK                       = SIZE_4B;
+    static final byte SIZE_SHIFT                      = 5;
+
+    private MagnesiumPathArgument() {
+
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumValue.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/MagnesiumValue.java
new file mode 100644 (file)
index 0000000..403578d
--- /dev/null
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataOutput;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.opendaylight.yangtools.yang.common.Uint64;
+import org.opendaylight.yangtools.yang.common.Uint8;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Magnesium encoding value types. Serialized as a single byte.
+ */
+/*
+ * Note these constants are organized by their absolute value, which is slightly counter-intuitive when trying to make
+ * sense of what is going on.
+ *
+ * TODO: create some sort of facility which would provide symbolic names for debugging and documentation purposes.
+ */
+final class MagnesiumValue {
+    /**
+     * {@link Boolean#FALSE} value.
+     */
+    static final byte BOOLEAN_FALSE  = 0x00;
+    /**
+     * {@link Boolean#TRUE} value.
+     */
+    static final byte BOOLEAN_TRUE   = 0x01;
+    /**
+     * An {@link Empty} value.
+     */
+    static final byte EMPTY          = 0x02;
+    /**
+     * A Byte, followed by a byte holding the value.
+     */
+    static final byte INT8           = 0x03;
+    /**
+     * A Short, followed by a {@code short} holding the value.
+     */
+    static final byte INT16          = 0x04;
+    /**
+     * An Integer, followed by an {@code int} holding the value.
+     */
+    static final byte INT32          = 0x05;
+    /**
+     * A Long, followed by an {@code long} holding the value.
+     */
+    static final byte INT64          = 0x06;
+    /**
+     * A Uint8, followed by an {@code unsigned byte} holding the value.
+     */
+    static final byte UINT8          = 0x07;
+    /**
+     * A Uint16, followed by a {@code unsigned short} holding the value.
+     */
+    static final byte UINT16         = 0x08;
+    /**
+     * A Uint32, followed by an {@code unsigned int} holding the value.
+     */
+    static final byte UINT32         = 0x09;
+    /**
+     * A Uint64, followed by an {@code unsigned long} holding the value.
+     */
+    static final byte UINT64         = 0x0A;
+    /**
+     * A {@link String}, encoded through {@link DataOutput#writeUTF(String)}. Note this is generally true of any
+     * string with less then 16384 characters.
+     */
+    static final byte STRING_UTF     = 0x0B;
+    /**
+     * A {@link String}, encoded as an {@code unsigned short} followed by that many UTF8-encoded bytes.
+     */
+    static final byte STRING_2B      = 0x0C;
+    /**
+     * A {@link String}, encoded as an {@code int >= 0} followed by that many UTF8-encoded bytes.
+     */
+    static final byte STRING_4B      = 0x0D;
+    /**
+     * A {@link String}, encoded as an {@code int >= 0} followed by that many UTF16 characters, i.e. as produced by
+     * {@link DataOutput#writeChars(String)}.
+     */
+    static final byte STRING_CHARS   = 0x0E;
+    /**
+     * Utility 'reference coding' codepoint with {@code unsigned byte} offset. This is not a value type, but is used in
+     * context of various schema-related encodings like constant strings, QNameModule and similar.
+     */
+    static final byte STRING_REF_1B  = 0x0F;
+    /**
+     * Utility 'reference coding' codepoint with {@code unsigned short} offset. This is not a value type, but is used in
+     * context of various schema-related encodings like constant strings, QNameModule and similar.
+     */
+    static final byte STRING_REF_2B  = 0x10;
+    /**
+     * Utility 'reference coding' codepoint with {@code int} offset. This is not a value type, but is used in context of
+     * various schema-related encodings like constant strings, QNameModule and similar.
+     */
+    static final byte STRING_REF_4B  = 0x11;
+    /**
+     * A {@code byte[])}, encoded as a single {@code unsigned byte} followed by 128-383 bytes. Note that smaller
+     * arrays are encoded via {@link #BINARY_0} - {@link #BINARY_127} range.
+     */
+    static final byte BINARY_1B      = 0x12;
+    /**
+     * A {@code byte[])}, encoded as a single {@code unsigned short} followed by 384-65919 bytes. See also
+     * {@link #BINARY_1B}.
+     */
+    static final byte BINARY_2B      = 0x13;
+    /**
+     * A {@code byte[])}, encoded as a single {@code int} followed by that many bytes bytes. See also
+     * {@link #BINARY_2B}.
+     */
+    static final byte BINARY_4B      = 0x14;
+    /**
+     * A {@link YangInstanceIdentifier}, encoded as a single {@code int}, followed by that many components. See
+     * also {@link #YIID_0}, which offers optimized encoding for up to 31 components. Components are encoded using
+     * {@link MagnesiumPathArgument} coding.
+     */
+    static final byte YIID           = 0x15;
+    /**
+     * A QName literal. Encoded as QNameModule + String. This literal is expected to be memoized on receiver side, which
+     * assigns the next linear integer identifier. The sender will memoize it too and further references to this QName
+     * will be made via {@link #QNAME_REF_1B}, {@link #QNAME_REF_2B} or {@link #QNAME_REF_4B}.
+     *
+     * <p>
+     * Note that QNameModule (and String in this context) encoding works similarly -- it can only occur as part of a
+     * QName (coming from here or {@link MagnesiumPathArgument}) and is subject to the same memoization.
+     *
+     * <p>
+     * For example, given two QNames {@code foo = QName.create("foo", "abc")} and
+     * {@code bar = QName.create("foo", "def")}, if they are written in order {@code foo, bar, foo}, then the following
+     * events are emitted:
+     * <pre>
+     *   QNAME                (define QName, assign shorthand Q0)
+     *   STRING_UTF   "foo"   ("foo", assign shorthand S0, implies define QNameModule, assign shorthand M0)
+     *   STRING_EMPTY         (foo's non-existent revision)
+     *   STRING_UTF   "abc"   ("abc", assign shorthand S1)
+     *   QNAME                (define QName, assign shorthand Q1)
+     *   MODREF_1B    (byte)0 (reference M0)
+     *   STRING_UTF   "def"   ("def", assign shorthand S2)
+     *   QNAME_REF_1B (byte)0 (reference Q0)
+     * </pre>
+     */
+    // Design note: STRING_EMPTY is required to *NOT* establish a shortcut, as that is less efficient (and hence does
+    //              not make sense from the sender, the receiver or the serialization protocol itself.
+    static final byte QNAME          = 0x16;
+    /**
+     * Reference a QName previously defined via {@link #QNAME}. Reference number is encoded as {@code unsigned byte}.
+     */
+    static final byte QNAME_REF_1B   = 0x17;
+    /**
+     * Reference a QName previously defined via {@link #QNAME}. Reference number is encoded as {@code unsigned short}.
+     */
+    static final byte QNAME_REF_2B   = 0x18;
+    /**
+     * Reference a QName previously defined via {@link #QNAME}. Reference number is encoded as {@code int}.
+     */
+    static final byte QNAME_REF_4B   = 0x19;
+    /**
+     * Reference a previously defined QNameModule. Reference number is encoded as {@code unsigned byte}.
+     */
+    static final byte MODREF_1B      = 0x1A;
+    /**
+     * Reference a previously defined QNameModule. Reference number is encoded as {@code unsigned short}.
+     */
+    static final byte MODREF_2B      = 0x1B;
+    /**
+     * Reference a previously defined QNameModule. Reference number is encoded as {@code int}.
+     */
+    static final byte MODREF_4B      = 0x1C;
+
+    /**
+     * A {@link BigDecimal}, encoded through {@link DataOutput#writeUTF(String)}.
+     */
+    // This is legacy compatibility. At some point we will remove support for writing these.
+    static final byte BIGDECIMAL     = 0x1D;
+    /**
+     * A {@link BigInteger}, encoded through {@link DataOutput#writeUTF(String)}.
+     */
+    // This is legacy compatibility. At some point we will remove support for writing these.
+    static final byte BIGINTEGER     = 0x1E;
+
+    // 0x1F reserved
+
+    /**
+     * Byte value {@code 0}.
+     */
+    static final byte INT8_0         = 0x20;
+    /**
+     * Short value {@code 0}.
+     */
+    static final byte INT16_0        = 0x21;
+    /**
+     * Integer value {@code 0}.
+     */
+    static final byte INT32_0        = 0x22;
+    /**
+     * Long value {@code 0}.
+     */
+    static final byte INT64_0        = 0x23;
+    /**
+     * {@link Uint8#ZERO} value.
+     */
+    static final byte UINT8_0        = 0x24;
+    /**
+     * {@link Uint16#ZERO} value.
+     */
+    static final byte UINT16_0       = 0x25;
+    /**
+     * {@link Uint32#ZERO} value.
+     */
+    static final byte UINT32_0       = 0x26;
+    /**
+     * {@link Uint64#ZERO} value.
+     */
+    static final byte UINT64_0       = 0x27;
+    /**
+     * Empty String value ({@code ""}).
+     */
+    static final byte STRING_EMPTY   = 0x28;
+    /**
+     * {@link #INT32} with a 2-byte operand.
+     */
+    static final byte INT32_2B       = 0x29;
+    /**
+     * {@link #UINT32} with a 2-byte operand.
+     */
+    static final byte UINT32_2B      = 0x2A;
+    /**
+     * {@link #INT64} with a 4-byte operand.
+     */
+    static final byte INT64_4B       = 0x2B;
+    /**
+     * {@link #UINT64} with a 4-byte operand.
+     */
+    static final byte UINT64_4B      = 0x2C;
+
+    // 0x2D - 0x39 reserved
+
+    /**
+     * Empty bits value. This code point starts the range, where the number of bits can be extracted as
+     * {@code code & 0x1F)}. Last three values of this range are used to encode more than 28 entries.
+     */
+    static final byte BITS_0         = 0x40;
+    /**
+     * A bits value of up to 255 entries. Number of values is encoded as the following {@code unsigned byte}.
+     */
+    static final byte BITS_1B        = 0x5D;
+    /**
+     * A bits value of up to 65535 entries. Number of values is encoded as the following {@code unsigned short}.
+     */
+    static final byte BITS_2B        = 0x5E;
+    /**
+     * A bits value. Number of values is encoded as the following {@code int}.
+     */
+    static final byte BITS_4B        = 0x5F;
+
+    /**
+     * {@link YangInstanceIdentifier} with zero components. This code point starts the range ending with
+     * {@link #YIID_31}, where the number of components can be extracted as {@code code & 0x1F}. Identifiers with
+     * more than 31 components are encoded using {@link #YIID}.
+     */
+    static final byte YIID_0         = 0x60;
+    /**
+     * {@link YangInstanceIdentifier} with 31 components. See {@link #YIID_0}.
+     */
+    static final byte YIID_31        = 0x7F;
+
+    /**
+     * A {@code byte[]} with 0 bytes. This code point starts the range ending with {@link #BINARY_127}, where
+     * the number of bytes can be extracted as {@code code & 0x7F}. Arrays longer than 127 bytes are encoded using
+     * {@link #BINARY_1B}, {@link #BINARY_2B} and {@link #BINARY_4B} as needed.
+     */
+    static final byte BINARY_0       = (byte) 0x80;
+    /**
+     * A {@code byte[]} with 127 bytes. See {@link #BINARY_0}.
+     */
+    static final byte BINARY_127     = (byte) 0xFF;
+
+    private MagnesiumValue() {
+
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2NormalizedNodeInputStreamReader.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2NormalizedNodeInputStreamReader.java
new file mode 100644 (file)
index 0000000..80ec1b0
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import static com.google.common.base.Verify.verify;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+
+/**
+ * Neon SR2 specialization of AbstractLithiumDataInput. Unlike its Lithium counterpart, this format uses coding for
+ * QNameModules, QNames, NodeIdentifiers and AugmentationIdentifiers, thus reducing stream duplication.
+ */
+final class NeonSR2NormalizedNodeInputStreamReader extends AbstractLithiumDataInput {
+    private final ArrayList<NodeIdentifier> codedNodeIdentifiers = new ArrayList<>();
+    private final List<AugmentationIdentifier> codedAugments = new ArrayList<>();
+    private final List<QNameModule> codedModules = new ArrayList<>();
+    private final List<QName> codedQNames = new ArrayList<>();
+
+    NeonSR2NormalizedNodeInputStreamReader(final DataInput input) {
+        super(input);
+    }
+
+    @Override
+    public NormalizedNodeStreamVersion getVersion() {
+        return NormalizedNodeStreamVersion.NEON_SR2;
+    }
+
+    @Override
+    public QName readQName() throws IOException {
+        final byte valueType = readByte();
+        switch (valueType) {
+            case NeonSR2Tokens.IS_QNAME_CODE:
+                return codedQName(readInt());
+            case NeonSR2Tokens.IS_QNAME_VALUE:
+                return rawQName();
+            default:
+                throw new IOException("Unhandled QName value type " + valueType);
+        }
+    }
+
+    @Override
+    AugmentationIdentifier readAugmentationIdentifier() throws IOException {
+        final byte valueType = readByte();
+        switch (valueType) {
+            case NeonSR2Tokens.IS_AUGMENT_CODE:
+                return codedAugmentId(readInt());
+            case NeonSR2Tokens.IS_AUGMENT_VALUE:
+                return rawAugmentId();
+            default:
+                throw new IOException("Unhandled QName value type " + valueType);
+        }
+    }
+
+    @Override
+    NodeIdentifier readNodeIdentifier() throws IOException {
+        // NodeIdentifier rides on top of QName, with this method really saying 'interpret next QName as NodeIdentifier'
+        // to do that we inter-mingle with readQName()
+        final byte valueType = readByte();
+        switch (valueType) {
+            case NeonSR2Tokens.IS_QNAME_CODE:
+                return codedNodeIdentifier(readInt());
+            case NeonSR2Tokens.IS_QNAME_VALUE:
+                return rawNodeIdentifier();
+            default:
+                throw new IOException("Unhandled QName value type " + valueType);
+        }
+    }
+
+    private QNameModule readModule() throws IOException {
+        final byte valueType = readByte();
+        switch (valueType) {
+            case NeonSR2Tokens.IS_MODULE_CODE:
+                return codedModule(readInt());
+            case NeonSR2Tokens.IS_MODULE_VALUE:
+                return rawModule();
+            default:
+                throw new IOException("Unhandled QName value type " + valueType);
+        }
+    }
+
+    private NodeIdentifier codedNodeIdentifier(final int code) throws IOException {
+        final NodeIdentifier existing = codedNodeIdentifiers.size() > code ? codedNodeIdentifiers.get(code) : null;
+        return existing != null ? existing : storeNodeIdentifier(code, codedQName(code));
+    }
+
+    private NodeIdentifier rawNodeIdentifier() throws IOException {
+        // Capture size before it incremented
+        final int code = codedQNames.size();
+        return storeNodeIdentifier(code, rawQName());
+    }
+
+    private NodeIdentifier storeNodeIdentifier(final int code, final QName qname) {
+        final NodeIdentifier ret = NodeIdentifier.create(qname);
+        final int size = codedNodeIdentifiers.size();
+
+        if (code >= size) {
+            // Null-fill others
+            codedNodeIdentifiers.ensureCapacity(code + 1);
+            for (int i = size; i < code; ++i) {
+                codedNodeIdentifiers.add(null);
+            }
+
+            codedNodeIdentifiers.add(ret);
+        } else {
+            final NodeIdentifier check = codedNodeIdentifiers.set(code, ret);
+            verify(check == null);
+        }
+
+        return ret;
+    }
+
+    private QName codedQName(final int code) throws IOException {
+        try {
+            return codedQNames.get(code);
+        } catch (IndexOutOfBoundsException e) {
+            throw new IOException("QName code " + code + " was not found", e);
+        }
+    }
+
+    private QName rawQName() throws IOException {
+        final String localName = readCodedString();
+        final QNameModule module = readModule();
+        final QName qname = QNameFactory.create(module, localName);
+        codedQNames.add(qname);
+        return qname;
+    }
+
+    private AugmentationIdentifier codedAugmentId(final int code) throws IOException {
+        try {
+            return codedAugments.get(code);
+        } catch (IndexOutOfBoundsException e) {
+            throw new IOException("QName set code " + code + " was not found", e);
+        }
+    }
+
+    private AugmentationIdentifier rawAugmentId() throws IOException {
+        final AugmentationIdentifier aid = defaultReadAugmentationIdentifier();
+        codedAugments.add(aid);
+        return aid;
+    }
+
+    private QNameModule codedModule(final int code) throws IOException {
+        try {
+            return codedModules.get(code);
+        } catch (IndexOutOfBoundsException e) {
+            throw new IOException("Module code " + code + " was not found", e);
+        }
+    }
+
+    private QNameModule rawModule() throws IOException {
+        final String namespace = readCodedString();
+        final String revision = readCodedString();
+        final QNameModule mod = QNameFactory.createModule(namespace, revision);
+        codedModules.add(mod);
+        return mod;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2NormalizedNodeOutputStreamWriter.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2NormalizedNodeOutputStreamWriter.java
new file mode 100644 (file)
index 0000000..010b220
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+
+/**
+ * NormalizedNodeOutputStreamWriter will be used by distributed datastore to send normalized node in
+ * a stream.
+ * A stream writer wrapper around this class will write node objects to stream in recursive manner.
+ * for example - If you have a ContainerNode which has a two LeafNode as children, then
+ * you will first call
+ * {@link #startContainerNode(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier, int)},
+ * then will call
+ * {@link #leafNode(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier, Object)} twice
+ * and then, {@link #endNode()} to end container node.
+ *
+ * <p>Based on each node, the node type is also written to the stream, which helps in reconstructing the object,
+ * while reading.
+ */
+final class NeonSR2NormalizedNodeOutputStreamWriter extends AbstractLithiumDataOutput {
+    private final Map<AugmentationIdentifier, Integer> aidCodeMap = new HashMap<>();
+    private final Map<QNameModule, Integer> moduleCodeMap = new HashMap<>();
+    private final Map<QName, Integer> qnameCodeMap = new HashMap<>();
+
+    NeonSR2NormalizedNodeOutputStreamWriter(final DataOutput output) {
+        super(output);
+    }
+
+    @Override
+    short streamVersion() {
+        return TokenTypes.NEON_SR2_VERSION;
+    }
+
+    @Override
+    void writeQNameInternal(final QName qname) throws IOException {
+        final Integer value = qnameCodeMap.get(qname);
+        if (value == null) {
+            // Fresh QName, remember it and emit as three strings
+            qnameCodeMap.put(qname, qnameCodeMap.size());
+            writeByte(NeonSR2Tokens.IS_QNAME_VALUE);
+            defaultWriteQName(qname);
+        } else {
+            // We have already seen this QName: write its code
+            writeByte(NeonSR2Tokens.IS_QNAME_CODE);
+            writeInt(value);
+        }
+    }
+
+    @Override
+    void writeAugmentationIdentifier(final AugmentationIdentifier aid) throws IOException {
+        final Integer value = aidCodeMap.get(aid);
+        if (value == null) {
+            // Fresh AugmentationIdentifier, remember it and emit as three strings
+            aidCodeMap.put(aid, aidCodeMap.size());
+            writeByte(NeonSR2Tokens.IS_AUGMENT_VALUE);
+            defaultWriteAugmentationIdentifier(aid);
+        } else {
+            // We have already seen this AugmentationIdentifier: write its code
+            writeByte(NeonSR2Tokens.IS_AUGMENT_CODE);
+            writeInt(value);
+        }
+    }
+
+    @Override
+    void writeModule(final QNameModule module) throws IOException {
+        final Integer value = moduleCodeMap.get(module);
+        if (value == null) {
+            // Fresh QNameModule, remember it and emit as three strings
+            moduleCodeMap.put(module, moduleCodeMap.size());
+            writeByte(NeonSR2Tokens.IS_MODULE_VALUE);
+            defaultWriteModule(module);
+        } else {
+            // We have already seen this QNameModule: write its code
+            writeByte(NeonSR2Tokens.IS_MODULE_CODE);
+            writeInt(value);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2Tokens.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NeonSR2Tokens.java
new file mode 100644 (file)
index 0000000..9126747
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+/**
+ * Tokens used in Neon SR2 encoding. Note that Neon SR2 builds on top of Lithium, hence the token values must never
+ * overlap.
+ */
+final class NeonSR2Tokens {
+    /**
+     * Token indicating a QName encoded as an {@code int} back-reference into the per-stream dictionary.
+     */
+    static final byte IS_QNAME_CODE = 4;
+    /**
+     * Token indicating a QName encoded in full, implicitly assigning it the next dictionary code.
+     */
+    static final byte IS_QNAME_VALUE = 5;
+    /**
+     * Token indicating an AugmentationIdentifier encoded as an {@code int} back-reference.
+     */
+    static final byte IS_AUGMENT_CODE = 6;
+    /**
+     * Token indicating an AugmentationIdentifier encoded in full, implicitly assigning the next code.
+     */
+    static final byte IS_AUGMENT_VALUE = 7;
+    /**
+     * Token indicating a QNameModule encoded as an {@code int} back-reference.
+     */
+    static final byte IS_MODULE_CODE = 8;
+    /**
+     * Token indicating a QNameModule encoded in full, implicitly assigning the next code.
+     */
+    static final byte IS_MODULE_VALUE = 9;
+
+    // Utility class, never instantiated
+    private NeonSR2Tokens() {
+
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataInput.java
new file mode 100644 (file)
index 0000000..8d05061
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
+import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Interface for reading {@link NormalizedNode}s, {@link YangInstanceIdentifier}s, {@link PathArgument}s
+ * and {@link SchemaPath}s.
+ */
+@Beta
+public interface NormalizedNodeDataInput extends DataInput {
+    /**
+     * Interpret current stream position as a NormalizedNode, stream its events into a NormalizedNodeStreamWriter.
+     *
+     * @param writer Writer to emit events to
+     * @throws IOException if an error occurs
+     * @throws IllegalStateException if the dictionary has been detached
+     * @throws NullPointerException if {@code writer} is null
+     */
+    void streamNormalizedNode(NormalizedNodeStreamWriter writer) throws IOException;
+
+    /**
+     * Read a normalized node from the reader.
+     *
+     * @return Next node from the stream, or null if end of stream has been reached.
+     * @throws IOException if an error occurs
+     * @throws IllegalStateException if the dictionary has been detached
+     */
+    default NormalizedNode<?, ?> readNormalizedNode() throws IOException {
+        return readNormalizedNode(ReusableImmutableNormalizedNodeStreamWriter.create());
+    }
+
+    /**
+     * Read a normalized node from the reader, using the specified receiver to construct the result.
+     *
+     * @param receiver Reusable receiver used to assemble the node, expected to be in reset state
+     * @return Next node from the stream, or null if end of stream has been reached.
+     * @throws IOException if an error occurs
+     * @throws IllegalStateException if the dictionary has been detached
+     */
+    default NormalizedNode<?, ?> readNormalizedNode(final ReusableStreamReceiver receiver) throws IOException {
+        try {
+            streamNormalizedNode(receiver);
+            return receiver.getResult();
+        } finally {
+            // Always return the receiver to its reset state, even when streaming fails
+            receiver.reset();
+        }
+    }
+
+    /**
+     * Read a {@link YangInstanceIdentifier} from the stream.
+     *
+     * @return A YangInstanceIdentifier
+     * @throws IOException if an error occurs
+     */
+    YangInstanceIdentifier readYangInstanceIdentifier() throws IOException;
+
+    /**
+     * Read a {@link QName} from the stream.
+     *
+     * @return A QName
+     * @throws IOException if an error occurs
+     */
+    @NonNull QName readQName() throws IOException;
+
+    /**
+     * Read a {@link PathArgument} from the stream.
+     *
+     * @return A PathArgument
+     * @throws IOException if an error occurs
+     */
+    PathArgument readPathArgument() throws IOException;
+
+    /**
+     * Read a {@link SchemaPath} from the stream.
+     *
+     * @return A SchemaPath
+     * @throws IOException if an error occurs
+     */
+    SchemaPath readSchemaPath() throws IOException;
+
+    /**
+     * Return the version of the underlying input stream.
+     *
+     * @return Stream version
+     * @throws IOException if the version cannot be ascertained
+     */
+    NormalizedNodeStreamVersion getVersion() throws IOException;
+
+    /**
+     * Read an optional normalized node, encoded as a presence boolean followed by the node itself.
+     *
+     * @return The node, or empty if no node was recorded
+     * @throws IOException if an error occurs
+     */
+    default Optional<NormalizedNode<?, ?>> readOptionalNormalizedNode() throws IOException {
+        return readBoolean() ? Optional.of(readNormalizedNode()) : Optional.empty();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataOutput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataOutput.java
new file mode 100644 (file)
index 0000000..681553e
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.annotations.Beta;
+import java.io.DataOutput;
+import java.io.IOException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+
+/**
+ * Interface for emitting {@link NormalizedNode}s, {@link YangInstanceIdentifier}s, {@link PathArgument}s
+ * and {@link SchemaPath}s.
+ */
+@Beta
+public interface NormalizedNodeDataOutput extends AutoCloseable, DataOutput {
+    /**
+     * Write a {@link QName} to the stream.
+     *
+     * @param qname QName to write
+     * @throws IOException if an error occurs
+     */
+    void writeQName(@NonNull QName qname) throws IOException;
+
+    /**
+     * Write a {@link NormalizedNode} to the stream.
+     *
+     * @param normalizedNode Node to write
+     * @throws IOException if an error occurs
+     */
+    void writeNormalizedNode(@NonNull NormalizedNode<?, ?> normalizedNode) throws IOException;
+
+    /**
+     * Write a {@link PathArgument} to the stream.
+     *
+     * @param pathArgument Path argument to write
+     * @throws IOException if an error occurs
+     */
+    void writePathArgument(PathArgument pathArgument) throws IOException;
+
+    /**
+     * Write a {@link YangInstanceIdentifier} to the stream.
+     *
+     * @param identifier Identifier to write
+     * @throws IOException if an error occurs
+     */
+    void writeYangInstanceIdentifier(YangInstanceIdentifier identifier) throws IOException;
+
+    /**
+     * Write a {@link SchemaPath} to the stream.
+     *
+     * @param path Path to write
+     * @throws IOException if an error occurs
+     */
+    void writeSchemaPath(SchemaPath path) throws IOException;
+
+    @Override
+    void close() throws IOException;
+
+    /**
+     * Write an optional normalized node, encoded as a presence boolean followed by the node, if present.
+     *
+     * @param normalizedNode Node to write, may be null
+     * @throws IOException if an error occurs
+     */
+    default void writeOptionalNormalizedNode(final @Nullable NormalizedNode<?, ?> normalizedNode) throws IOException {
+        if (normalizedNode != null) {
+            writeBoolean(true);
+            writeNormalizedNode(normalizedNode);
+        } else {
+            writeBoolean(false);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputOutput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputOutput.java
new file mode 100644 (file)
index 0000000..2a9d1f7
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import org.eclipse.jdt.annotation.NonNull;
+
+@Beta
+public final class NormalizedNodeInputOutput {
+    private NormalizedNodeInputOutput() {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Creates a new {@link NormalizedNodeDataInput} instance that reads from the given input. This method first reads
+     * and validates that the input contains a valid NormalizedNode stream.
+     *
+     * @param input the DataInput to read from
+     * @return a new {@link NormalizedNodeDataInput} instance
+     * @throws IOException if an error occurs reading from the input
+     */
+    public static NormalizedNodeDataInput newDataInput(final @NonNull DataInput input) throws IOException {
+        return new VersionedNormalizedNodeDataInput(input).delegate();
+    }
+
+    /**
+     * Creates a new {@link NormalizedNodeDataInput} instance that reads from the given input. This method does not
+     * perform any initial validation of the input stream.
+     *
+     * @param input the DataInput to read from
+     * @return a new {@link NormalizedNodeDataInput} instance
+     */
+    public static NormalizedNodeDataInput newDataInputWithoutValidation(final @NonNull DataInput input) {
+        return new VersionedNormalizedNodeDataInput(input);
+    }
+
+    /**
+     * Creates a new {@link NormalizedNodeDataOutput} instance that writes to the given output and latest current
+     * stream version.
+     *
+     * @param output the DataOutput to write to
+     * @return a new {@link NormalizedNodeDataOutput} instance
+     */
+    public static NormalizedNodeDataOutput newDataOutput(final @NonNull DataOutput output) {
+        return new SodiumSR1DataOutput(output);
+    }
+
+    /**
+     * Creates a new {@link NormalizedNodeDataOutput} instance that writes to the given output.
+     *
+     * @param output the DataOutput to write to
+     * @param version Streaming version to use
+     * @return a new {@link NormalizedNodeDataOutput} instance
+     */
+    public static NormalizedNodeDataOutput newDataOutput(final @NonNull DataOutput output,
+            final @NonNull NormalizedNodeStreamVersion version) {
+        switch (version) {
+            case LITHIUM:
+                return new LithiumNormalizedNodeOutputStreamWriter(output);
+            case NEON_SR2:
+                return new NeonSR2NormalizedNodeOutputStreamWriter(output);
+            case SODIUM_SR1:
+                return new SodiumSR1DataOutput(output);
+            case MAGNESIUM:
+                return new MagnesiumDataOutput(output);
+            default:
+                throw new IllegalStateException("Unhandled version " + version);
+        }
+    }
+
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeStreamVersion.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeStreamVersion.java
new file mode 100644 (file)
index 0000000..e8adc25
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.annotations.Beta;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+
+/**
+ * Enumeration of all stream versions this implementation supports on both input and output.
+ */
+@Beta
+@NonNullByDefault
+public enum NormalizedNodeStreamVersion {
+    /**
+     * Original Lithium stream version.
+     */
+    LITHIUM,
+    /**
+     * Neon SR2 stream version, which adds per-stream dictionary coding for QNames and related constructs.
+     */
+    NEON_SR2,
+    /**
+     * Sodium SR1 stream version.
+     */
+    SODIUM_SR1,
+    /**
+     * Magnesium stream version.
+     */
+    MAGNESIUM;
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java
new file mode 100644 (file)
index 0000000..6735cdb
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * Provides various utility methods for serialization and de-serialization.
+ *
+ * @author Thomas Pantelis
+ */
+public final class SerializationUtils {
+    private SerializationUtils() {
+
+    }
+
+    @FunctionalInterface
+    public interface Applier<T> {
+        void apply(T instance, YangInstanceIdentifier path, NormalizedNode<?, ?> node);
+    }
+
+    public static Optional<NormalizedNode<?, ?>> readNormalizedNode(final DataInput in) throws IOException {
+        if (!in.readBoolean()) {
+            return Optional.empty();
+        }
+        return Optional.of(NormalizedNodeInputOutput.newDataInput(in).readNormalizedNode());
+    }
+
+    public static void writeNormalizedNode(final DataOutput out, final @Nullable NormalizedNode<?, ?> node)
+            throws IOException {
+        if (node != null) {
+            out.writeBoolean(true);
+
+            try (NormalizedNodeDataOutput stream = NormalizedNodeInputOutput.newDataOutput(out)) {
+                stream.writeNormalizedNode(node);
+            }
+        } else {
+            out.writeBoolean(false);
+        }
+    }
+
+    public static void writeNormalizedNode(final DataOutput out, final NormalizedNodeStreamVersion version,
+            final @Nullable NormalizedNode<?, ?> node) throws IOException {
+        if (node != null) {
+            out.writeBoolean(true);
+
+            try (NormalizedNodeDataOutput stream = NormalizedNodeInputOutput.newDataOutput(out, version)) {
+                stream.writeNormalizedNode(node);
+            }
+        } else {
+            out.writeBoolean(false);
+        }
+    }
+
+    public static YangInstanceIdentifier readPath(final DataInput in) throws IOException {
+        return NormalizedNodeInputOutput.newDataInput(in).readYangInstanceIdentifier();
+    }
+
+    public static void writePath(final DataOutput out, final @NonNull YangInstanceIdentifier path)
+            throws IOException {
+        try (NormalizedNodeDataOutput stream = NormalizedNodeInputOutput.newDataOutput(out)) {
+            stream.writeYangInstanceIdentifier(path);
+        }
+    }
+
+    public static void writePath(final DataOutput out, final NormalizedNodeStreamVersion version,
+            final @NonNull YangInstanceIdentifier path) throws IOException {
+        try (NormalizedNodeDataOutput stream = NormalizedNodeInputOutput.newDataOutput(out, version)) {
+            stream.writeYangInstanceIdentifier(path);
+        }
+    }
+
+    public static <T> void readNodeAndPath(final DataInput in, final T instance, final Applier<T> applier)
+            throws IOException {
+        final NormalizedNodeDataInput stream = NormalizedNodeInputOutput.newDataInput(in);
+        NormalizedNode<?, ?> node = stream.readNormalizedNode();
+        YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
+        applier.apply(instance, path, node);
+    }
+
+    public static void writeNodeAndPath(final DataOutput out, final YangInstanceIdentifier path,
+            final NormalizedNode<?, ?> node) throws IOException {
+        try (NormalizedNodeDataOutput stream = NormalizedNodeInputOutput.newDataOutput(out)) {
+            stream.writeNormalizedNode(node);
+            stream.writeYangInstanceIdentifier(path);
+        }
+    }
+
+    public static <T> void readPathAndNode(final DataInput in, final T instance, final Applier<T> applier)
+            throws IOException {
+        final NormalizedNodeDataInput stream = NormalizedNodeInputOutput.newDataInput(in);
+        YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
+        NormalizedNode<?, ?> node = stream.readNormalizedNode();
+        applier.apply(instance, path, node);
+    }
+
+    public static void writePathAndNode(final DataOutput out, final YangInstanceIdentifier path,
+            final NormalizedNode<?, ?> node) throws IOException {
+        try (NormalizedNodeDataOutput stream = NormalizedNodeInputOutput.newDataOutput(out)) {
+            stream.writeYangInstanceIdentifier(path);
+            stream.writeNormalizedNode(node);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SodiumSR1DataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SodiumSR1DataInput.java
new file mode 100644 (file)
index 0000000..d866411
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.math.BigInteger;
+
+/**
+ * Sodium SR1 specialization of {@link AbstractMagnesiumDataInput}, which additionally understands
+ * {@link BigInteger} values carried as UTF strings.
+ */
+final class SodiumSR1DataInput extends AbstractMagnesiumDataInput {
+    SodiumSR1DataInput(final DataInput input) {
+        super(input);
+    }
+
+    @Override
+    public NormalizedNodeStreamVersion getVersion() {
+        return NormalizedNodeStreamVersion.SODIUM_SR1;
+    }
+
+    @Override
+    BigInteger readBigInteger() throws IOException {
+        // FIXME: use string -> BigInteger cache
+        return new BigInteger(input.readUTF());
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SodiumSR1DataOutput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SodiumSR1DataOutput.java
new file mode 100644 (file)
index 0000000..4c432d3
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.math.BigInteger;
+
+/**
+ * Sodium SR1 specialization of {@link AbstractMagnesiumDataOutput}, which additionally supports writing
+ * {@link BigInteger} values.
+ */
+final class SodiumSR1DataOutput extends AbstractMagnesiumDataOutput {
+    SodiumSR1DataOutput(final DataOutput output) {
+        super(output);
+    }
+
+    @Override
+    short streamVersion() {
+        return TokenTypes.SODIUM_SR1_VERSION;
+    }
+
+    @Override
+    void writeValue(final BigInteger value) throws IOException {
+        // BigIntegers are marked with a dedicated value token and carried as their decimal string form
+        output.writeByte(MagnesiumValue.BIGINTEGER);
+        output.writeUTF(value.toString());
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/TokenTypes.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/TokenTypes.java
new file mode 100644 (file)
index 0000000..e09c3e9
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+final class TokenTypes {
+    // Utility class, never instantiated
+    private TokenTypes() {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * Marker byte prefixing every stream, ahead of the version {@code short}.
+     */
+    static final byte SIGNATURE_MARKER = (byte) 0xab;
+
+    /**
+     * Original stream version. Uses a per-stream dictionary for strings. QNames are serialized as three strings.
+     */
+    static final short LITHIUM_VERSION = 1;
+    /**
+     * Revised stream version. Unlike {@link #LITHIUM_VERSION}, QNames and QNameModules are using a per-stream
+     * dictionary, too.
+     */
+    static final short NEON_SR2_VERSION = 2;
+    /**
+     * From-scratch designed version shipping in Sodium SR1.
+     */
+    static final short SODIUM_SR1_VERSION = 3;
+    /**
+     * Magnesium version. Structurally matches {@link #SODIUM_SR1_VERSION}, but does not allow BigIntegers to be
+     * present.
+     */
+    static final short MAGNESIUM_VERSION = 4;
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/VersionedNormalizedNodeDataInput.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/VersionedNormalizedNodeDataInput.java
new file mode 100644 (file)
index 0000000..68618fc
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.DataInput;
+import java.io.IOException;
+
+final class VersionedNormalizedNodeDataInput extends ForwardingNormalizedNodeDataInput {
+    private DataInput input;
+    private NormalizedNodeDataInput delegate;
+
+    VersionedNormalizedNodeDataInput(final DataInput input) {
+        this.input = requireNonNull(input);
+    }
+
+    @Override
+    NormalizedNodeDataInput delegate() throws IOException {
+        if (delegate != null) {
+            return delegate;
+        }
+
+        final byte marker = input.readByte();
+        if (marker != TokenTypes.SIGNATURE_MARKER) {
+            throw defunct("Invalid signature marker: %d", marker);
+        }
+
+        final short version = input.readShort();
+        final NormalizedNodeDataInput ret;
+        switch (version) {
+            case TokenTypes.LITHIUM_VERSION:
+                ret = new LithiumNormalizedNodeInputStreamReader(input);
+                break;
+            case TokenTypes.NEON_SR2_VERSION:
+                ret = new NeonSR2NormalizedNodeInputStreamReader(input);
+                break;
+            case TokenTypes.SODIUM_SR1_VERSION:
+                ret = new SodiumSR1DataInput(input);
+                break;
+            case TokenTypes.MAGNESIUM_VERSION:
+                ret = new MagnesiumDataInput(input);
+                break;
+            default:
+                throw defunct("Unhandled stream version %s", version);
+        }
+
+        setDelegate(ret);
+        return ret;
+    }
+
+    private InvalidNormalizedNodeStreamException defunct(final String format, final Object... args) {
+        final InvalidNormalizedNodeStreamException ret = new InvalidNormalizedNodeStreamException(
+            String.format(format, args));
+        // Make sure the stream is not touched
+        setDelegate(new ForwardingNormalizedNodeDataInput() {
+            @Override
+            NormalizedNodeDataInput delegate() throws IOException {
+                throw new InvalidNormalizedNodeStreamException("Stream is not usable", ret);
+            }
+        });
+        return ret;
+    }
+
+    private void setDelegate(final NormalizedNodeDataInput delegate) {
+        this.delegate = requireNonNull(delegate);
+        input = null;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java
new file mode 100644 (file)
index 0000000..d6d409c
--- /dev/null
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
+
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.NoSuchElementException;
+import java.util.Optional;
+import javax.xml.transform.dom.DOMSource;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The NormalizedNodePruner removes all nodes from the input NormalizedNode that do not have a corresponding
+ * schema element in the passed in SchemaContext.
+ */
+abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWriter {
+    /**
+     * Lifecycle of this pruner: freshly constructed (UNITIALIZED), initialized for a path (OPEN) and
+     * finished producing a result (CLOSED).
+     */
+    enum State {
+        UNITIALIZED,
+        OPEN,
+        CLOSED;
+    }
+
+    /**
+     * Reference to a delegate start method taking only a path argument, used to funnel all start events
+     * through the common {@code enter()} bookkeeping.
+     */
+    @FunctionalInterface
+    private interface WriterMethod<T extends PathArgument> {
+
+        void apply(ReusableImmutableNormalizedNodeStreamWriter writer, T name) throws IOException;
+    }
+
+    /**
+     * Reference to a delegate start method taking a path argument and a child size hint.
+     */
+    @FunctionalInterface
+    private interface SizedWriterMethod<T extends PathArgument> {
+
+        void apply(ReusableImmutableNormalizedNodeStreamWriter writer, T name, int childSizeHint) throws IOException;
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractNormalizedNodePruner.class);
+
+    // Schema nodes corresponding to the currently-open NormalizedNode nesting
+    private final Deque<DataSchemaContextNode<?>> stack = new ArrayDeque<>();
+    private final ReusableImmutableNormalizedNodeStreamWriter delegate =
+            ReusableImmutableNormalizedNodeStreamWriter.create();
+    private final DataSchemaContextTree tree;
+
+    // Schema node matching the initialized path, null if the path has no schema (prune everything)
+    private DataSchemaContextNode<?> nodePathSchemaNode;
+    private State state = State.UNITIALIZED;
+    // Depth within the current schema-less (pruned) subtree; 0 while inside known schema
+    private int unknown;
+
+    // FIXME: package-private to support unguarded NormalizedNodePruner access
+    NormalizedNode<?, ?> normalizedNode;
+
+    AbstractNormalizedNodePruner(final DataSchemaContextTree tree) {
+        this.tree = requireNonNull(tree);
+    }
+
+    AbstractNormalizedNodePruner(final SchemaContext schemaContext) {
+        this(DataSchemaContextTree.from(schemaContext));
+    }
+
+    final DataSchemaContextTree getTree() {
+        return tree;
+    }
+
+    /**
+     * Reset this pruner to process a node at the specified path, clearing any state from a previous run.
+     *
+     * @param nodePath path of the node which will be streamed into this pruner
+     */
+    final void initialize(final YangInstanceIdentifier nodePath) {
+        nodePathSchemaNode = tree.findChild(nodePath).orElse(null);
+        unknown = 0;
+        normalizedNode = null;
+        stack.clear();
+        delegate.reset();
+        state = State.OPEN;
+    }
+
+    @Override
+    public void startLeafNode(final NodeIdentifier name) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startLeafNode, name);
+    }
+
+    @Override
+    public void startLeafSet(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startLeafSet, name, childSizeHint);
+    }
+
+    @Override
+    public void startOrderedLeafSet(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startOrderedLeafSet, name, childSizeHint);
+    }
+
+    @Override
+    public void startLeafSetEntryNode(final NodeWithValue<?> name) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startLeafSetEntryNode, name);
+    }
+
+    @Override
+    public void startContainerNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startContainerNode, name, childSizeHint);
+    }
+
+    @Override
+    public void startYangModeledAnyXmlNode(final NodeIdentifier nodeIdentifier, final int count) {
+        // FIXME: implement this
+        throw new UnsupportedOperationException("Not implemented yet");
+    }
+
+    @Override
+    public void startUnkeyedList(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startUnkeyedList, name, childSizeHint);
+    }
+
+    @Override
+    public void startUnkeyedListItem(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startUnkeyedListItem, name, childSizeHint);
+    }
+
+    @Override
+    public void startMapNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startMapNode, name, childSizeHint);
+    }
+
+    @Override
+    public void startMapEntryNode(final NodeIdentifierWithPredicates identifier, final int childSizeHint)
+            throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startMapEntryNode, identifier, childSizeHint);
+    }
+
+    @Override
+    public void startOrderedMapNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startOrderedMapNode, name, childSizeHint);
+    }
+
+    @Override
+    public void startChoiceNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startChoiceNode, name, childSizeHint);
+    }
+
+    @Override
+    public void startAugmentationNode(final AugmentationIdentifier identifier) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startAugmentationNode, identifier);
+    }
+
+    @Override
+    public boolean startAnyxmlNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
+        if (enter(name)) {
+            verify(delegate.startAnyxmlNode(name, objectModel),
+                "Unexpected failure to stream DOMSource node %s model %s", name, objectModel);
+        }
+        // Always request the value: domSourceValue() drops it while in an unknown subtree
+        return true;
+    }
+
+    @Override
+    public boolean startAnydataNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
+        // FIXME: we do not support anydata nodes yet
+        return false;
+    }
+
+    @Override
+    public void domSourceValue(final DOMSource value) throws IOException {
+        checkNotSealed();
+        // Forward only when the enclosing node has a known schema
+        if (unknown == 0) {
+            delegate.domSourceValue(value);
+        }
+    }
+
+    @Override
+    public void scalarValue(final Object value) throws IOException {
+        checkNotSealed();
+        // Forward only when the enclosing node has a known schema
+        if (unknown == 0) {
+            delegate.scalarValue(translateScalar(stack.peek(), value));
+        }
+    }
+
+    /**
+     * Translate a scalar value in the context of its schema node before it is forwarded. Subclasses may
+     * override this to perform value adaptation; the default implementation is a pass-through.
+     *
+     * @param context schema node of the current value; may be null when the stack is empty
+     * @param value value to translate
+     * @return the translated value
+     * @throws IOException if the value cannot be translated
+     */
+    Object translateScalar(final DataSchemaContextNode<?> context, final Object value) throws IOException {
+        // Default is pass-through
+        return value;
+    }
+
+    @Override
+    public void endNode() throws IOException {
+        checkNotSealed();
+
+        if (unknown == 0) {
+            try {
+                stack.pop();
+            } catch (NoSuchElementException e) {
+                throw new IllegalStateException("endNode called on an empty stack", e);
+            }
+            delegate.endNode();
+        } else {
+            unknown--;
+            if (unknown != 0) {
+                // Still at unknown, do not attempt to create result
+                return;
+            }
+        }
+
+        // Closing the outermost tracked node seals the result
+        if (stack.isEmpty()) {
+            normalizedNode = delegate.getResult();
+            state = State.CLOSED;
+        }
+    }
+
+    @Override
+    public void close() throws IOException {
+        state = State.CLOSED;
+        stack.clear();
+        delegate.close();
+    }
+
+    @Override
+    public void flush() throws IOException {
+        delegate.flush();
+    }
+
+    /**
+     * Return the resulting normalized node.
+     *
+     * @return Resulting node for the path, if it was not pruned
+     * @throws IllegalStateException if this pruner has not been closed
+     */
+    public final Optional<NormalizedNode<?, ?>> getResult() {
+        checkState(state == State.CLOSED, "Cannot get result in state %s", state);
+        return Optional.ofNullable(normalizedNode);
+    }
+
+    // Guard against value/end events arriving before initialize() or after the result was sealed
+    private void checkNotSealed() {
+        checkState(state == State.OPEN, "Illegal operation in state %s", state);
+    }
+
+    /**
+     * Common entry bookkeeping for all start events. Returns true when the node has a known schema and
+     * the event should be forwarded to the delegate; otherwise records it as part of an unknown subtree.
+     */
+    private boolean enter(final PathArgument name) {
+        checkNotSealed();
+
+        // Already inside a pruned subtree: just track nesting depth
+        if (unknown != 0) {
+            LOG.debug("Skipping child {} in unknown subtree", name);
+            unknown++;
+            return false;
+        }
+
+        final DataSchemaContextNode<?> schema;
+        final DataSchemaContextNode<?> parent = stack.peek();
+        if (parent != null) {
+            schema = parent.getChild(name);
+        } else {
+            // Top of the structure: match against the schema found for the initialized path
+            schema = nodePathSchemaNode;
+        }
+
+        if (schema == null) {
+            LOG.debug("Schema not found for {}", name);
+            // Start of a pruned subtree rooted at this node
+            unknown = 1;
+            return false;
+        }
+
+        stack.push(schema);
+        final DataSchemaNode dataSchema = schema.getDataSchemaNode();
+        if (dataSchema != null) {
+            delegate.nextDataSchemaNode(dataSchema);
+        }
+        return true;
+    }
+
+    private <A extends PathArgument> void enter(final WriterMethod<A> method, final A name) throws IOException {
+        if (enter(name)) {
+            method.apply(delegate, name);
+        }
+    }
+
+    private <A extends PathArgument> void enter(final SizedWriterMethod<A> method, final A name, final int size)
+            throws IOException {
+        if (enter(name)) {
+            method.apply(delegate, name, size);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePruner.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePruner.java
new file mode 100644 (file)
index 0000000..ba1b85f
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
+
+import java.net.URI;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * The NormalizedNodePruner removes all nodes from the input NormalizedNode that do not have a corresponding
+ * schema element in the passed in SchemaContext.
+ *
+ * @deprecated Use {@link AbstractNormalizedNodePruner} instead.
+ */
+@Deprecated(forRemoval = true)
+public class NormalizedNodePruner extends AbstractNormalizedNodePruner {
+    public static final URI BASE_NAMESPACE = URI.create("urn:ietf:params:xml:ns:netconf:base:1.0");
+
+    /**
+     * Create a pruner bound to the specified SchemaContext and immediately initialized for the given path.
+     *
+     * @param nodePath path of the node which will be streamed into this pruner
+     * @param schemaContext SchemaContext used to decide which nodes are retained
+     */
+    public NormalizedNodePruner(final YangInstanceIdentifier nodePath, final SchemaContext schemaContext) {
+        super(schemaContext);
+        initialize(nodePath);
+    }
+
+    /**
+     * Return the pruned result node, bypassing the state checks performed by {@code getResult()}.
+     *
+     * @return the resulting node, or null if the node was pruned or processing has not completed
+     */
+    public NormalizedNode<?, ?> normalizedNode() {
+        return normalizedNode;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/ReusableNormalizedNodePruner.java b/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/ReusableNormalizedNodePruner.java
new file mode 100644 (file)
index 0000000..751c96f
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
+
+import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.math.BigInteger;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.opendaylight.yangtools.yang.common.Uint64;
+import org.opendaylight.yangtools.yang.common.Uint8;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.TypedDataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.type.Uint16TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.Uint32TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.Uint64TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.Uint8TypeDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The NormalizedNodePruner removes all nodes from the input NormalizedNode that do not have a corresponding
+ * schema element in the passed in SchemaContext.
+ *
+ * <p>
+ * Unlike {@link NormalizedNodePruner}, this class can be reused multiple times and must be initialized before each use
+ * through {@link #initializeForPath(YangInstanceIdentifier)}.
+ */
+@Beta
+public abstract class ReusableNormalizedNodePruner extends AbstractNormalizedNodePruner {
+    /**
+     * Plain pruner without any value translation.
+     */
+    private static final class SimplePruner extends ReusableNormalizedNodePruner {
+        SimplePruner(final SchemaContext schemaContext) {
+            super(schemaContext);
+        }
+
+        SimplePruner(final DataSchemaContextTree tree) {
+            super(tree);
+        }
+
+        @Override
+        public ReusableNormalizedNodePruner duplicate() {
+            return new SimplePruner(getTree());
+        }
+    }
+
+    /**
+     * Pruner which additionally translates legacy boxed scalar values (Short/Integer/Long/BigInteger) into
+     * their Uint8/Uint16/Uint32/Uint64 equivalents when the schema declares an unsigned type.
+     */
+    private static final class UintAdaptingPruner extends ReusableNormalizedNodePruner {
+        private static final Logger LOG = LoggerFactory.getLogger(UintAdaptingPruner.class);
+
+        UintAdaptingPruner(final DataSchemaContextTree tree) {
+            super(tree);
+        }
+
+        @Override
+        public ReusableNormalizedNodePruner duplicate() {
+            return new UintAdaptingPruner(getTree());
+        }
+
+        @Override
+        Object translateScalar(final DataSchemaContextNode<?> context, final Object value) throws IOException {
+            final DataSchemaNode schema = context.getDataSchemaNode();
+            if (schema instanceof TypedDataSchemaNode) {
+                final TypeDefinition<?> type = ((TypedDataSchemaNode) schema).getType();
+                if (value instanceof Short && type instanceof Uint8TypeDefinition) {
+                    LOG.trace("Translating legacy uint8 {}", value);
+                    return Uint8.valueOf((Short) value);
+                } else if (value instanceof Integer && type instanceof Uint16TypeDefinition) {
+                    LOG.trace("Translating legacy uint16 {}", value);
+                    return Uint16.valueOf((Integer) value);
+                } else if (value instanceof Long && type instanceof Uint32TypeDefinition) {
+                    LOG.trace("Translating legacy uint32 {}", value);
+                    return Uint32.valueOf((Long) value);
+                } else if (value instanceof BigInteger && type instanceof Uint64TypeDefinition) {
+                    LOG.trace("Translating legacy uint64 {}", value);
+                    return Uint64.valueOf((BigInteger) value);
+                }
+            }
+            // Not a legacy unsigned value: pass through unchanged
+            return value;
+        }
+    }
+
+    ReusableNormalizedNodePruner(final SchemaContext schemaContext) {
+        super(schemaContext);
+    }
+
+    ReusableNormalizedNodePruner(final DataSchemaContextTree tree) {
+        super(tree);
+    }
+
+    /**
+     * Create a new pruner bound to a SchemaContext.
+     *
+     * @param schemaContext SchemaContext to use
+     * @return A new uninitialized pruner
+     * @throws NullPointerException if {@code schemaContext} is null
+     */
+    public static @NonNull ReusableNormalizedNodePruner forSchemaContext(final SchemaContext schemaContext) {
+        return new SimplePruner(schemaContext);
+    }
+
+    /**
+     * Create a new pruner bound to a DataSchemaContextTree. This is a more efficient alternative of
+     * {@link #forSchemaContext(SchemaContext)}.
+     *
+     * @param tree DataSchemaContextTree to use
+     * @return A new uninitialized pruner
+     * @throws NullPointerException if {@code tree} is null
+     */
+    public static @NonNull ReusableNormalizedNodePruner forDataSchemaContext(final DataSchemaContextTree tree) {
+        return new SimplePruner(tree);
+    }
+
+    /**
+     * Return a new instance, which is backed by the same DataSchemaContextTree, but does not share any state and
+     * is uninitialized. This is equivalent to {@link #forDataSchemaContext(DataSchemaContextTree)} and is provided
+     * for convenience.
+     *
+     * @return A new uninitialized pruner bound to the same SchemaContext as this one.
+     */
+    public abstract @NonNull ReusableNormalizedNodePruner duplicate();
+
+    /**
+     * Initialize this pruner for processing a node at specified path.
+     *
+     * @param path Path that will be processed next
+     * @throws NullPointerException if {@code path} is null
+     */
+    public final void initializeForPath(final YangInstanceIdentifier path) {
+        initialize(path);
+    }
+
+    /**
+     * Return a pruner which additionally adapts legacy boxed unsigned scalar values to their Uint equivalents.
+     * Note the returned pruner is a fresh, uninitialized instance bound to the same tree; it does not carry over
+     * any state from this pruner.
+     *
+     * @return A new uninitialized uint-adapting pruner bound to the same tree as this one.
+     */
+    public final @NonNull ReusableNormalizedNodePruner withUintAdaption() {
+        return new UintAdaptingPruner(getTree());
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java b/java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java
new file mode 100644 (file)
index 0000000..1f5259e
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.util;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import com.google.common.annotations.Beta;
+import java.util.Optional;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
+
+/**
+ * Abstract {@link DataTreeModificationCursor} which tracks the current path. Subclasses can get the current path
+ * via {@link #current()}. This class is NOT thread-safe.
+ *
+ * @author Thomas Pantelis
+ */
+@Beta
+public abstract class AbstractDataTreeModificationCursor implements DataTreeModificationCursor {
+    private YangInstanceIdentifier current = YangInstanceIdentifier.empty();
+
+    protected final YangInstanceIdentifier current() {
+        return current;
+    }
+
+    @Override
+    public final void enter(final PathArgument child) {
+        current = current.node(child);
+    }
+
+    @Override
+    public final void enter(final PathArgument... path) {
+        for (PathArgument arg : path) {
+            enter(arg);
+        }
+    }
+
+    @Override
+    public final void enter(final Iterable<PathArgument> path) {
+        for (PathArgument arg : path) {
+            enter(arg);
+        }
+    }
+
+    @Override
+    public final void exit() {
+        checkState(!current.isEmpty());
+        current = verifyNotNull(current.getParent());
+    }
+
+    @Override
+    public final void exit(final int depth) {
+        checkArgument(depth >= 0);
+
+        YangInstanceIdentifier next = current;
+        for (int i = 0; i < depth; ++i) {
+            next = next.getParent();
+            checkState(next != null);
+        }
+
+        current = next;
+    }
+
+    @Override
+    public final Optional<NormalizedNode<?, ?>> readNode(final PathArgument child) {
+        throw new UnsupportedOperationException("Not implemented");
+    }
+
+    @Override
+    public void close() {
+        // No-op
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java b/java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java
new file mode 100644 (file)
index 0000000..970b06f
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2017 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.FinalizablePhantomReference;
+import com.google.common.base.FinalizableReferenceQueue;
+import com.google.common.collect.Sets;
+import com.google.common.io.ByteSource;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.util.Iterator;
+import java.util.Set;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An {@link OutputStream} that starts buffering to a byte array, but switches to file buffering once the data
+ * reaches a configurable size. This class is thread-safe.
+ *
+ * @author Thomas Pantelis
+ */
+public class FileBackedOutputStream extends OutputStream {
+    private static final Logger LOG = LoggerFactory.getLogger(FileBackedOutputStream.class);
+
+    /**
+     * This stores the Cleanup PhantomReference instances statically. This is necessary because PhantomReferences
+     * need a hard reference so they're not garbage collected. Once finalized, the Cleanup PhantomReference removes
+     * itself from this map and thus becomes eligible for garbage collection.
+     */
+    @VisibleForTesting
+    static final Set<Cleanup> REFERENCE_CACHE = Sets.newConcurrentHashSet();
+
+    /**
+     * Used as the ReferenceQueue for the Cleanup PhantomReferences.
+     */
+    private static final FinalizableReferenceQueue REFERENCE_QUEUE = new FinalizableReferenceQueue();
+
+    // Byte count at which buffering switches from memory to a temporary file
+    private final int fileThreshold;
+    // Directory for the temporary file; null means the default temp location
+    private final String fileDirectory;
+
+    // In-memory buffer; null once buffering has switched to a file
+    @GuardedBy("this")
+    private MemoryOutputStream memory = new MemoryOutputStream();
+
+    // Current sink (memory or file); null once this stream has been closed
+    @GuardedBy("this")
+    private OutputStream out = memory;
+
+    // Backing temp file, created lazily when the threshold is exceeded
+    @GuardedBy("this")
+    private File file;
+
+    // Cached ByteSource view, created on first asByteSource() call
+    @GuardedBy("this")
+    private ByteSource source;
+
+    // Total bytes written; volatile so ByteSource.size() can read it without locking
+    private volatile long count;
+
+    /**
+     * Creates a new instance that uses the given file threshold, and does not reset the data when the
+     * {@link ByteSource} returned by {@link #asByteSource} is finalized.
+     *
+     * @param fileThreshold the number of bytes before the stream should switch to buffering to a file
+     * @param fileDirectory the directory in which to create the file if needed. If null, the default temp file
+     *                      location is used.
+     */
+    public FileBackedOutputStream(final int fileThreshold, @Nullable final String fileDirectory) {
+        this.fileThreshold = fileThreshold;
+        this.fileDirectory = fileDirectory;
+    }
+
+    /**
+     * Returns a readable {@link ByteSource} view of the data that has been written to this stream. This stream is
+     * closed and further attempts to write to it will result in an IOException.
+     *
+     * @return a ByteSource instance
+     * @throws IOException if close fails
+     */
+    public synchronized @NonNull ByteSource asByteSource() throws IOException {
+        close();
+
+        if (source == null) {
+            source = new ByteSource() {
+                @Override
+                public InputStream openStream() throws IOException {
+                    // Lock the enclosing stream so the memory/file decision is consistent
+                    synchronized (FileBackedOutputStream.this) {
+                        if (file != null) {
+                            return Files.newInputStream(file.toPath());
+                        } else {
+                            return new ByteArrayInputStream(memory.getBuffer(), 0, memory.getCount());
+                        }
+                    }
+                }
+
+                @Override
+                public long size() {
+                    return count;
+                }
+            };
+        }
+
+        return source;
+    }
+
+    @Override
+    @SuppressFBWarnings(value = "VO_VOLATILE_INCREMENT", justification = "Findbugs erroneously complains that the "
+        + "increment of count needs to be atomic even though it is inside a synchronized block.")
+    public synchronized void write(final int value) throws IOException {
+        possiblySwitchToFile(1);
+        out.write(value);
+        count++;
+    }
+
+    @Override
+    public synchronized void write(final byte[] bytes) throws IOException {
+        write(bytes, 0, bytes.length);
+    }
+
+    @Override
+    public synchronized void write(final byte[] bytes, final int off, final int len) throws IOException {
+        possiblySwitchToFile(len);
+        out.write(bytes, off, len);
+        count += len;
+    }
+
+    @Override
+    public synchronized void close() throws IOException {
+        if (out != null) {
+            // Null out first so the stream reports closed even if close() throws
+            OutputStream closeMe = out;
+            out = null;
+            closeMe.close();
+        }
+    }
+
+    @Override
+    public synchronized void flush() throws IOException {
+        if (out != null) {
+            out.flush();
+        }
+    }
+
+    /**
+     * Returns the total number of bytes written to this stream so far.
+     */
+    public synchronized long getCount() {
+        return count;
+    }
+
+    /**
+     * Calls {@link #close} if not already closed and, if data was buffered to a file, deletes the file.
+     */
+    public synchronized void cleanup() {
+        LOG.debug("In cleanup");
+
+        closeQuietly();
+
+        if (file != null) {
+            // Remove our Cleanup reference so the phantom-reference path does not double-delete
+            Iterator<Cleanup> iter = REFERENCE_CACHE.iterator();
+            while (iter.hasNext()) {
+                if (file.equals(iter.next().file)) {
+                    iter.remove();
+                    break;
+                }
+            }
+
+            LOG.debug("cleanup - deleting temp file {}", file);
+
+            deleteFile(file);
+            file = null;
+        }
+    }
+
+    // Close without propagating IOException; cleanup must proceed regardless
+    @Holding("this")
+    private void closeQuietly() {
+        try {
+            close();
+        } catch (IOException e) {
+            LOG.warn("Error closing output stream {}", out, e);
+        }
+    }
+
+    /**
+     * Checks if writing {@code len} bytes would go over threshold, and switches to file buffering if so.
+     */
+    @Holding("this")
+    private void possiblySwitchToFile(final int len) throws IOException {
+        if (out == null) {
+            throw new IOException("Stream already closed");
+        }
+
+        if (file == null && memory.getCount() + len > fileThreshold) {
+            File temp = File.createTempFile("FileBackedOutputStream", null,
+                    fileDirectory == null ? null : new File(fileDirectory));
+            temp.deleteOnExit();
+
+            LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.getCount() + len,
+                    fileThreshold, temp);
+
+            OutputStream transfer = null;
+            try {
+                transfer = Files.newOutputStream(temp.toPath());
+                transfer.write(memory.getBuffer(), 0, memory.getCount());
+                transfer.flush();
+
+                // We've successfully transferred the data; switch to writing to file
+                out = transfer;
+                file = temp;
+                memory = null;
+
+                // Registers a phantom reference which deletes the file if this stream is GC'd uncleaned
+                new Cleanup(this, file);
+            } catch (IOException e) {
+                if (transfer != null) {
+                    try {
+                        transfer.close();
+                    } catch (IOException ex) {
+                        LOG.debug("Error closing temp file {}", temp, ex);
+                    }
+                }
+
+                // Transfer failed: remove the partial file and keep buffering in memory
+                deleteFile(temp);
+                throw e;
+            }
+        }
+    }
+
+    // Best-effort delete; failure is logged, not thrown
+    private static void deleteFile(final File file) {
+        if (!file.delete()) {
+            LOG.warn("Could not delete temp file {}", file);
+        }
+    }
+
+    /**
+     * ByteArrayOutputStream that exposes its internals for efficiency.
+     */
+    private static class MemoryOutputStream extends ByteArrayOutputStream {
+        // Raw internal buffer; valid bytes are [0, getCount())
+        byte[] getBuffer() {
+            return buf;
+        }
+
+        // Number of valid bytes in the buffer
+        int getCount() {
+            return count;
+        }
+    }
+
+    /**
+     * PhantomReference that deletes the temp file when the FileBackedOutputStream is garbage collected.
+     */
+    private static class Cleanup extends FinalizablePhantomReference<FileBackedOutputStream> {
+        private final File file;
+
+        Cleanup(final FileBackedOutputStream referent, final File file) {
+            super(referent, REFERENCE_QUEUE);
+            this.file = file;
+
+            // Keep a hard reference so this phantom reference itself is not collected prematurely
+            REFERENCE_CACHE.add(this);
+
+            LOG.debug("Added Cleanup for temp file {}", file);
+        }
+
+        @Override
+        public void finalizeReferent() {
+            LOG.debug("In finalizeReferent");
+
+            // remove() returns false if cleanup() already handled this file explicitly
+            if (REFERENCE_CACHE.remove(this)) {
+                LOG.debug("finalizeReferent - deleting temp file {}", file);
+                deleteFile(file);
+            }
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamFactory.java b/java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamFactory.java
new file mode 100644 (file)
index 0000000..f2d6647
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * A factory for creating {@link FileBackedOutputStream} instances.
+ *
+ * @author Thomas Pantelis
+ * @see FileBackedOutputStream
+ */
+public class FileBackedOutputStreamFactory {
+    private final int fileThreshold;
+    private final String fileDirectory;
+
+    /**
+     * Constructor.
+     *
+     * @param fileThreshold the number of bytes before streams should switch to buffering to a file
+     * @param fileDirectory the directory in which to create files if needed. If null, the default temp file
+     *                      location is used.
+     */
+    public FileBackedOutputStreamFactory(final int fileThreshold, final @Nullable String fileDirectory) {
+        this.fileThreshold = fileThreshold;
+        this.fileDirectory = fileDirectory;
+    }
+
+    /**
+     * Creates a new {@link FileBackedOutputStream} with the settings configured for this factory.
+     *
+     * @return a {@link FileBackedOutputStream} instance
+     */
+    public FileBackedOutputStream newInstance() {
+        return new FileBackedOutputStream(fileThreshold, fileDirectory);
+    }
+
+    /**
+     * Creates a new {@link SharedFileBackedOutputStream} with the settings configured for this factory.
+     *
+     * @return a {@link SharedFileBackedOutputStream} instance
+     */
+    public SharedFileBackedOutputStream newSharedInstance() {
+        return new SharedFileBackedOutputStream(fileThreshold, fileDirectory);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/io/SharedFileBackedOutputStream.java b/java/org/opendaylight/controller/cluster/io/SharedFileBackedOutputStream.java
new file mode 100644 (file)
index 0000000..852473b
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import com.google.common.base.Preconditions;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+
+/**
+ * A FileBackedOutputStream that allows for sharing in that it maintains a usage count and the backing file isn't
+ * deleted until the usage count reaches 0. The usage count is initialized to 1 on construction. Subsequent users of
+ * the instance must call {@link #incrementUsageCount()}. The {@link #cleanup()} method decrements the usage count and,
+ * when it reaches 0, the {@link FileBackedOutputStream#cleanup()} is called to delete the backing file.
+ *
+ * @author Thomas Pantelis
+ */
+public class SharedFileBackedOutputStream extends FileBackedOutputStream {
+    private final AtomicInteger usageCount = new AtomicInteger(1);
+    @SuppressWarnings("rawtypes")
+    private Consumer onCleanupCallback;
+    private Object onCleanupContext;
+
+    public SharedFileBackedOutputStream(int fileThreshold, String fileDirectory) {
+        super(fileThreshold, fileDirectory);
+    }
+
+    /**
+     * Increments the usage count. This must be followed by a corresponding call to {@link #cleanup()} when this
+     * instance is no longer needed.
+     */
+    public void incrementUsageCount() {
+        usageCount.getAndIncrement();
+    }
+
+    /**
+     * Returns the current usage count.
+     *
+     * @return the current usage count
+     */
+    public int getUsageCount() {
+        return usageCount.get();
+    }
+
+    /**
+     * Sets the callback to be notified when {@link FileBackedOutputStream#cleanup()} is called to delete the backing
+     * file.
+     */
+    public <T> void setOnCleanupCallback(Consumer<T> callback, T context) {
+        onCleanupCallback = callback;
+        onCleanupContext = context;
+    }
+
+    /**
+     * Overridden to decrement the usage count.
+     */
+    @SuppressWarnings("unchecked")
+    @Override
+    public void cleanup() {
+        Preconditions.checkState(usageCount.get() > 0);
+
+        if (usageCount.decrementAndGet() == 0) {
+            super.cleanup();
+
+            if (onCleanupCallback != null) {
+                onCleanupCallback.accept(onCleanupContext);
+            }
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/AbortSlicing.java b/java/org/opendaylight/controller/cluster/messaging/AbortSlicing.java
new file mode 100644 (file)
index 0000000..9df2fd4
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.Serializable;
+import org.opendaylight.yangtools.concepts.Identifier;
+
/**
 * Message sent to abort slicing.
 *
 * @author Thomas Pantelis
 */
class AbortSlicing implements Serializable {
    private static final long serialVersionUID = 1L;

    // Identifies the sliced message whose transfer should be aborted.
    private final Identifier identifier;

    AbortSlicing(final Identifier identifier) {
        this.identifier = requireNonNull(identifier);
    }

    Identifier getIdentifier() {
        return identifier;
    }

    @Override
    public String toString() {
        return "AbortSlicing [identifier=" + identifier + "]";
    }

    // Serialization-proxy pattern: the Proxy instance is what actually gets written to the stream.
    private Object writeReplace() {
        return new Proxy(this);
    }

    /**
     * Externalizable proxy that carries only the identifier on the wire and re-creates the
     * AbortSlicing instance on deserialization via readResolve().
     */
    private static class Proxy implements Externalizable {
        private static final long serialVersionUID = 1L;

        private AbortSlicing abortSlicing;

        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
        @SuppressWarnings("checkstyle:RedundantModifier")
        public Proxy() {
        }

        Proxy(final AbortSlicing abortSlicing) {
            this.abortSlicing = abortSlicing;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeObject(abortSlicing.identifier);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
            abortSlicing = new AbortSlicing((Identifier) in.readObject());
        }

        // Replaces the deserialized Proxy with the reconstructed AbortSlicing.
        private Object readResolve() {
            return abortSlicing;
        }
    }
}
diff --git a/java/org/opendaylight/controller/cluster/messaging/AssembledMessageState.java b/java/org/opendaylight/controller/cluster/messaging/AssembledMessageState.java
new file mode 100644 (file)
index 0000000..842fdaf
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import com.google.common.base.Preconditions;
+import com.google.common.io.ByteSource;
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
+import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
+import org.opendaylight.yangtools.concepts.Identifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Maintains the state of an assembled message. This class is NOT thread-safe.
+ *
+ * @author Thomas Pantelis
+ */
+public class AssembledMessageState implements AutoCloseable {
+    private static final Logger LOG = LoggerFactory.getLogger(AssembledMessageState.class);
+
+    private final int totalSlices;
+    private final BufferedOutputStream bufferedStream;
+    private final FileBackedOutputStream fileBackedStream;
+    private final Identifier identifier;
+    private final String logContext;
+
+    private int lastSliceIndexReceived = SlicedMessageState.FIRST_SLICE_INDEX - 1;
+    private int lastSliceHashCodeReceived = SlicedMessageState.INITIAL_SLICE_HASH_CODE;
+    private boolean sealed = false;
+    private boolean closed = false;
+    private long assembledSize;
+
+    /**
+     * Constructor.
+     *
+     * @param identifier the identifier for this instance
+     * @param totalSlices the total number of slices to expect
+     * @param fileBackedStreamFactory factory for creating the FileBackedOutputStream instance used for streaming
+     * @param logContext the context for log messages
+     */
+    public AssembledMessageState(final Identifier identifier, final int totalSlices,
+            final FileBackedOutputStreamFactory fileBackedStreamFactory, final String logContext) {
+        this.identifier = identifier;
+        this.totalSlices = totalSlices;
+        this.logContext = logContext;
+
+        fileBackedStream = fileBackedStreamFactory.newInstance();
+        bufferedStream = new BufferedOutputStream(fileBackedStream);
+    }
+
+    /**
+     * Returns the identifier of this instance.
+     *
+     * @return the identifier
+     */
+    public Identifier getIdentifier() {
+        return identifier;
+    }
+
+    /**
+     * Adds a slice to the assembled stream.
+     *
+     * @param sliceIndex the index of the slice
+     * @param data the sliced data
+     * @param lastSliceHashCode the hash code of the last slice sent
+     * @return true if this is the last slice received, false otherwise
+     * @throws MessageSliceException
+     *         <ul>
+     *         <li>if the slice index is invalid</li>
+     *         <li>if the last slice hash code is invalid</li>
+     *         <li>if an error occurs writing the data to the stream</li>
+     *         </ul>
+     *         In addition, this instance is automatically closed and can no longer be used.
+     * @throws AssemblerSealedException if this instance is already sealed (ie has received all the slices)
+     * @throws AssemblerClosedException if this instance is already closed
+     */
+    public boolean addSlice(final int sliceIndex, final byte[] data, final int lastSliceHashCode)
+            throws MessageSliceException {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("{}: addSlice: identifier: {}, sliceIndex: {}, lastSliceIndex: {}, assembledSize: {}, "
+                    + "sliceHashCode: {}, lastSliceHashCode: {}", logContext, identifier, sliceIndex,
+                    lastSliceIndexReceived, assembledSize, lastSliceHashCode, lastSliceHashCodeReceived);
+        }
+
+        try {
+            validateSlice(sliceIndex, lastSliceHashCode);
+
+            assembledSize += data.length;
+            lastSliceIndexReceived = sliceIndex;
+            lastSliceHashCodeReceived = Arrays.hashCode(data);
+
+            bufferedStream.write(data);
+
+            sealed = sliceIndex == totalSlices;
+            if (sealed) {
+                bufferedStream.close();
+            }
+        } catch (IOException e) {
+            close();
+            throw new MessageSliceException(String.format("Error writing data for slice %d of message %s",
+                    sliceIndex, identifier), e);
+        }
+
+        return sealed;
+    }
+
+    /**
+     * Returns the assembled bytes as a ByteSource. This method must only be called after this instance is sealed.
+     *
+     * @return a ByteSource containing the assembled bytes
+     * @throws IOException if an error occurs obtaining the assembled bytes
+     * @throws IllegalStateException is this instance is not sealed
+     */
+    public ByteSource getAssembledBytes() throws IOException {
+        Preconditions.checkState(sealed, "Last slice not received yet");
+        return fileBackedStream.asByteSource();
+    }
+
+    private void validateSlice(final int sliceIndex, final int lastSliceHashCode) throws MessageSliceException {
+        if (closed) {
+            throw new AssemblerClosedException(identifier);
+        }
+
+        if (sealed) {
+            throw new AssemblerSealedException(String.format(
+                    "Received slice index for message %s but all %d expected slices have already already received.",
+                    identifier, totalSlices));
+        }
+
+        if (lastSliceIndexReceived + 1 != sliceIndex) {
+            close();
+            throw new MessageSliceException(String.format("Expected sliceIndex %d but got %d for message %s",
+                    lastSliceIndexReceived + 1, sliceIndex, identifier), true);
+        }
+
+        if (lastSliceHashCode != lastSliceHashCodeReceived) {
+            close();
+            throw new MessageSliceException(String.format("The hash code of the recorded last slice (%d) does not "
+                    + "match the senders last hash code (%d) for message %s", lastSliceHashCodeReceived,
+                    lastSliceHashCode, identifier), true);
+        }
+    }
+
+    @Override
+    public void close() {
+        if (closed) {
+            return;
+        }
+
+        closed = true;
+        if (!sealed) {
+            try {
+                bufferedStream.close();
+            } catch (IOException e) {
+                LOG.debug("{}: Error closing output stream", logContext, e);
+            }
+        }
+
+        fileBackedStream.cleanup();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/AssemblerClosedException.java b/java/org/opendaylight/controller/cluster/messaging/AssemblerClosedException.java
new file mode 100644 (file)
index 0000000..83c8dcb
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import org.opendaylight.yangtools.concepts.Identifier;
+
+/**
+ * A MessageSliceException indicating the message assembler has already been closed.
+ *
+ * @author Thomas Pantelis
+ */
+public class AssemblerClosedException extends MessageSliceException {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Constructs an instance.
+     *
+     * @param identifier the identifier whose state was closed
+     */
+    public AssemblerClosedException(final Identifier identifier) {
+        super(String.format("Message assembler for %s has already been closed", identifier), false);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/AssemblerSealedException.java b/java/org/opendaylight/controller/cluster/messaging/AssemblerSealedException.java
new file mode 100644 (file)
index 0000000..df9ac63
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
/**
 * A MessageSliceException indicating the message assembler has already been sealed.
 *
 * @author Thomas Pantelis
 */
public class AssemblerSealedException extends MessageSliceException {
    private static final long serialVersionUID = 1L;

    /**
     * Constructs an instance. The resulting exception is not retriable.
     *
     * @param message the detail message
     */
    public AssemblerSealedException(String message) {
        super(message, false);
    }
}
diff --git a/java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java b/java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java
new file mode 100644 (file)
index 0000000..297186d
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalNotification;
+import com.google.common.io.ByteSource;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiConsumer;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
+import org.opendaylight.yangtools.concepts.Identifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class re-assembles messages sliced into smaller chunks by {@link MessageSlicer}.
+ *
+ * @author Thomas Pantelis
+ * @see MessageSlicer
+ */
+public final  class MessageAssembler implements AutoCloseable {
+    private static final Logger LOG = LoggerFactory.getLogger(MessageAssembler.class);
+
+    private final Cache<Identifier, AssembledMessageState> stateCache;
+    private final FileBackedOutputStreamFactory fileBackedStreamFactory;
+    private final BiConsumer<Object, ActorRef> assembledMessageCallback;
+    private final String logContext;
+
+    MessageAssembler(final Builder builder) {
+        this.fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory,
+                "FiledBackedStreamFactory cannot be null");
+        this.assembledMessageCallback = requireNonNull(builder.assembledMessageCallback,
+                "assembledMessageCallback cannot be null");
+        this.logContext = builder.logContext;
+
+        stateCache = CacheBuilder.newBuilder()
+                .expireAfterAccess(builder.expireStateAfterInactivityDuration, builder.expireStateAfterInactivityUnit)
+                .removalListener(this::stateRemoved).build();
+    }
+
+    /**
+     * Returns a new Builder for creating MessageAssembler instances.
+     *
+     * @return a Builder instance
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     * Checks if the given message is handled by this class. If so, it should be forwarded to the
+     * {@link #handleMessage(Object, ActorRef)} method
+     *
+     * @param message the message to check
+     * @return true if handled, false otherwise
+     */
+    public static boolean isHandledMessage(final Object message) {
+        return message instanceof MessageSlice || message instanceof AbortSlicing;
+    }
+
+    @Override
+    public void close() {
+        LOG.debug("{}: Closing", logContext);
+        stateCache.invalidateAll();
+    }
+
+    /**
+     * Checks for and removes assembled message state that has expired due to inactivity from the slicing component
+     * on the other end.
+     */
+    public void checkExpiredAssembledMessageState() {
+        if (stateCache.size() > 0) {
+            stateCache.cleanUp();
+        }
+    }
+
+    /**
+     * Invoked to handle message slices and other messages pertaining to this class.
+     *
+     * @param message the message
+     * @param sendTo the reference of the actor to which subsequent message slices should be sent
+     * @return true if the message was handled, false otherwise
+     */
+    public boolean handleMessage(final Object message, final @NonNull ActorRef sendTo) {
+        if (message instanceof MessageSlice) {
+            LOG.debug("{}: handleMessage: {}", logContext, message);
+            onMessageSlice((MessageSlice) message, sendTo);
+            return true;
+        } else if (message instanceof AbortSlicing) {
+            LOG.debug("{}: handleMessage: {}", logContext, message);
+            onAbortSlicing((AbortSlicing) message);
+            return true;
+        }
+
+        return false;
+    }
+
+    private void onMessageSlice(final MessageSlice messageSlice, final ActorRef sendTo) {
+        final Identifier identifier = messageSlice.getIdentifier();
+        try {
+            final AssembledMessageState state = stateCache.get(identifier, () -> createState(messageSlice));
+            processMessageSliceForState(messageSlice, state, sendTo);
+        } catch (ExecutionException e) {
+            final MessageSliceException messageSliceEx;
+            final Throwable cause = e.getCause();
+            if (cause instanceof MessageSliceException) {
+                messageSliceEx = (MessageSliceException) cause;
+            } else {
+                messageSliceEx = new MessageSliceException(String.format(
+                        "Error creating state for identifier %s", identifier), cause);
+            }
+
+            messageSlice.getReplyTo().tell(MessageSliceReply.failed(identifier, messageSliceEx, sendTo),
+                    ActorRef.noSender());
+        }
+    }
+
+    private AssembledMessageState createState(final MessageSlice messageSlice) throws MessageSliceException {
+        final Identifier identifier = messageSlice.getIdentifier();
+        if (messageSlice.getSliceIndex() == SlicedMessageState.FIRST_SLICE_INDEX) {
+            LOG.debug("{}: Received first slice for {} - creating AssembledMessageState", logContext, identifier);
+            return new AssembledMessageState(identifier, messageSlice.getTotalSlices(),
+                    fileBackedStreamFactory, logContext);
+        }
+
+        LOG.debug("{}: AssembledMessageState not found for {} - returning failed reply", logContext, identifier);
+        throw new MessageSliceException(String.format(
+                "No assembled state found for identifier %s and slice index %s", identifier,
+                messageSlice.getSliceIndex()), true);
+    }
+
+    private void processMessageSliceForState(final MessageSlice messageSlice, final AssembledMessageState state,
+            final ActorRef sendTo) {
+        final Identifier identifier = messageSlice.getIdentifier();
+        final ActorRef replyTo = messageSlice.getReplyTo();
+        Object reAssembledMessage = null;
+        synchronized (state) {
+            final int sliceIndex = messageSlice.getSliceIndex();
+            try {
+                final MessageSliceReply successReply = MessageSliceReply.success(identifier, sliceIndex, sendTo);
+                if (state.addSlice(sliceIndex, messageSlice.getData(), messageSlice.getLastSliceHashCode())) {
+                    LOG.debug("{}: Received last slice for {}", logContext, identifier);
+
+                    reAssembledMessage = reAssembleMessage(state);
+
+                    replyTo.tell(successReply, ActorRef.noSender());
+                    removeState(identifier);
+                } else {
+                    LOG.debug("{}: Added slice for {} - expecting more", logContext, identifier);
+                    replyTo.tell(successReply, ActorRef.noSender());
+                }
+            } catch (MessageSliceException e) {
+                LOG.warn("{}: Error processing {}", logContext, messageSlice, e);
+                replyTo.tell(MessageSliceReply.failed(identifier, e, sendTo), ActorRef.noSender());
+                removeState(identifier);
+            }
+        }
+
+        if (reAssembledMessage != null) {
+            LOG.debug("{}: Notifying callback of re-assembled message {}", logContext, reAssembledMessage);
+            assembledMessageCallback.accept(reAssembledMessage, replyTo);
+        }
+    }
+
+    private static Object reAssembleMessage(final AssembledMessageState state) throws MessageSliceException {
+        try {
+            final ByteSource assembledBytes = state.getAssembledBytes();
+            try (ObjectInputStream in = new ObjectInputStream(assembledBytes.openStream())) {
+                return in.readObject();
+            }
+
+        } catch (IOException | ClassNotFoundException  e) {
+            throw new MessageSliceException(String.format("Error re-assembling bytes for identifier %s",
+                    state.getIdentifier()), e);
+        }
+    }
+
+    private void onAbortSlicing(final AbortSlicing message) {
+        removeState(message.getIdentifier());
+    }
+
+    private void removeState(final Identifier identifier) {
+        LOG.debug("{}: Removing state for {}", logContext, identifier);
+        stateCache.invalidate(identifier);
+    }
+
+    private void stateRemoved(final RemovalNotification<Identifier, AssembledMessageState> notification) {
+        if (notification.wasEvicted()) {
+            LOG.warn("{}: AssembledMessageState for {} was expired from the cache", logContext, notification.getKey());
+        } else {
+            LOG.debug("{}: AssembledMessageState for {} was removed from the cache due to {}", logContext,
+                    notification.getKey(), notification.getCause());
+        }
+
+        notification.getValue().close();
+    }
+
+    @VisibleForTesting
+    boolean hasState(final Identifier forIdentifier) {
+        boolean exists = stateCache.getIfPresent(forIdentifier) != null;
+        stateCache.cleanUp();
+        return exists;
+    }
+
+    public static class Builder {
+        private FileBackedOutputStreamFactory fileBackedStreamFactory;
+        private BiConsumer<Object, ActorRef> assembledMessageCallback;
+        private long expireStateAfterInactivityDuration = 1;
+        private TimeUnit expireStateAfterInactivityUnit = TimeUnit.MINUTES;
+        private String logContext = "<no-context>";
+
+        /**
+         * Sets the factory for creating FileBackedOutputStream instances used for streaming messages.
+         *
+         * @param newFileBackedStreamFactory the factory for creating FileBackedOutputStream instances
+         * @return this Builder
+         */
+        public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) {
+            this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
+            return this;
+        }
+
+        /**
+         * Sets the Consumer callback for assembled messages. The callback takes the assembled message and the
+         * original sender ActorRef as arguments.
+         *
+         * @param newAssembledMessageCallback the Consumer callback
+         * @return this Builder
+         */
+        public Builder assembledMessageCallback(final BiConsumer<Object, ActorRef> newAssembledMessageCallback) {
+            this.assembledMessageCallback = newAssembledMessageCallback;
+            return this;
+        }
+
+        /**
+         * Sets the duration and time unit whereby assembled message state is purged from the cache due to
+         * inactivity from the slicing component on the other end. By default, state is purged after 1 minute of
+         * inactivity.
+         *
+         * @param duration the length of time after which a state entry is purged
+         * @param unit the unit the duration is expressed in
+         * @return this Builder
+         */
+        public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) {
+            checkArgument(duration > 0, "duration must be > 0");
+            this.expireStateAfterInactivityDuration = duration;
+            this.expireStateAfterInactivityUnit = unit;
+            return this;
+        }
+
+        /**
+         * Sets the context for log messages.
+         *
+         * @param newLogContext the log context
+         * @return this Builder
+         */
+        public Builder logContext(final String newLogContext) {
+            this.logContext = newLogContext;
+            return this;
+        }
+
+        /**
+         * Builds a new MessageAssembler instance.
+         *
+         * @return a new MessageAssembler
+         */
+        public MessageAssembler build() {
+            return new MessageAssembler(this);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/MessageSlice.java b/java/org/opendaylight/controller/cluster/messaging/MessageSlice.java
new file mode 100644 (file)
index 0000000..50e0460
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.Serializable;
+import org.opendaylight.yangtools.concepts.Identifier;
+
/**
 * Represents a sliced message chunk.
 *
 * @author Thomas Pantelis
 */
public class MessageSlice implements Serializable {
    private static final long serialVersionUID = 1L;

    // Identifies the original (unsliced) message this chunk belongs to.
    private final Identifier identifier;
    // The raw bytes of this chunk.
    private final byte[] data;
    // 1-based position of this chunk within the sliced message.
    private final int sliceIndex;
    private final int totalSlices;
    // Hash code of the previously sent slice's data, used by the assembler to detect
    // out-of-order or corrupted transfers.
    private final int lastSliceHashCode;
    // Actor to which the receiver should send MessageSliceReply responses.
    private final ActorRef replyTo;

    MessageSlice(final Identifier identifier, final byte[] data, final int sliceIndex, final int totalSlices,
            final int lastSliceHashCode, final ActorRef replyTo) {
        this.identifier = requireNonNull(identifier);
        this.data = requireNonNull(data);
        this.sliceIndex = sliceIndex;
        this.totalSlices = totalSlices;
        this.lastSliceHashCode = lastSliceHashCode;
        this.replyTo = requireNonNull(replyTo);
    }

    public Identifier getIdentifier() {
        return identifier;
    }

    @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but "
            + "this is OK since this class is merely a DTO and does not process the byte[] internally."
            + "Also it would be inefficient to create a return copy as the byte[] could be large.")
    public byte[] getData() {
        return data;
    }

    public int getSliceIndex() {
        return sliceIndex;
    }

    public int getTotalSlices() {
        return totalSlices;
    }

    public int getLastSliceHashCode() {
        return lastSliceHashCode;
    }

    public ActorRef getReplyTo() {
        return replyTo;
    }

    @Override
    public String toString() {
        return "MessageSlice [identifier=" + identifier + ", data.length=" + data.length + ", sliceIndex="
                + sliceIndex + ", totalSlices=" + totalSlices + ", lastSliceHashCode=" + lastSliceHashCode
                + ", replyTo=" + replyTo + "]";
    }

    // Serialization-proxy pattern: the Proxy instance is what actually gets written to the stream.
    private Object writeReplace() {
        return new Proxy(this);
    }

    /**
     * Externalizable proxy for MessageSlice. The ActorRef is converted to/from its serialized
     * actor path since ActorRef itself is resolved against the local actor system on read.
     * Fields are written and read in the same fixed order; changing that order breaks wire
     * compatibility.
     */
    private static class Proxy implements Externalizable {
        private static final long serialVersionUID = 1L;

        private MessageSlice messageSlice;

        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
        @SuppressWarnings("checkstyle:RedundantModifier")
        public Proxy() {
        }

        Proxy(final MessageSlice messageSlice) {
            this.messageSlice = messageSlice;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeObject(messageSlice.identifier);
            out.writeInt(messageSlice.sliceIndex);
            out.writeInt(messageSlice.totalSlices);
            out.writeInt(messageSlice.lastSliceHashCode);
            out.writeObject(messageSlice.data);
            out.writeObject(Serialization.serializedActorPath(messageSlice.replyTo));
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
            Identifier identifier = (Identifier) in.readObject();
            int sliceIndex = in.readInt();
            int totalSlices = in.readInt();
            int lastSliceHashCode = in.readInt();
            byte[] data = (byte[])in.readObject();
            // Resolve the serialized actor path back to an ActorRef in the current actor system.
            ActorRef replyTo = JavaSerializer.currentSystem().value().provider()
                    .resolveActorRef((String) in.readObject());

            messageSlice = new MessageSlice(identifier, data, sliceIndex, totalSlices, lastSliceHashCode, replyTo);
        }

        // Replaces the deserialized Proxy with the reconstructed MessageSlice.
        private Object readResolve() {
            return messageSlice;
        }
    }
}
diff --git a/java/org/opendaylight/controller/cluster/messaging/MessageSliceException.java b/java/org/opendaylight/controller/cluster/messaging/MessageSliceException.java
new file mode 100644 (file)
index 0000000..09ee723
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+/**
+ * An exception indicating a message slice failure.
+ *
+ * @author Thomas Pantelis
+ */
+public class MessageSliceException extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    private final boolean isRetriable;
+
+    /**
+     * Constructs an instance.
+     *
+     * @param message the detail message
+     * @param cause the cause
+     */
+    public MessageSliceException(final String message, final Throwable cause) {
+        super(message, cause);
+        isRetriable = false;
+    }
+
+    /**
+     * Constructs an instance.
+     *
+     * @param message the detail message
+     * @param isRetriable if true, indicates the original operation can be retried
+     */
+    public MessageSliceException(final String message, final boolean isRetriable) {
+        super(message);
+        this.isRetriable = isRetriable;
+    }
+
+    /**
+     * Returns whether or not the original operation can be retried.
+     *
+     * @return true if it can be retried, false otherwise
+     */
+    public boolean isRetriable() {
+        return isRetriable;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java b/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java
new file mode 100644 (file)
index 0000000..0cc3668
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.yangtools.concepts.Identifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Identifier for a message slice that is composed of a client-supplied Identifier and an internal counter value.
+ *
+ * @author Thomas Pantelis
+ */
+final class MessageSliceIdentifier implements Identifier {
+    private static final long serialVersionUID = 1L;
+    private static final AtomicLong ID_COUNTER = new AtomicLong(1);
+
+    private final Identifier clientIdentifier;
+    private final long slicerId;
+    private final long messageId;
+
+    MessageSliceIdentifier(final Identifier clientIdentifier, final long slicerId) {
+        this(clientIdentifier, slicerId, ID_COUNTER.getAndIncrement());
+    }
+
+    private MessageSliceIdentifier(final Identifier clientIdentifier, final long slicerId, final long messageId) {
+        this.clientIdentifier = requireNonNull(clientIdentifier);
+        this.messageId = messageId;
+        this.slicerId = slicerId;
+    }
+
+    Identifier getClientIdentifier() {
+        return clientIdentifier;
+    }
+
+    long getSlicerId() {
+        return slicerId;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + clientIdentifier.hashCode();
+        result = prime * result + (int) (messageId ^ messageId >>> 32);
+        result = prime * result + (int) (slicerId ^ slicerId >>> 32);
+        return result;
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (!(obj instanceof MessageSliceIdentifier)) {
+            return false;
+        }
+
+        MessageSliceIdentifier other = (MessageSliceIdentifier) obj;
+        return other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId
+                && other.messageId == messageId;
+    }
+
+    @Override
+    public String toString() {
+        return "MessageSliceIdentifier [clientIdentifier=" + clientIdentifier + ", slicerId=" + slicerId
+                + ", messageId=" + messageId + "]";
+    }
+
+    private Object writeReplace() {
+        return new Proxy(this);
+    }
+
+    private static class Proxy implements Externalizable {
+        private static final long serialVersionUID = 1L;
+
+        private MessageSliceIdentifier messageSliceId;
+
+        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
+        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
+        @SuppressWarnings("checkstyle:RedundantModifier")
+        public Proxy() {
+        }
+
+        Proxy(final MessageSliceIdentifier messageSliceId) {
+            this.messageSliceId = messageSliceId;
+        }
+
+        @Override
+        public void writeExternal(final ObjectOutput out) throws IOException {
+            out.writeObject(messageSliceId.clientIdentifier);
+            WritableObjects.writeLongs(out, messageSliceId.slicerId, messageSliceId.messageId);
+        }
+
+        @Override
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+            final Identifier clientIdentifier = (Identifier) in.readObject();
+            final byte header = WritableObjects.readLongHeader(in);
+            final long slicerId =  WritableObjects.readFirstLong(in, header);
+            final long messageId = WritableObjects.readSecondLong(in, header);
+            messageSliceId = new MessageSliceIdentifier(clientIdentifier, slicerId, messageId);
+        }
+
+        private Object readResolve() {
+            return messageSliceId;
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/MessageSliceReply.java b/java/org/opendaylight/controller/cluster/messaging/MessageSliceReply.java
new file mode 100644 (file)
index 0000000..e820c4b
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.Serializable;
+import java.util.Optional;
+import org.opendaylight.yangtools.concepts.Identifier;
+
+/**
+ * The reply message for {@link MessageSlice}.
+ *
+ * @author Thomas Pantelis
+ */
+public final class MessageSliceReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final Identifier identifier;
+    // The index of the slice being acknowledged; -1 when this reply conveys a failure.
+    private final int sliceIndex;
+    // Null for a successful reply - see getFailure().
+    private final MessageSliceException failure;
+    // The actor to which the next slice (or an abort) should be sent.
+    private final ActorRef sendTo;
+
+    private MessageSliceReply(final Identifier identifier, final int sliceIndex, final MessageSliceException failure,
+            final ActorRef sendTo) {
+        this.identifier = requireNonNull(identifier);
+        this.sliceIndex = sliceIndex;
+        this.sendTo = requireNonNull(sendTo);
+        this.failure = failure;
+    }
+
+    // Factory for a successful acknowledgment of the given slice index.
+    public static MessageSliceReply success(final Identifier identifier, final int sliceIndex, final ActorRef sendTo) {
+        return new MessageSliceReply(identifier, sliceIndex, null, sendTo);
+    }
+
+    // Factory for a failed reply; the slice index is set to -1 since no slice was accepted.
+    public static MessageSliceReply failed(final Identifier identifier, final MessageSliceException failure,
+            final ActorRef sendTo) {
+        return new MessageSliceReply(identifier, -1, failure, sendTo);
+    }
+
+    public Identifier getIdentifier() {
+        return identifier;
+    }
+
+    public int getSliceIndex() {
+        return sliceIndex;
+    }
+
+    public ActorRef getSendTo() {
+        return sendTo;
+    }
+
+    // Empty for a successful reply, present when slicing failed on the assembling side.
+    public Optional<MessageSliceException> getFailure() {
+        return Optional.ofNullable(failure);
+    }
+
+    @Override
+    public String toString() {
+        return "MessageSliceReply [identifier=" + identifier + ", sliceIndex=" + sliceIndex + ", failure=" + failure
+                + ", sendTo=" + sendTo + "]";
+    }
+
+    // Serialization-proxy pattern: serialize via Proxy instead of this class directly.
+    private Object writeReplace() {
+        return new Proxy(this);
+    }
+
+    private static class Proxy implements Externalizable {
+        private static final long serialVersionUID = 1L;
+
+        private MessageSliceReply messageSliceReply;
+
+        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
+        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
+        @SuppressWarnings("checkstyle:RedundantModifier")
+        public Proxy() {
+        }
+
+        Proxy(final MessageSliceReply messageSliceReply) {
+            this.messageSliceReply = messageSliceReply;
+        }
+
+        @Override
+        public void writeExternal(final ObjectOutput out) throws IOException {
+            // Field order must match the read order in readExternal exactly.
+            out.writeObject(messageSliceReply.identifier);
+            out.writeInt(messageSliceReply.sliceIndex);
+            out.writeObject(messageSliceReply.failure);
+            // The ActorRef is encoded as its serialized actor path string for remote resolution.
+            out.writeObject(Serialization.serializedActorPath(messageSliceReply.sendTo));
+        }
+
+        @Override
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+            final Identifier identifier = (Identifier) in.readObject();
+            final int sliceIndex = in.readInt();
+            final MessageSliceException failure = (MessageSliceException) in.readObject();
+            // Resolve the actor path back to an ActorRef in the deserializing actor system.
+            ActorRef sendTo = JavaSerializer.currentSystem().value().provider()
+                    .resolveActorRef((String) in.readObject());
+
+            messageSliceReply = new MessageSliceReply(identifier, sliceIndex, failure, sendTo);
+        }
+
+        private Object readResolve() {
+            return messageSliceReply;
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java b/java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java
new file mode 100644 (file)
index 0000000..57a6f9e
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalNotification;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Predicate;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
+import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
+import org.opendaylight.yangtools.concepts.Identifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class slices messages into smaller chunks. {@link MessageAssembler} is used to re-assemble the messages.
+ *
+ * @author Thomas Pantelis
+ * @see MessageAssembler
+ */
+public class MessageSlicer implements AutoCloseable {
+    private static final Logger LOG = LoggerFactory.getLogger(MessageSlicer.class);
+    private static final AtomicLong SLICER_ID_COUNTER = new AtomicLong(1);
+    public static final int DEFAULT_MAX_SLICING_TRIES = 3;
+
+    // In-progress slicing state, keyed by MessageSliceIdentifier. Entries may expire after
+    // inactivity if so configured via the Builder; removal is observed in stateRemoved().
+    private final Cache<MessageSliceIdentifier, SlicedMessageState<ActorRef>> stateCache;
+    private final FileBackedOutputStreamFactory fileBackedStreamFactory;
+    private final int messageSliceSize;
+    private final int maxSlicingTries;
+    private final String logContext;
+    // Unique id of this slicer instance, embedded in MessageSliceIdentifiers so replies can be
+    // matched back to the slicer that originated them.
+    private final long id;
+
+    MessageSlicer(final Builder builder) {
+        this.fileBackedStreamFactory = builder.fileBackedStreamFactory;
+        this.messageSliceSize = builder.messageSliceSize;
+        this.maxSlicingTries = builder.maxSlicingTries;
+
+        id = SLICER_ID_COUNTER.getAndIncrement();
+        this.logContext = builder.logContext + "_slicer-id-" + id;
+
+        // The removal listener closes state and notifies the failure callback on expiration.
+        CacheBuilder<Identifier, SlicedMessageState<ActorRef>> cacheBuilder =
+                CacheBuilder.newBuilder().removalListener(this::stateRemoved);
+        if (builder.expireStateAfterInactivityDuration > 0) {
+            cacheBuilder = cacheBuilder.expireAfterAccess(builder.expireStateAfterInactivityDuration,
+                    builder.expireStateAfterInactivityUnit);
+        }
+        stateCache = cacheBuilder.build();
+    }
+
+    @VisibleForTesting
+    long getId() {
+        return id;
+    }
+
+    /**
+     * Returns a new Builder for creating MessageSlicer instances.
+     *
+     * @return a Builder instance
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     * Checks if the given message is handled by this class. If so, it should be forwarded to the
+     * {@link #handleMessage(Object)} method
+     *
+     * @param message the message to check
+     * @return true if handled, false otherwise
+     */
+    public static boolean isHandledMessage(final Object message) {
+        return message instanceof MessageSliceReply;
+    }
+
+    /**
+     * Slices a message into chunks based on the serialized size, the maximum message slice size and the given
+     * options.
+     *
+     * @param options the SliceOptions
+     * @return true if the message was sliced, false otherwise
+     */
+    public boolean slice(final SliceOptions options) {
+        final Identifier identifier = options.getIdentifier();
+        final Serializable message = options.getMessage();
+        final FileBackedOutputStream fileBackedStream;
+        // Two input modes: either a Serializable message that we serialize here, or a
+        // pre-populated FileBackedOutputStream supplied by the caller.
+        if (message != null) {
+            LOG.debug("{}: slice: identifier: {}, message: {}", logContext, identifier, message);
+
+            requireNonNull(fileBackedStreamFactory,
+                    "The FiledBackedStreamFactory must be set in order to call this slice method");
+
+            // Serialize the message to a FileBackedOutputStream.
+            fileBackedStream = fileBackedStreamFactory.newInstance();
+            try (ObjectOutputStream out = new ObjectOutputStream(fileBackedStream)) {
+                out.writeObject(message);
+            } catch (IOException e) {
+                LOG.debug("{}: Error serializing message for {}", logContext, identifier, e);
+                // Release any temporary file backing the stream before reporting the failure.
+                fileBackedStream.cleanup();
+                options.getOnFailureCallback().accept(e);
+                return false;
+            }
+        } else {
+            fileBackedStream = options.getFileBackedStream();
+        }
+
+        return initializeSlicing(options, fileBackedStream);
+    }
+
+    private boolean initializeSlicing(final SliceOptions options, final FileBackedOutputStream fileBackedStream) {
+        final Identifier identifier = options.getIdentifier();
+        MessageSliceIdentifier messageSliceId = new MessageSliceIdentifier(identifier, id);
+        SlicedMessageState<ActorRef> state = null;
+        try {
+            state = new SlicedMessageState<>(messageSliceId, fileBackedStream, messageSliceSize, maxSlicingTries,
+                    options.getReplyTo(), options.getOnFailureCallback(), logContext);
+
+            final Serializable message = options.getMessage();
+            // Short-circuit: if the serialized form fits in a single slice, skip the slicing
+            // protocol entirely and send the original message as-is.
+            if (state.getTotalSlices() == 1 && message != null) {
+                LOG.debug("{}: Message does not need to be sliced - sending original message", logContext);
+                state.close();
+                sendTo(options, message, options.getReplyTo());
+                return false;
+            }
+
+            final MessageSlice firstSlice = getNextSliceMessage(state);
+
+            LOG.debug("{}: Sending first slice: {}", logContext, firstSlice);
+
+            // Cache the state before sending so the reply cannot race past us.
+            stateCache.put(messageSliceId, state);
+            sendTo(options, firstSlice, ActorRef.noSender());
+            return true;
+        } catch (IOException e) {
+            LOG.error("{}: Error initializing SlicedMessageState for {}", logContext, identifier, e);
+            // Clean up whichever resource we own at this point: the state (which owns the stream
+            // once constructed) or the bare stream.
+            if (state != null) {
+                state.close();
+            } else {
+                fileBackedStream.cleanup();
+            }
+
+            options.getOnFailureCallback().accept(e);
+            return false;
+        }
+    }
+
+    // Sends via whichever destination was configured: ActorRef takes precedence over ActorSelection.
+    private static void sendTo(final SliceOptions options, final Object message, final ActorRef sender) {
+        if (options.getSendToRef() != null) {
+            options.getSendToRef().tell(message, sender);
+        } else {
+            options.getSendToSelection().tell(message, sender);
+        }
+    }
+
+    /**
+     * Invoked to handle messages pertaining to this class.
+     *
+     * @param message the message
+     * @return true if the message was handled, false otherwise
+     */
+    public boolean handleMessage(final Object message) {
+        if (message instanceof MessageSliceReply) {
+            LOG.debug("{}: handleMessage: {}", logContext, message);
+            return onMessageSliceReply((MessageSliceReply) message);
+        }
+
+        return false;
+    }
+
+    /**
+     * Checks for and removes sliced message state that has expired due to inactivity from the assembling component
+     * on the other end.
+     */
+    public void checkExpiredSlicedMessageState() {
+        if (stateCache.size() > 0) {
+            // cleanUp() forces eviction of expired entries, triggering stateRemoved() callbacks.
+            stateCache.cleanUp();
+        }
+    }
+
+    /**
+     * Closes and removes all in-progress sliced message state.
+     */
+    @Override
+    public void close() {
+        LOG.debug("{}: Closing", logContext);
+        stateCache.invalidateAll();
+    }
+
+    /**
+     * Cancels all in-progress sliced message state that matches the given filter.
+     *
+     * @param filter filters by Identifier
+     */
+    public void cancelSlicing(final @NonNull Predicate<Identifier> filter) {
+        // Removing from the key set invalidates the cache entry, which closes the state via the
+        // removal listener.
+        stateCache.asMap().keySet().removeIf(
+            messageSliceIdentifier -> filter.test(messageSliceIdentifier.getClientIdentifier()));
+    }
+
+    private static MessageSlice getNextSliceMessage(final SlicedMessageState<ActorRef> state) throws IOException {
+        final byte[] firstSliceBytes = state.getNextSlice();
+        return new MessageSlice(state.getIdentifier(), firstSliceBytes, state.getCurrentSliceIndex(),
+                state.getTotalSlices(), state.getLastSliceHashCode(), state.getReplyTarget());
+    }
+
+    private boolean onMessageSliceReply(final MessageSliceReply reply) {
+        final Identifier identifier = reply.getIdentifier();
+        // Ignore replies that don't carry our identifier type or belong to a different slicer
+        // instance - another slicer in the same process may own them.
+        if (!(identifier instanceof MessageSliceIdentifier)
+                || ((MessageSliceIdentifier)identifier).getSlicerId() != id) {
+            return false;
+        }
+
+        final SlicedMessageState<ActorRef> state = stateCache.getIfPresent(identifier);
+        if (state == null) {
+            // State is gone (completed, cancelled or expired) - tell the assembler to abort.
+            LOG.warn("{}: SlicedMessageState not found for {}", logContext, reply);
+            reply.getSendTo().tell(new AbortSlicing(identifier), ActorRef.noSender());
+            return true;
+        }
+
+        // SlicedMessageState is not thread-safe; serialize access per state instance.
+        synchronized (state) {
+            try {
+                final Optional<MessageSliceException> failure = reply.getFailure();
+                if (failure.isPresent()) {
+                    LOG.warn("{}: Received failed {}", logContext, reply);
+                    processMessageSliceException(failure.get(), state, reply.getSendTo());
+                    return true;
+                }
+
+                if (state.getCurrentSliceIndex() != reply.getSliceIndex()) {
+                    // Out-of-sync with the assembler - abort its side and retry from the start.
+                    LOG.warn("{}: Slice index {} in {} does not match expected index {}", logContext,
+                            reply.getSliceIndex(), reply, state.getCurrentSliceIndex());
+                    reply.getSendTo().tell(new AbortSlicing(identifier), ActorRef.noSender());
+                    possiblyRetrySlicing(state, reply.getSendTo());
+                    return true;
+                }
+
+                if (state.isLastSlice(reply.getSliceIndex())) {
+                    LOG.debug("{}: Received last slice reply for {}", logContext, identifier);
+                    removeState(identifier);
+                } else {
+                    final MessageSlice nextSlice = getNextSliceMessage(state);
+                    LOG.debug("{}: Sending next slice: {}", logContext, nextSlice);
+                    reply.getSendTo().tell(nextSlice, ActorRef.noSender());
+                }
+            } catch (IOException e) {
+                LOG.warn("{}: Error processing {}", logContext, reply, e);
+                fail(state, e);
+            }
+        }
+
+        return true;
+    }
+
+    private void processMessageSliceException(final MessageSliceException exception,
+            final SlicedMessageState<ActorRef> state, final ActorRef sendTo) throws IOException {
+        if (exception.isRetriable()) {
+            possiblyRetrySlicing(state, sendTo);
+        } else {
+            // Prefer the underlying cause for the callback when one is present.
+            fail(state, exception.getCause() != null ? exception.getCause() : exception);
+        }
+    }
+
+    private void possiblyRetrySlicing(final SlicedMessageState<ActorRef> state, final ActorRef sendTo)
+            throws IOException {
+        if (state.canRetry()) {
+            LOG.info("{}: Retrying message slicing for {}", logContext, state.getIdentifier());
+            // reset() rewinds the state so slicing restarts from the first slice.
+            state.reset();
+            sendTo.tell(getNextSliceMessage(state), ActorRef.noSender());
+        } else {
+            String message = String.format("Maximum slicing retries reached for identifier %s - failing the message",
+                    state.getIdentifier());
+            LOG.warn(message);
+            fail(state, new RuntimeException(message));
+        }
+    }
+
+    private void removeState(final Identifier identifier) {
+        LOG.debug("{}: Removing state for {}", logContext, identifier);
+        // Invalidation triggers stateRemoved(), which closes the state.
+        stateCache.invalidate(identifier);
+    }
+
+    // Guava cache removal listener: closes state and, on expiration, notifies the failure callback.
+    private void stateRemoved(final RemovalNotification<Identifier, SlicedMessageState<ActorRef>> notification) {
+        final SlicedMessageState<ActorRef> state = notification.getValue();
+        state.close();
+        if (notification.wasEvicted()) {
+            LOG.warn("{}: SlicedMessageState for {} was expired from the cache", logContext, notification.getKey());
+            state.getOnFailureCallback().accept(new RuntimeException(String.format(
+                    "The slicing state for message identifier %s was expired due to inactivity from the assembling "
+                     + "component on the other end", state.getIdentifier())));
+        } else {
+            LOG.debug("{}: SlicedMessageState for {} was removed from the cache due to {}", logContext,
+                    notification.getKey(), notification.getCause());
+        }
+    }
+
+    private void fail(final SlicedMessageState<ActorRef> state, final Throwable failure) {
+        removeState(state.getIdentifier());
+        state.getOnFailureCallback().accept(failure);
+    }
+
+    @VisibleForTesting
+    boolean hasState(final Identifier forIdentifier) {
+        boolean exists = stateCache.getIfPresent(forIdentifier) != null;
+        stateCache.cleanUp();
+        return exists;
+    }
+
+    public static class Builder {
+        private FileBackedOutputStreamFactory fileBackedStreamFactory;
+        // -1 means "not set"; messageSliceSize must be configured via messageSliceSize().
+        private int messageSliceSize = -1;
+        // -1 disables inactivity-based expiration by default.
+        private long expireStateAfterInactivityDuration = -1;
+        private TimeUnit expireStateAfterInactivityUnit = TimeUnit.MINUTES;
+        private int maxSlicingTries = DEFAULT_MAX_SLICING_TRIES;
+        private String logContext = "<no-context>";
+
+        /**
+         * Sets the factory for creating FileBackedOutputStream instances used for streaming messages. This factory
+         * is used by the {@link MessageSlicer#slice(SliceOptions)} method if a Serializable message is passed.
+         * If Serializable messages aren't passed then the factory need not be set.
+         *
+         * @param newFileBackedStreamFactory the factory for creating FileBackedOutputStream instances
+         * @return this Builder
+         */
+        public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) {
+            this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
+            return this;
+        }
+
+        /**
+         * Sets the maximum size (in bytes) for a message slice.
+         *
+         * @param newMessageSliceSize the maximum size (in bytes)
+         * @return this Builder
+         */
+        public Builder messageSliceSize(final int newMessageSliceSize) {
+            checkArgument(newMessageSliceSize > 0, "messageSliceSize must be > 0");
+            this.messageSliceSize = newMessageSliceSize;
+            return this;
+        }
+
+        /**
+         * Sets the maximum number of tries for slicing a message. If exceeded, slicing fails. The default is
+         * defined by {@link #DEFAULT_MAX_SLICING_TRIES}
+         *
+         * @param newMaxSlicingTries the maximum number of tries
+         * @return this Builder
+         */
+        public Builder maxSlicingTries(final int newMaxSlicingTries) {
+            checkArgument(newMaxSlicingTries > 0, "newMaxSlicingTries must be > 0");
+            this.maxSlicingTries = newMaxSlicingTries;
+            return this;
+        }
+
+        /**
+         * Sets the duration and time unit whereby sliced message state is purged from the cache and the associated
+         * failure callback is notified due to inactivity from the assembling component on the other end. By default,
+         * state is not purged due to inactivity.
+         *
+         * @param duration the length of time after which a state entry is purged
+         * @param unit the unit the duration is expressed in
+         * @return this Builder
+         */
+        public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) {
+            checkArgument(duration > 0, "duration must be > 0");
+            this.expireStateAfterInactivityDuration = duration;
+            this.expireStateAfterInactivityUnit = unit;
+            return this;
+        }
+
+        /**
+         * Sets the context for log messages.
+         *
+         * @param newLogContext the log context
+         * @return this Builder
+         */
+        public Builder logContext(final String newLogContext) {
+            this.logContext = requireNonNull(newLogContext);
+            return this;
+        }
+
+        /**
+         * Builds a new MessageSlicer instance.
+         *
+         * @return a new MessageSlicer
+         */
+        public MessageSlicer build() {
+            return new MessageSlicer(this);
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/SliceOptions.java b/java/org/opendaylight/controller/cluster/messaging/SliceOptions.java
new file mode 100644 (file)
index 0000000..9ba69fb
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import java.io.Serializable;
+import java.util.function.Consumer;
+import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
+import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
+import org.opendaylight.yangtools.concepts.Identifier;
+
+/**
+ * Options for slicing a message with {@link MessageSlicer#slice(SliceOptions)}.
+ *
+ * @author Thomas Pantelis
+ */
+public final class SliceOptions {
+    private final Builder builder;
+
+    private SliceOptions(final Builder builder) {
+        this.builder = builder;
+    }
+
+    public Identifier getIdentifier() {
+        return builder.identifier;
+    }
+
+    public FileBackedOutputStream getFileBackedStream() {
+        return builder.fileBackedStream;
+    }
+
+    public Serializable getMessage() {
+        return builder.message;
+    }
+
+    public ActorRef getSendToRef() {
+        return builder.sendToRef;
+    }
+
+    public ActorSelection getSendToSelection() {
+        return builder.sendToSelection;
+    }
+
+    public ActorRef getReplyTo() {
+        return builder.replyTo;
+    }
+
+    public Consumer<Throwable> getOnFailureCallback() {
+        return builder.onFailureCallback;
+    }
+
+    /**
+     * Returns a new Builder for creating MessageSlicer instances.
+     *
+     * @return a Builder instance
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+        private Identifier identifier;
+        private FileBackedOutputStream fileBackedStream;
+        private Serializable message;
+        private ActorRef sendToRef;
+        private ActorSelection sendToSelection;
+        private ActorRef replyTo;
+        private Consumer<Throwable> onFailureCallback;
+        private boolean sealed;
+
+        /**
+         * Sets the identifier of the component to slice.
+         *
+         * @param newIdentifier the identifier
+         * @return this Builder
+         */
+        public Builder identifier(final Identifier newIdentifier) {
+            checkSealed();
+            identifier = newIdentifier;
+            return this;
+        }
+
+        /**
+         * Sets the {@link FileBackedOutputStream} containing the message data to slice.
+         *
+         * @param newFileBackedStream the {@link FileBackedOutputStream}
+         * @return this Builder
+         */
+        public Builder fileBackedOutputStream(final FileBackedOutputStream newFileBackedStream) {
+            checkSealed();
+            fileBackedStream = newFileBackedStream;
+            return this;
+        }
+
+        /**
+         * Sets the message to slice. The message is first serialized to a {@link FileBackedOutputStream}. If the
+         * message doesn't need to be sliced, ie its serialized size is less than the maximum message slice size, then
+         * the original message is sent. Otherwise the first message slice is sent.
+         *
+         * <p>
+         * <b>Note:</b> a {@link FileBackedOutputStreamFactory} must be set in the {@link MessageSlicer}.
+         *
+         * @param newMessage the message
+         * @param <T> the Serializable message type
+         * @return this Builder
+         */
+        public <T extends Serializable> Builder message(final T newMessage) {
+            checkSealed();
+            message = newMessage;
+            return this;
+        }
+
+        /**
+         * Sets the reference of the actor to which to send the message slices.
+         *
+         * @param sendTo the ActorRef
+         * @return this Builder
+         */
+        public Builder sendTo(final ActorRef sendTo) {
+            checkSealed();
+            sendToRef = sendTo;
+            return this;
+        }
+
+        /**
+         * Sets the ActorSelection to which to send the message slices.
+         *
+         * @param sendTo the ActorSelection
+         * @return this Builder
+         */
+        public Builder sendTo(final ActorSelection sendTo) {
+            checkSealed();
+            sendToSelection = sendTo;
+            return this;
+        }
+
+        /**
+         * Sets the reference of the actor to which message slice replies should be sent. The actor should
+         * forward the replies to the {@link MessageSlicer#handleMessage(Object)} method.
+         *
+         * @param newReplyTo the ActorRef
+         * @return this Builder
+         */
+        public Builder replyTo(final ActorRef newReplyTo) {
+            checkSealed();
+            replyTo = newReplyTo;
+            return this;
+        }
+
+        /**
+         * Sets the callback to be notified of failure.
+         *
+         * @param newOnFailureCallback the callback
+         * @return this Builder
+         */
+        public Builder onFailureCallback(final Consumer<Throwable> newOnFailureCallback) {
+            checkSealed();
+            onFailureCallback = newOnFailureCallback;
+            return this;
+        }
+
+        /**
+         * Builds a new SliceOptions instance.
+         *
+         * @return a new SliceOptions
+         */
+        public SliceOptions build() {
+            sealed = true;
+
+            requireNonNull(identifier, "identifier must be set");
+            requireNonNull(replyTo, "replyTo must be set");
+            requireNonNull(onFailureCallback, "onFailureCallback must be set");
+            checkState(fileBackedStream == null || message == null,
+                    "Only one of message and fileBackedStream can be set");
+            checkState(!(fileBackedStream == null && message == null),
+                    "One of message and fileBackedStream must be set");
+            checkState(sendToRef == null || sendToSelection == null,
+                    "Only one of sendToRef and sendToSelection can be set");
+            checkState(!(sendToRef == null && sendToSelection == null),
+                    "One of sendToRef and sendToSelection must be set");
+
+            return new SliceOptions(this);
+        }
+
+        protected void checkSealed() {
+            checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
+        }
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/messaging/SlicedMessageState.java b/java/org/opendaylight/controller/cluster/messaging/SlicedMessageState.java
new file mode 100644 (file)
index 0000000..5be3fa4
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.messaging;
+
+import com.google.common.io.ByteSource;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.function.Consumer;
+import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
+import org.opendaylight.yangtools.concepts.Identifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Maintains the state of a sliced message. This class is NOT thread-safe.
+ *
+ * @author Thomas Pantelis
+ * @see MessageSlicer
+ */
+public class SlicedMessageState<T> implements AutoCloseable {
+    private static final Logger LOG = LoggerFactory.getLogger(SlicedMessageState.class);
+
+    // The index of the first slice that is sent.
+    static final int FIRST_SLICE_INDEX = 1;
+
+    // The initial hash code for a slice.
+    static final int INITIAL_SLICE_HASH_CODE = -1;
+
+    private final Identifier identifier;
+    private final int messageSliceSize;
+    private final FileBackedOutputStream fileBackedStream;
+    private final T replyTarget;
+    private final ByteSource messageBytes;
+    private final int totalSlices;
+    private final long totalMessageSize;
+    private final int maxRetries;
+    private final Consumer<Throwable> onFailureCallback;
+    private final String logContext;
+
+    private int currentByteOffset = 0;
+    private int currentSliceIndex = FIRST_SLICE_INDEX - 1;
+    private int lastSliceHashCode = INITIAL_SLICE_HASH_CODE;
+    private int currentSliceHashCode = INITIAL_SLICE_HASH_CODE;
+    private int tryCount = 1;
+    private InputStream messageInputStream;
+
+    /**
+     * Constructor.
+     *
+     * @param identifier the identifier for this instance
+     * @param fileBackedStream the FileBackedOutputStream containing the serialized data to slice
+     * @param messageSliceSize the maximum size (in bytes) for a message slice
+     * @param maxRetries the maximum number of retries
+     * @param replyTarget the user-defined target for sliced message replies
+     * @param onFailureCallback the callback to notify on failure
+     * @param logContext the context for log messages
+     * @throws IOException if an error occurs opening the input stream
+     */
+    public SlicedMessageState(final Identifier identifier, final FileBackedOutputStream fileBackedStream,
+            final int messageSliceSize, final int maxRetries, final T replyTarget,
+            final Consumer<Throwable> onFailureCallback, final String logContext) throws IOException {
+        this.identifier = identifier;
+        this.fileBackedStream = fileBackedStream;
+        this.messageSliceSize = messageSliceSize;
+        this.maxRetries = maxRetries;
+        this.replyTarget = replyTarget;
+        this.onFailureCallback = onFailureCallback;
+        this.logContext = logContext;
+
+        messageBytes = fileBackedStream.asByteSource();
+        totalMessageSize = messageBytes.size();
+        messageInputStream = messageBytes.openStream();
+
+        totalSlices = (int)(totalMessageSize / messageSliceSize + (totalMessageSize % messageSliceSize > 0 ? 1 : 0));
+
+        LOG.debug("{}: Message size: {} bytes, total slices to send: {}", logContext, totalMessageSize, totalSlices);
+    }
+
+    /**
+     * Returns the current slice index that has been sent.
+     *
+     * @return the current slice index that has been sent
+     */
+    public int getCurrentSliceIndex() {
+        return currentSliceIndex;
+    }
+
+    /**
+     * Returns the hash code of the last slice that was sent.
+     *
+     * @return the hash code of the last slice that was sent
+     */
+    public int getLastSliceHashCode() {
+        return lastSliceHashCode;
+    }
+
+    /**
+     * Returns the total number of slices to send.
+     *
+     * @return the total number of slices to send
+     */
+    public int getTotalSlices() {
+        return totalSlices;
+    }
+
+    /**
+     * Returns the identifier of this instance.
+     *
+     * @return the identifier
+     */
+    public Identifier getIdentifier() {
+        return identifier;
+    }
+
+    /**
+     * Returns the user-defined target for sliced message replies.
+     *
+     * @return the user-defined target
+     */
+    public T getReplyTarget() {
+        return replyTarget;
+    }
+
+    /**
+     *  Returns the callback to notify on failure.
+     *
+     * @return the callback to notify on failure
+     */
+    public Consumer<Throwable> getOnFailureCallback() {
+        return onFailureCallback;
+    }
+
+    /**
+     * Determines if the slicing can be retried.
+     *
+     * @return true if the slicing can be retried, false if the maximum number of retries has been reached
+     */
+    public boolean canRetry() {
+        return tryCount <= maxRetries;
+    }
+
+    /**
+     * Determines if the given index is the last slice to send.
+     *
+     * @param index the slice index to test
+     * @return true if the index is the last slice, false otherwise
+     */
+    public boolean isLastSlice(final int index) {
+        return totalSlices == index;
+    }
+
+    /**
+     * Reads and returns the next slice of data.
+     *
+     * @return the next slice of data as a byte[]
+     * @throws IOException if an error occurs reading the data
+     */
+    public byte[] getNextSlice() throws IOException {
+        currentSliceIndex++;
+        final int start;
+        if (currentSliceIndex == FIRST_SLICE_INDEX) {
+            start = 0;
+        } else {
+            start = incrementByteOffset();
+        }
+
+        final int size;
+        if (messageSliceSize > totalMessageSize) {
+            size = (int) totalMessageSize;
+        } else if (start + messageSliceSize > totalMessageSize) {
+            size = (int) (totalMessageSize - start);
+        } else {
+            size = messageSliceSize;
+        }
+
+        LOG.debug("{}: getNextSlice: total size: {}, offset: {}, size: {}, index: {}", logContext, totalMessageSize,
+                start, size, currentSliceIndex);
+
+        byte[] nextSlice = new byte[size];
+        int numRead = messageInputStream.read(nextSlice);
+        if (numRead != size) {
+            throw new IOException(String.format(
+                    "The # of bytes read from the input stream, %d, does not match the expected # %d", numRead, size));
+        }
+
+        lastSliceHashCode = currentSliceHashCode;
+        currentSliceHashCode = Arrays.hashCode(nextSlice);
+
+        return nextSlice;
+    }
+
+    /**
+     * Resets this instance to restart slicing from the beginning.
+     *
+     * @throws IOException if an error occurs resetting the input stream
+     */
+    public void reset() throws IOException {
+        closeStream();
+
+        tryCount++;
+        currentByteOffset = 0;
+        currentSliceIndex = FIRST_SLICE_INDEX - 1;
+        lastSliceHashCode = INITIAL_SLICE_HASH_CODE;
+        currentSliceHashCode = INITIAL_SLICE_HASH_CODE;
+
+        messageInputStream = messageBytes.openStream();
+    }
+
+    private int incrementByteOffset() {
+        currentByteOffset  += messageSliceSize;
+        return currentByteOffset;
+    }
+
+    private void closeStream() {
+        if (messageInputStream != null) {
+            try {
+                messageInputStream.close();
+            } catch (IOException e) {
+                LOG.warn("{}: Error closing message stream", logContext, e);
+            }
+
+            messageInputStream = null;
+        }
+    }
+
+    @Override
+    public void close() {
+        closeStream();
+        fileBackedStream.cleanup();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java b/java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java
new file mode 100644 (file)
index 0000000..caa1a8d
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.notifications;
+
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * A local message initiated internally from the RaftActor when some state of a leader has changed.
+ *
+ * @author Thomas Pantelis
+ */
+public class LeaderStateChanged {
+    private final String memberId;
+    private final String leaderId;
+    private final short leaderPayloadVersion;
+
+    public LeaderStateChanged(@NonNull String memberId, @Nullable String leaderId, short leaderPayloadVersion) {
+        this.memberId = requireNonNull(memberId);
+        this.leaderId = leaderId;
+        this.leaderPayloadVersion = leaderPayloadVersion;
+    }
+
+    public @NonNull String getMemberId() {
+        return memberId;
+    }
+
+    public @Nullable String getLeaderId() {
+        return leaderId;
+    }
+
+    public short getLeaderPayloadVersion() {
+        return leaderPayloadVersion;
+    }
+
+    @Override
+    public String toString() {
+        return "LeaderStateChanged [memberId=" + memberId
+                + ", leaderId=" + leaderId
+                + ", leaderPayloadVersion=" + leaderPayloadVersion + "]";
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/notifications/RegisterRoleChangeListener.java b/java/org/opendaylight/controller/cluster/notifications/RegisterRoleChangeListener.java
new file mode 100644 (file)
index 0000000..4928679
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.notifications;
+
+import java.io.Serializable;
+
+/**
+ * Message sent from the listener of Role Change messages to register itself to the Role Change Notifier.
+ * The Listener could be in a separate ActorSystem and hence this message needs to be Serializable.
+ */
+public class RegisterRoleChangeListener implements Serializable {
+    // Fixed serial version id - instances may cross ActorSystem (and hence JVM) boundaries.
+    private static final long serialVersionUID = 8370459011119791506L;
+}
diff --git a/java/org/opendaylight/controller/cluster/notifications/RegisterRoleChangeListenerReply.java b/java/org/opendaylight/controller/cluster/notifications/RegisterRoleChangeListenerReply.java
new file mode 100644 (file)
index 0000000..eea87ae
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.notifications;
+
+import java.io.Serializable;
+
+/**
+ * Reply message sent from a RoleChangeNotifier to the Role Change Listener.
+ * Can be sent to a separate actor system and hence should be made serializable.
+ */
+public class RegisterRoleChangeListenerReply implements Serializable {
+    // Fixed serial version id - instances may cross ActorSystem (and hence JVM) boundaries.
+    private static final long serialVersionUID = -1972061601184451430L;
+}
diff --git a/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotification.java b/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotification.java
new file mode 100644 (file)
index 0000000..370ce1f
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.notifications;
+
+import java.io.Serializable;
+
+/**
+ * Notification message representing a Role change of a cluster member.
+ * Roles are generally Leader, Follower and Candidate, but may vary depending on the consensus strategy/implementation.
+ * The Listener could be in a separate ActorSystem and hence this message needs to be Serializable.
+ */
+public class RoleChangeNotification implements Serializable {
+    private static final long serialVersionUID = -2873869509490117116L;
+    private final String memberId;
+    private final String oldRole;
+    private final String newRole;
+
+    public RoleChangeNotification(String memberId, String oldRole, String newRole) {
+        this.memberId = memberId;
+        this.oldRole = oldRole;
+        this.newRole = newRole;
+    }
+
+    public String getMemberId() {
+        return memberId;
+    }
+
+    public String getOldRole() {
+        return oldRole;
+    }
+
+    public String getNewRole() {
+        return newRole;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java b/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java
new file mode 100644 (file)
index 0000000..bb4ad65
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.notifications;
+
+import akka.actor.ActorPath;
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.serialization.Serialization;
+import java.util.HashMap;
+import java.util.Map;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+
+/**
+ * The RoleChangeNotifier is responsible for receiving Raft role and leader state change messages and notifying
+ * the listeners (within the same node), which are registered with it.
+ *
+ * <p>
+ * The RoleChangeNotifier is instantiated by the Shard and injected into the RaftActor.
+ */
+public class RoleChangeNotifier extends AbstractUntypedActor implements AutoCloseable {
+
+    private final String memberId;
+    // Registered listeners keyed by their ActorPath; values are the ActorRefs to notify.
+    private final Map<ActorPath, ActorRef> registeredListeners = new HashMap<>();
+    // The latest notifications are cached so a newly-registered listener can be brought
+    // up to date immediately rather than waiting for the next state change.
+    private RoleChangeNotification latestRoleChangeNotification = null;
+    private LeaderStateChanged latestLeaderStateChanged;
+
+    public RoleChangeNotifier(final String memberId) {
+        this.memberId = memberId;
+    }
+
+    /**
+     * Returns the Props for creating a RoleChangeNotifier actor.
+     *
+     * @param memberId the member id this notifier reports for
+     * @return the Props
+     */
+    public static Props getProps(final String memberId) {
+        return Props.create(RoleChangeNotifier.class, memberId);
+    }
+
+    @Override
+    public void preStart() throws Exception {
+        super.preStart();
+        LOG.info("RoleChangeNotifier:{} created and ready for shard:{}",
+            Serialization.serializedActorPath(getSelf()), memberId);
+    }
+
+    @Override
+    protected void handleReceive(final Object message) {
+        if (message instanceof RegisterRoleChangeListener) {
+            // register listeners for this shard
+
+            ActorRef curRef = registeredListeners.get(getSender().path());
+            if (curRef != null) {
+                // ActorPaths compare equal even when the unique ids of the actors differ,
+                // and a plain put() would retain the previously-stored key. So if a listener
+                // actor is re-registering after reincarnation, remove the existing entry
+                // first so the actor path with the correct unique id becomes the stored key.
+                registeredListeners.remove(getSender().path());
+            }
+            registeredListeners.put(getSender().path(), getSender());
+
+            LOG.info("RoleChangeNotifier for {} , registered listener {}", memberId,
+                getSender().path().toString());
+
+            getSender().tell(new RegisterRoleChangeListenerReply(), getSelf());
+
+            // Replay any cached state so the new listener is immediately current.
+            if (latestLeaderStateChanged != null) {
+                getSender().tell(latestLeaderStateChanged, getSelf());
+            }
+
+            if (latestRoleChangeNotification != null) {
+                getSender().tell(latestRoleChangeNotification, getSelf());
+            }
+
+
+        } else if (message instanceof RoleChanged) {
+            // this message is sent by RaftActor. Notify registered listeners when this message is received.
+            RoleChanged roleChanged = (RoleChanged) message;
+
+            LOG.info("RoleChangeNotifier for {} , received role change from {} to {}", memberId,
+                roleChanged.getOldRole(), roleChanged.getNewRole());
+
+            // Cache the notification for future registrants, then fan out to current listeners.
+            latestRoleChangeNotification =
+                new RoleChangeNotification(roleChanged.getMemberId(),
+                    roleChanged.getOldRole(), roleChanged.getNewRole());
+
+            for (ActorRef listener: registeredListeners.values()) {
+                listener.tell(latestRoleChangeNotification, getSelf());
+            }
+        } else if (message instanceof LeaderStateChanged) {
+            // Cache the leader state for future registrants, then fan out to current listeners.
+            latestLeaderStateChanged = (LeaderStateChanged)message;
+
+            for (ActorRef listener: registeredListeners.values()) {
+                listener.tell(latestLeaderStateChanged, getSelf());
+            }
+        } else {
+            unknownMessage(message);
+        }
+    }
+
+    @Override
+    public void close() {
+        registeredListeners.clear();
+    }
+}
+
diff --git a/java/org/opendaylight/controller/cluster/notifications/RoleChanged.java b/java/org/opendaylight/controller/cluster/notifications/RoleChanged.java
new file mode 100644 (file)
index 0000000..711025b
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.notifications;
+
+/**
+ * Role Change message initiated internally from the Raft Actor when the behavior/role changes.
+ * Since it is internal, it need not be serialized.
+ */
+public class RoleChanged {
+    private final String memberId;
+    private final String oldRole;
+    private final String newRole;
+
+    public RoleChanged(String memberId, String oldRole, String newRole) {
+        this.memberId = memberId;
+        this.oldRole = oldRole;
+        this.newRole = newRole;
+    }
+
+    public String getMemberId() {
+        return memberId;
+    }
+
+    public String getOldRole() {
+        return oldRole;
+    }
+
+    public String getNewRole() {
+        return newRole;
+    }
+
+    @Override
+    public String toString() {
+        return "RoleChanged [memberId=" + memberId + ", oldRole=" + oldRole + ", newRole=" + newRole + "]";
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java b/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java
new file mode 100644 (file)
index 0000000..42c90b8
--- /dev/null
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2017 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.persistence;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import akka.actor.ExtendedActorSystem;
+import akka.dispatch.Futures;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.serialization.Snapshot;
+import akka.persistence.serialization.SnapshotSerializer;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import akka.serialization.JavaSerializer;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.io.ByteStreams;
+import com.typesafe.config.Config;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.Callable;
+import java.util.stream.Collector;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.ExecutionContext;
+import scala.concurrent.Future;
+
+/**
+ * Akka SnapshotStore implementation backed by the local file system. This class was patterned after akka's
+ * LocalSnapshotStore class and exists because akka's version serializes to a byte[] before persisting
+ * to the file which will fail if the data reaches or exceeds Integer.MAX_VALUE in size. This class avoids that issue
+ * by serializing the data directly to the file.
+ *
+ * @author Thomas Pantelis
+ */
+public class LocalSnapshotStore extends SnapshotStore {
+    private static final Logger LOG = LoggerFactory.getLogger(LocalSnapshotStore.class);
+
+    private static final int PERSISTENCE_ID_START_INDEX = "snapshot-".length();
+
+    private final ExecutionContext executionContext;
+    private final int maxLoadAttempts;
+    private final File snapshotDir;
+
+    public LocalSnapshotStore(final Config config) {
+        this.executionContext = context().system().dispatchers().lookup(config.getString("stream-dispatcher"));
+        snapshotDir = new File(config.getString("dir"));
+
+        int localMaxLoadAttempts = config.getInt("max-load-attempts");
+        maxLoadAttempts = localMaxLoadAttempts > 0 ? localMaxLoadAttempts : 1;
+
+        LOG.debug("LocalSnapshotStore ctor: snapshotDir: {}, maxLoadAttempts: {}", snapshotDir, maxLoadAttempts);
+    }
+
+    @Override
+    public void preStart() throws Exception {
+        if (!snapshotDir.isDirectory()) {
+            // Try to create the directory, on failure double check if someone else beat us to it.
+            if (!snapshotDir.mkdirs() && !snapshotDir.isDirectory()) {
+                throw new IOException("Failed to create snapshot directory " + snapshotDir.getCanonicalPath());
+            }
+        }
+
+        super.preStart();
+    }
+
+    @Override
+    public Future<Optional<SelectedSnapshot>> doLoadAsync(final String persistenceId,
+                                                          final SnapshotSelectionCriteria criteria) {
+        LOG.debug("In doLoadAsync - persistenceId: {}, criteria: {}", persistenceId, criteria);
+
+        // Select the youngest 'maxLoadAttempts' snapshots that match the criteria. This may help in situations where
+        // saving of a snapshot could not be completed because of a JVM crash. Hence, an attempt to load that snapshot
+        // will fail but loading an older snapshot may succeed.
+
+        Deque<SnapshotMetadata> metadatas = getSnapshotMetadatas(persistenceId, criteria).stream()
+                .sorted(LocalSnapshotStore::compare).collect(reverse()).stream().limit(maxLoadAttempts)
+                    .collect(Collectors.toCollection(ArrayDeque::new));
+
+        if (metadatas.isEmpty()) {
+            return Futures.successful(Optional.empty());
+        }
+
+        LOG.debug("doLoadAsync - found: {}", metadatas);
+
+        return Futures.future(() -> doLoad(metadatas), executionContext);
+    }
+
+    private Optional<SelectedSnapshot> doLoad(final Deque<SnapshotMetadata> metadatas) throws IOException {
+        SnapshotMetadata metadata = metadatas.removeFirst();
+        File file = toSnapshotFile(metadata);
+
+        LOG.debug("doLoad {}", file);
+
+        try {
+            Object data = deserialize(file);
+
+            LOG.debug("deserialized data: {}", data);
+
+            return Optional.of(new SelectedSnapshot(metadata, data));
+        } catch (IOException e) {
+            LOG.error("Error loading snapshot file {}, remaining attempts: {}", file, metadatas.size(), e);
+
+            if (metadatas.isEmpty()) {
+                throw e;
+            }
+
+            return doLoad(metadatas);
+        }
+    }
+
+    private Object deserialize(final File file) throws IOException {
+        return JavaSerializer.currentSystem().withValue((ExtendedActorSystem) context().system(),
+            (Callable<Object>) () -> {
+                try (ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(new FileInputStream(file)))) {
+                    return in.readObject();
+                } catch (ClassNotFoundException e) {
+                    throw new IOException("Error loading snapshot file " + file, e);
+                } catch (IOException e) {
+                    LOG.debug("Error loading snapshot file {}", file, e);
+                    return tryDeserializeAkkaSnapshot(file);
+                }
+            });
+    }
+
+    private Object tryDeserializeAkkaSnapshot(final File file) throws IOException {
+        LOG.debug("tryDeserializeAkkaSnapshot {}", file);
+
+        // The snapshot was probably previously stored via akka's LocalSnapshotStore which wraps the data
+        // in a Snapshot instance and uses the SnapshotSerializer to serialize it to a byte[]. So we'll use
+        // the SnapshotSerializer to try to de-serialize it.
+
+        SnapshotSerializer snapshotSerializer = new SnapshotSerializer((ExtendedActorSystem) context().system());
+
+        try (InputStream in = new BufferedInputStream(new FileInputStream(file))) {
+            return ((Snapshot)snapshotSerializer.fromBinary(ByteStreams.toByteArray(in))).data();
+        }
+    }
+
+    @Override
+    public Future<Void> doSaveAsync(final SnapshotMetadata metadata, final Object snapshot) {
+        LOG.debug("In doSaveAsync - metadata: {}, snapshot: {}", metadata, snapshot);
+
+        return Futures.future(() -> doSave(metadata, snapshot), executionContext);
+    }
+
+    private Void doSave(final SnapshotMetadata metadata, final Object snapshot) throws IOException {
+        final File actual = toSnapshotFile(metadata);
+        final File temp = File.createTempFile(actual.getName(), null, snapshotDir);
+
+        LOG.debug("Saving to temp file: {}", temp);
+
+        try (ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(temp)))) {
+            out.writeObject(snapshot);
+        } catch (IOException e) {
+            LOG.error("Error saving snapshot file {}. Deleting file..", temp, e);
+            if (!temp.delete()) {
+                LOG.error("Failed to successfully delete file {}", temp);
+            }
+            throw e;
+        }
+
+        LOG.debug("Renaming to: {}", actual);
+        try {
+            Files.move(temp.toPath(), actual.toPath(), StandardCopyOption.ATOMIC_MOVE);
+        } catch (IOException e) {
+            LOG.warn("Failed to move {} to {}. Deleting {}..", temp, actual, temp, e);
+            if (!temp.delete()) {
+                LOG.error("Failed to successfully delete file {}", temp);
+            }
+            throw e;
+        }
+
+        return null;
+    }
+
+    @Override
+    public Future<Void> doDeleteAsync(final SnapshotMetadata metadata) {
+        LOG.debug("In doDeleteAsync - metadata: {}", metadata);
+
+        // Multiple snapshot files here mean that there were multiple snapshots for this seqNr - we delete all of them.
+        // Usually snapshot-stores would keep one snapshot per sequenceNr however here in the file-based one we
+        // timestamp snapshots and allow multiple to be kept around (for the same seqNr) if desired.
+
+        return Futures.future(() -> doDelete(metadata), executionContext);
+    }
+
+    @Override
+    public Future<Void> doDeleteAsync(final String persistenceId, final SnapshotSelectionCriteria criteria) {
+        LOG.debug("In doDeleteAsync - persistenceId: {}, criteria: {}", persistenceId, criteria);
+
+        return Futures.future(() -> doDelete(persistenceId, criteria), executionContext);
+    }
+
+    private Void doDelete(final String persistenceId, final SnapshotSelectionCriteria criteria) {
+        final List<File> files = getSnapshotMetadatas(persistenceId, criteria).stream()
+                .flatMap(md -> Stream.of(toSnapshotFile(md))).collect(Collectors.toList());
+
+        LOG.debug("Deleting files: {}", files);
+
+        files.forEach(file -> {
+            try {
+                Files.delete(file.toPath());
+            } catch (IOException | SecurityException e) {
+                LOG.error("Unable to delete snapshot file: {}, persistenceId: {} ", file, persistenceId);
+            }
+        });
+        return null;
+    }
+
+    private Void doDelete(final SnapshotMetadata metadata) {
+        final Collection<File> files = getSnapshotFiles(metadata);
+
+        LOG.debug("Deleting files: {}", files);
+
+        files.forEach(file -> {
+            try {
+                Files.delete(file.toPath());
+            } catch (IOException | SecurityException e) {
+                LOG.error("Unable to delete snapshot file: {}", file);
+            }
+        });
+        return null;
+    }
+
+    private Collection<File> getSnapshotFiles(final String persistenceId) {
+        String encodedPersistenceId = encode(persistenceId);
+
+        File[] files = snapshotDir.listFiles((dir, name) -> {
+            int persistenceIdEndIndex = name.lastIndexOf('-', name.lastIndexOf('-') - 1);
+            return PERSISTENCE_ID_START_INDEX + encodedPersistenceId.length() == persistenceIdEndIndex
+                    && name.startsWith(encodedPersistenceId, PERSISTENCE_ID_START_INDEX) && !name.endsWith(".tmp");
+        });
+
+        if (files == null) {
+            return Collections.emptyList();
+        }
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("getSnapshotFiles for persistenceId: {}, found files: {}", encodedPersistenceId,
+                    Arrays.toString(files));
+        }
+
+        return Arrays.asList(files);
+    }
+
+    private Collection<File> getSnapshotFiles(final SnapshotMetadata metadata) {
+        return getSnapshotFiles(metadata.persistenceId()).stream().filter(file -> {
+            SnapshotMetadata possible = extractMetadata(file);
+            return possible != null && possible.sequenceNr() == metadata.sequenceNr()
+                    && (metadata.timestamp() == 0L || possible.timestamp() == metadata.timestamp());
+        }).collect(Collectors.toList());
+    }
+
+    private Collection<SnapshotMetadata> getSnapshotMetadatas(final String persistenceId,
+            final SnapshotSelectionCriteria criteria) {
+        return getSnapshotFiles(persistenceId).stream().flatMap(file -> toStream(extractMetadata(file)))
+                .filter(criteria::matches).collect(Collectors.toList());
+    }
+
+    private static Stream<SnapshotMetadata> toStream(final @Nullable SnapshotMetadata md) {
+        return md != null ? Stream.of(md) : Stream.empty();
+    }
+
+    private static @Nullable SnapshotMetadata extractMetadata(final File file) {
+        String name = file.getName();
+        int sequenceNumberEndIndex = name.lastIndexOf('-');
+        int persistenceIdEndIndex = name.lastIndexOf('-', sequenceNumberEndIndex - 1);
+        if (PERSISTENCE_ID_START_INDEX >= persistenceIdEndIndex) {
+            return null;
+        }
+
+        try {
+            // Since the persistenceId is url encoded in the filename, we need
+            // to decode relevant filename's part to obtain persistenceId back
+            String persistenceId = decode(name.substring(PERSISTENCE_ID_START_INDEX, persistenceIdEndIndex));
+            long sequenceNumber = Long.parseLong(name.substring(persistenceIdEndIndex + 1, sequenceNumberEndIndex));
+            long timestamp = Long.parseLong(name.substring(sequenceNumberEndIndex + 1));
+            return new SnapshotMetadata(persistenceId, sequenceNumber, timestamp);
+        } catch (NumberFormatException e) {
+            return null;
+        }
+    }
+
+    private File toSnapshotFile(final SnapshotMetadata metadata) {
+        return new File(snapshotDir, String.format("snapshot-%s-%d-%d", encode(metadata.persistenceId()),
+            metadata.sequenceNr(), metadata.timestamp()));
+    }
+
+    private static <T> Collector<T, ?, List<T>> reverse() {
+        return Collectors.collectingAndThen(Collectors.toList(), list -> {
+            Collections.reverse(list);
+            return list;
+        });
+    }
+
+    private static String encode(final String str) {
+        try {
+            return URLEncoder.encode(str, StandardCharsets.UTF_8.name());
+        } catch (UnsupportedEncodingException e) {
+            // Shouldn't happen
+            LOG.warn("Error encoding {}", str, e);
+            return str;
+        }
+    }
+
+    private static String decode(final String str) {
+        try {
+            return URLDecoder.decode(str, StandardCharsets.UTF_8.name());
+        } catch (final UnsupportedEncodingException e) {
+            // Shouldn't happen
+            LOG.warn("Error decoding {}", str, e);
+            return str;
+        }
+    }
+
+    @VisibleForTesting
+    static int compare(final SnapshotMetadata m1, final SnapshotMetadata m2) {
+        checkArgument(m1.persistenceId().equals(m2.persistenceId()),
+                "Persistence id does not match. id1: %s, id2: %s", m1.persistenceId(), m2.persistenceId());
+        final int cmp = Long.compare(m1.timestamp(), m2.timestamp());
+        return cmp != 0 ? cmp : Long.compare(m1.sequenceNr(), m2.sequenceNr());
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java b/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java
new file mode 100644 (file)
index 0000000..b970ba4
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
+
+/**
+ * An instance of a Payload class is meant to be used as the Payload for
+ * AppendEntries.
+ *
+ * <p>
+ * When an actor which is derived from RaftActor attempts to persistData it
+ * must pass an instance of the Payload class. Similarly when state needs to
+ * be applied to the derived RaftActor it will be passed an instance of the
+ * Payload class.
+ */
+public abstract class Payload {
+    /**
+     * Returns the size of this payload. Units are implementation-defined
+     * (presumably the serialized size in bytes — confirm with implementations).
+     */
+    public abstract int size();
+}
diff --git a/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/PersistentPayload.java b/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/PersistentPayload.java
new file mode 100644 (file)
index 0000000..828ed2c
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
+
+/**
+ * This is a tagging interface for a Payload implementation that needs to always be persisted regardless of
+ * whether or not the component is configured to be persistent.
+ *
+ * <p>
+ * The interface declares no methods; presumably the persistence layer detects it via
+ * {@code instanceof} — confirm at the usage sites.
+ *
+ * @author Thomas Pantelis
+ */
+public interface PersistentPayload {
+}
diff --git a/java/org/opendaylight/controller/cluster/reporting/MetricsReporter.java b/java/org/opendaylight/controller/cluster/reporting/MetricsReporter.java
new file mode 100644 (file)
index 0000000..a488342
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.reporting;
+
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.jmx.JmxReporter;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+/**
+ * Maintains metrics registry that is provided to reporters.
+ * At the moment only one reporter exists {@code JmxReporter}.
+ * More reporters can be added.
+ *
+ * <p>
+ * The consumers of this class will only be interested in {@code MetricsRegistry}
+ * where metrics for that consumer gets stored.
+ */
+public class MetricsReporter implements AutoCloseable {
+
+    private static final LoadingCache<String, MetricsReporter> METRIC_REPORTERS = CacheBuilder.newBuilder().build(
+        new CacheLoader<String, MetricsReporter>() {
+            @Override
+            public MetricsReporter load(final String domainName) {
+                return new MetricsReporter(domainName);
+            }
+        });
+
+    private final String domainName;
+    private final JmxReporter jmxReporter;
+    private final MetricRegistry metricRegistry = new MetricRegistry();
+
+    MetricsReporter(final String domainName) {
+        this.domainName = domainName;
+        jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(domainName).build();
+        jmxReporter.start();
+    }
+
+    public static MetricsReporter getInstance(final String domainName) {
+        return METRIC_REPORTERS.getUnchecked(domainName);
+    }
+
+    public MetricRegistry getMetricsRegistry() {
+        return metricRegistry;
+    }
+
+    @Override
+    public void close() {
+        jmxReporter.close();
+
+        METRIC_REPORTERS.invalidate(domainName);
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java b/java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java
new file mode 100644 (file)
index 0000000..03b44b5
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.schema.provider;
+
+import com.google.common.annotations.Beta;
+import java.util.Set;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.schema.provider.impl.YangTextSchemaSourceSerializationProxy;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import scala.concurrent.Future;
+
+/**
+ * A remote yang text source provider provides serializable yang text sources.
+ */
+@Beta
+public interface RemoteYangTextSourceProvider {
+
+    /**
+     * Returns the identifiers of all sources this provider can serve.
+     */
+    Future<Set<SourceIdentifier>> getProvidedSources();
+
+    /**
+     * Returns a serializable proxy of the yang text source identified by {@code identifier}.
+     * How an unknown identifier is reported is implementation-specific (presumably a failed
+     * future — confirm with implementations).
+     */
+    Future<YangTextSchemaSourceSerializationProxy> getYangTextSchemaSource(@NonNull SourceIdentifier identifier);
+}
diff --git a/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java b/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java
new file mode 100644 (file)
index 0000000..88e5842
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.schema.provider.impl;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.dispatch.OnComplete;
+import com.google.common.annotations.Beta;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.ExecutionContext;
+import scala.concurrent.Future;
+
+/**
+ * Provides schema sources from {@link RemoteYangTextSourceProvider}.
+ */
+@Beta
+public class RemoteSchemaProvider implements SchemaSourceProvider<YangTextSchemaSource> {
+    private static final Logger LOG = LoggerFactory.getLogger(RemoteSchemaProvider.class);
+
+    private final RemoteYangTextSourceProvider remoteRepo;
+    private final ExecutionContext executionContext;
+
+    public RemoteSchemaProvider(final RemoteYangTextSourceProvider remoteRepo,
+            final ExecutionContext executionContext) {
+        this.remoteRepo = remoteRepo;
+        this.executionContext = executionContext;
+    }
+
+    @Override
+    public ListenableFuture<YangTextSchemaSource> getSource(final SourceIdentifier sourceIdentifier) {
+        LOG.trace("Getting yang schema source for {}", sourceIdentifier.getName());
+
+        Future<YangTextSchemaSourceSerializationProxy> result = remoteRepo.getYangTextSchemaSource(sourceIdentifier);
+
+        final SettableFuture<YangTextSchemaSource> res = SettableFuture.create();
+        result.onComplete(new OnComplete<YangTextSchemaSourceSerializationProxy>() {
+            @Override
+            public void onComplete(final Throwable throwable,
+                    final YangTextSchemaSourceSerializationProxy yangTextSchemaSourceSerializationProxy) {
+                if (yangTextSchemaSourceSerializationProxy != null) {
+                    res.set(yangTextSchemaSourceSerializationProxy.getRepresentation());
+                }
+                if (throwable != null) {
+                    res.setException(throwable);
+                }
+            }
+        }, executionContext);
+
+        return res;
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java b/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java
new file mode 100644 (file)
index 0000000..5e88952
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.schema.provider.impl;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.io.IOException;
+import java.util.Set;
+import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.Promise;
+
+/**
+ *  Remote schema provider implementation backed by local schema provider.
+ */
+@Beta
+public class RemoteYangTextSourceProviderImpl implements RemoteYangTextSourceProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(RemoteYangTextSourceProviderImpl.class);
+
+    private final SchemaRepository repository;
+    private final Set<SourceIdentifier> providedSources;
+
+    public RemoteYangTextSourceProviderImpl(final SchemaRepository repository,
+            final Set<SourceIdentifier> providedSources) {
+        this.repository = requireNonNull(repository);
+        this.providedSources = providedSources;
+    }
+
+    @Override
+    public Future<Set<SourceIdentifier>> getProvidedSources() {
+        return akka.dispatch.Futures.successful(providedSources);
+    }
+
+    @Override
+    public Future<YangTextSchemaSourceSerializationProxy> getYangTextSchemaSource(final SourceIdentifier identifier) {
+        LOG.trace("Sending yang schema source for {}", identifier);
+
+        final Promise<YangTextSchemaSourceSerializationProxy> promise = akka.dispatch.Futures.promise();
+        ListenableFuture<YangTextSchemaSource> future =
+                repository.getSchemaSource(identifier, YangTextSchemaSource.class);
+
+        Futures.addCallback(future, new FutureCallback<YangTextSchemaSource>() {
+            @Override
+            public void onSuccess(final YangTextSchemaSource result) {
+                try {
+                    promise.success(new YangTextSchemaSourceSerializationProxy(result));
+                } catch (IOException e) {
+                    LOG.warn("Unable to read schema source for {}", result.getIdentifier(), e);
+                    promise.failure(e);
+                }
+            }
+
+            @Override
+            public void onFailure(final Throwable failure) {
+                LOG.warn("Unable to retrieve schema source from provider", failure);
+                promise.failure(failure);
+            }
+        }, MoreExecutors.directExecutor());
+
+        return promise.future();
+    }
+}
diff --git a/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java b/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java
new file mode 100644 (file)
index 0000000..202de58
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.schema.provider.impl;
+
+import com.google.common.annotations.Beta;
+import com.google.common.io.ByteSource;
+import java.io.IOException;
+import java.io.Serializable;
+import org.opendaylight.yangtools.yang.common.Revision;
+import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+
+/**
+ * {@link org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource} serialization proxy.
+ * Captures the source's name, optional revision and raw text bytes so the source can be
+ * serialized and later reconstructed via {@link #getRepresentation()}.
+ */
+@Beta
+public class YangTextSchemaSourceSerializationProxy implements Serializable {
+    private static final long serialVersionUID = -6361268518176019477L;
+
+    // These fields form the serialized representation; renaming them would break wire compatibility.
+    private final byte[] schemaSource;
+    private final Revision revision;  // null when the source identifier carries no revision
+    private final String name;
+
+    /**
+     * Creates a proxy by eagerly reading the source's entire text.
+     *
+     * @throws IOException if the source's content cannot be read
+     */
+    public YangTextSchemaSourceSerializationProxy(final YangTextSchemaSource source) throws IOException {
+        this.revision = source.getIdentifier().getRevision().orElse(null);
+        this.name = source.getIdentifier().getName();
+        this.schemaSource = source.read();
+    }
+
+    /**
+     * Rebuilds a {@link YangTextSchemaSource} backed by the captured bytes. Note the returned
+     * source shares this proxy's internal array (via {@code ByteSource.wrap}).
+     */
+    public YangTextSchemaSource getRepresentation() {
+        return YangTextSchemaSource.delegateForByteSource(
+                RevisionSourceIdentifier.create(name, revision), ByteSource.wrap(schemaSource));
+    }
+}