Merge changes from topic 'netconf-clustering'
Author: Tony Tkacik <ttkacik@cisco.com>
Mon, 4 Jan 2016 13:25:10 +0000 (13:25 +0000)
Committer: Gerrit Code Review <gerrit@opendaylight.org>
Mon, 4 Jan 2016 13:25:10 +0000 (13:25 +0000)
* changes:
  Move mount point registration after schema resolution
  Cluster schema resolution pipeline
  Clustered sources resolution
  Change onNodeUpdated to first cleanup previous state
  Use normal identify messages first
  Use lock in topology node writer
  Prevent NPE's on failures
  RemoteDeviceDataBroker proxy

44 files changed:
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/InitialStateProvider.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/NodeManager.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/NodeManagerCallback.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/RoleChangeStrategy.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/TopologyManagerCallback.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/example/ExampleNodeManagerCallback.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/example/ExampleTopologyManagerCallback.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/BaseNodeManager.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/BaseTopologyManager.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/NodeRoleChangeStrategy.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/NoopRoleChangeStrategy.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/TopologyRoleChangeStrategy.java
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/messages/AnnounceMasterMountPoint.java [new file with mode: 0644]
opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/messages/AnnounceMasterMountPointDown.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/pom.xml
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/AbstractNetconfTopology.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/NetconfTopology.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/impl/ClusteredNetconfTopology.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/impl/NetconfNodeManagerCallback.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/impl/NetconfNodeOperationalDataAggregator.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/impl/NetconfTopologyImpl.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/impl/NetconfTopologyManagerCallback.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/impl/TopologyNodeWriter.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredDeviceSourcesResolver.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredDeviceSourcesResolverImpl.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredNetconfDevice.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredNetconfDeviceCommunicator.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProvider.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProviderImpl.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProviderOnSameNodeException.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/NetconfDeviceMasterDataBroker.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/NetconfDeviceSlaveDataBroker.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ProxyNetconfDeviceDataBroker.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/TopologyMountPointFacade.java
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceClusteredDeviceSourcesResolverUp.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceMasterOnSameNodeUp.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceMasterSourceProviderUp.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/NetconfDeviceDataBrokerProxy.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/ProxyReadOnlyTransaction.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/ProxyWriteOnlyTransaction.java [new file with mode: 0644]
opendaylight/netconf/netconf-topology/src/test/java/org/opendaylight/netconf/topology/ActorTest.java
opendaylight/netconf/netconf-topology/src/test/java/org/opendaylight/netconf/topology/TestingTopologyDispatcher.java
opendaylight/netconf/sal-netconf-connector/src/main/java/org/opendaylight/netconf/sal/connect/netconf/NetconfDevice.java
opendaylight/netconf/sal-netconf-connector/src/main/java/org/opendaylight/netconf/sal/connect/netconf/listener/NetconfDeviceCommunicator.java

index ed3623da49fc4e09f21260d70c85a5dc674c6fa1..ae53a9dc797a25a11d0c7e8f19872f7637a1c290 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.netconf.topology;
 
 import com.google.common.annotations.Beta;
 import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
 
@@ -22,5 +23,5 @@ public interface InitialStateProvider {
     Node getInitialState(@Nonnull final NodeId nodeId, @Nonnull final Node configNode);
 
     @Nonnull
-    Node getFailedState(@Nonnull final NodeId nodeId, @Nonnull final Node configNode);
+    Node getFailedState(@Nonnull final NodeId nodeId, @Nullable final Node configNode);
 }
index b4b26a859a48adf21dfdafe006e158f00749e406..c8ca14d252a30038e470f82f03502cb5b618a8fe 100644 (file)
@@ -10,11 +10,13 @@ package org.opendaylight.netconf.topology;
 
 import akka.actor.TypedActor.Receiver;
 import com.google.common.annotations.Beta;
+import org.opendaylight.netconf.sal.connect.api.RemoteDeviceHandler;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
 
 /**
  * Node manager that handles communication between node managers and delegates calls to the customizable NodeManagerCallback
  */
 @Beta
-public interface NodeManager extends InitialStateProvider, NodeListener, Receiver, RemoteNodeListener {
+public interface NodeManager extends InitialStateProvider, NodeListener, Receiver, RemoteNodeListener, RemoteDeviceHandler<NetconfSessionPreferences> {
 
 }
index fd0444aa5c207639c2fb6f6390eba69f83155858..670bb81ebe1d2ac4fbb487367af04938e2eddfb4 100644 (file)
@@ -11,12 +11,14 @@ package org.opendaylight.netconf.topology;
 import akka.actor.ActorSystem;
 import akka.actor.TypedActor.Receiver;
 import com.google.common.annotations.Beta;
+import org.opendaylight.netconf.sal.connect.api.RemoteDeviceHandler;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
 
 /**
  * Customizable layer that handles communication with your application.
  */
 @Beta
-public interface NodeManagerCallback extends InitialStateProvider, NodeListener, Receiver {
+public interface NodeManagerCallback extends InitialStateProvider, NodeListener, Receiver, RemoteDeviceHandler<NetconfSessionPreferences> {
 
     interface NodeManagerCallbackFactory<M> {
         NodeManagerCallback create(String nodeId, String topologyId, ActorSystem actorSystem);
index 39cccf653c6ee54a280a0aa4aebe49c4515d7676..f4f3013b7f9be842b14e03521c8ba30e9ad33879 100644 (file)
@@ -30,4 +30,10 @@ public interface RoleChangeStrategy extends RoleChangeListener {
      */
     void unregisterRoleCandidate();
 
+    /**
+     *
+     * @return True/False based on if this candidate is already registered into ownership service
+     */
+    boolean isCandidateRegistered();
+
 }
index ea7d05d98fcce15146dc9b305a92720b00eaab49..8eee06a54ed642af28fa4f3fafff772db45c27f7 100644 (file)
@@ -16,7 +16,7 @@ import com.google.common.annotations.Beta;
  * Customizable extension layer between the top level TopologyManager and NodeManager
  */
 @Beta
-public interface TopologyManagerCallback extends NodeListener, Receiver, RoleChangeListener {
+public interface TopologyManagerCallback extends InitialStateProvider, NodeListener, Receiver, RoleChangeListener {
 
     interface TopologyManagerCallbackFactory {
         TopologyManagerCallback create(ActorSystem actorSystem, String topologyId);
index 05b69fc79a40d6049072f08c2686057eb58e7922..823c44b47aa552bf8420d1eff71f13e106a00e60 100644 (file)
@@ -12,6 +12,9 @@ import akka.actor.ActorRef;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
 import org.opendaylight.netconf.topology.NodeManagerCallback;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeBuilder;
@@ -19,6 +22,7 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev15
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 public class ExampleNodeManagerCallback implements NodeManagerCallback {
 
@@ -70,4 +74,29 @@ public class ExampleNodeManagerCallback implements NodeManagerCallback {
     public void onRoleChanged(RoleChangeDTO roleChangeDTO) {
 
     }
+
+    @Override
+    public void onDeviceConnected(SchemaContext remoteSchemaContext, NetconfSessionPreferences netconfSessionPreferences, DOMRpcService deviceRpc) {
+
+    }
+
+    @Override
+    public void onDeviceDisconnected() {
+
+    }
+
+    @Override
+    public void onDeviceFailed(Throwable throwable) {
+
+    }
+
+    @Override
+    public void onNotification(DOMNotification domNotification) {
+
+    }
+
+    @Override
+    public void close() {
+
+    }
 }
index 76e55acf4ad45fc7e6f85efc22ae7a8d04b66dc2..a2eed6a01954cb3988ad6ff487e8f51c476101a6 100644 (file)
@@ -17,6 +17,7 @@ import com.google.common.util.concurrent.ListenableFuture;
 import java.util.HashMap;
 import java.util.Map;
 import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
 import org.opendaylight.netconf.topology.NodeManager;
 import org.opendaylight.netconf.topology.NodeManagerCallback.NodeManagerCallbackFactory;
@@ -147,4 +148,16 @@ public class ExampleTopologyManagerCallback implements TopologyManagerCallback {
     public void onReceive(Object o, ActorRef actorRef) {
 
     }
+
+    @Nonnull
+    @Override
+    public Node getInitialState(@Nonnull NodeId nodeId, @Nonnull Node configNode) {
+        return nodes.get(nodeId).getInitialState(nodeId, configNode);
+    }
+
+    @Nonnull
+    @Override
+    public Node getFailedState(@Nonnull NodeId nodeId, @Nullable Node configNode) {
+        return nodes.get(nodeId).getFailedState(nodeId, configNode);
+    }
 }
index 03151505f72c3aa6aa1ab01af487dd618e9f9c24..2f8b1b7f6220cc28cfbff9064f39caa30b281f44 100644 (file)
@@ -17,6 +17,9 @@ import akka.japi.Creator;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ListenableFuture;
 import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
 import org.opendaylight.netconf.topology.NodeManager;
 import org.opendaylight.netconf.topology.NodeManagerCallback;
 import org.opendaylight.netconf.topology.NodeManagerCallback.NodeManagerCallbackFactory;
@@ -24,6 +27,7 @@ import org.opendaylight.netconf.topology.RoleChangeStrategy;
 import org.opendaylight.netconf.topology.util.messages.NormalizedNodeMessage;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -36,7 +40,7 @@ public final class BaseNodeManager implements NodeManager {
     private final String topologyId;
     private final ActorSystem actorSystem;
 
-    private boolean isMaster;
+    private boolean isMaster = false;
     private NodeManagerCallback delegate;
 
     private BaseNodeManager(final String nodeId,
@@ -99,7 +103,7 @@ public final class BaseNodeManager implements NodeManager {
 
     @Override
     public void onReceive(Object o, ActorRef actorRef) {
-
+        delegate.onReceive(o, actorRef);
     }
 
     @Override
@@ -122,6 +126,31 @@ public final class BaseNodeManager implements NodeManager {
         return null;
     }
 
+    @Override
+    public void onDeviceConnected(SchemaContext remoteSchemaContext, NetconfSessionPreferences netconfSessionPreferences, DOMRpcService deviceRpc) {
+        delegate.onDeviceConnected(remoteSchemaContext, netconfSessionPreferences, deviceRpc);
+    }
+
+    @Override
+    public void onDeviceDisconnected() {
+        delegate.onDeviceDisconnected();
+    }
+
+    @Override
+    public void onDeviceFailed(Throwable throwable) {
+        delegate.onDeviceFailed(throwable);
+    }
+
+    @Override
+    public void onNotification(DOMNotification domNotification) {
+        delegate.onNotification(domNotification);
+    }
+
+    @Override
+    public void close() {
+        // NOOP
+    }
+
     /**
      * Builder of BaseNodeManager instances that are proxied as TypedActors
      */
index ca55bb0552ee5e5bf4d7ce41e56652f023ee5fec..682555ef931145313f471e551c1ad6757249aa9c 100644 (file)
@@ -8,9 +8,12 @@
 
 package org.opendaylight.netconf.topology.util;
 
+import akka.actor.ActorContext;
+import akka.actor.ActorIdentity;
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Address;
+import akka.actor.Identify;
 import akka.actor.TypedActor;
 import akka.actor.TypedActorExtension;
 import akka.actor.TypedProps;
@@ -24,18 +27,25 @@ import akka.cluster.ClusterEvent.ReachableMember;
 import akka.cluster.ClusterEvent.UnreachableMember;
 import akka.cluster.Member;
 import akka.dispatch.OnComplete;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import javax.annotation.Nonnull;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.netconf.topology.NodeManager;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.netconf.topology.RoleChangeStrategy;
 import org.opendaylight.netconf.topology.StateAggregator;
 import org.opendaylight.netconf.topology.TopologyManager;
@@ -53,17 +63,22 @@ import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.
 import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
 import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
 import scala.concurrent.impl.Promise.DefaultPromise;
 
 public final class BaseTopologyManager
         implements TopologyManager {
 
     private static final Logger LOG = LoggerFactory.getLogger(BaseTopologyManager.class);
+    private static final InstanceIdentifier<NetworkTopology> NETWORK_TOPOLOGY_PATH = InstanceIdentifier.builder(NetworkTopology.class).build();
+
+    private final KeyedInstanceIdentifier<Topology, TopologyKey> topologyListPath;
 
     private final ActorSystem system;
     private final TypedActorExtension typedExtension;
@@ -80,8 +95,8 @@ public final class BaseTopologyManager
     private final NodeWriter naSalNodeWriter;
     private final String topologyId;
     private final TopologyManagerCallback delegateTopologyHandler;
+    private final Set<NodeId> created = new HashSet<>();
 
-    private final Map<NodeId, NodeManager> nodes = new HashMap<>();
     private final Map<Address, TopologyManager> peers = new HashMap<>();
     private TopologyManager masterPeer = null;
     private final int id = new Random().nextInt();
@@ -123,6 +138,8 @@ public final class BaseTopologyManager
         // election has not yet happened
         this.isMaster = isMaster;
 
+        this.topologyListPath = NETWORK_TOPOLOGY_PATH.child(Topology.class, new TopologyKey(new TopologyId(topologyId)));
+
         LOG.debug("Base manager started ", +id);
     }
 
@@ -146,6 +163,11 @@ public final class BaseTopologyManager
     public ListenableFuture<Node> onNodeCreated(final NodeId nodeId, final Node node) {
         LOG.debug("TopologyManager({}) onNodeCreated received, nodeid: {} , isMaster: {}", id, nodeId.getValue(), isMaster);
 
+        if (created.contains(nodeId)) {
+            LOG.warn("Node{} already exists, triggering update..", nodeId);
+            return onNodeUpdated(nodeId, node);
+        }
+        created.add(nodeId);
         final ArrayList<ListenableFuture<Node>> futures = new ArrayList<>();
 
         if (isMaster) {
@@ -191,7 +213,7 @@ public final class BaseTopologyManager
                 public void onFailure(final Throwable t) {
                     // If the combined connection attempt failed, set the node to connection failed
                     LOG.debug("Futures aggregation failed");
-                    naSalNodeWriter.update(nodeId, nodes.get(nodeId).getFailedState(nodeId, node));
+                    naSalNodeWriter.update(nodeId, delegateTopologyHandler.getFailedState(nodeId, node));
                     // FIXME disconnect those which succeeded
                     // just issue a delete on delegateTopologyHandler that gets handled on lower level
                 }
@@ -209,55 +231,37 @@ public final class BaseTopologyManager
     public ListenableFuture<Node> onNodeUpdated(final NodeId nodeId, final Node node) {
         LOG.debug("TopologyManager({}) onNodeUpdated received, nodeid: {}", id, nodeId.getValue());
 
-        final ArrayList<ListenableFuture<Node>> futures = new ArrayList<>();
-
         // Master needs to trigger onNodeUpdated on peers and combine results
         if (isMaster) {
-            futures.add(delegateTopologyHandler.onNodeUpdated(nodeId, node));
-            for (TopologyManager topologyManager : peers.values()) {
-                // convert binding into NormalizedNode for transfer
-                final Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> normalizedNodeEntry = codecRegistry.toNormalizedNode(getNodeIid(topologyId), node);
-
-                // add a future into our futures that gets its completion status from the converted scala future
-                final SettableFuture<Node> settableFuture = SettableFuture.create();
-                futures.add(settableFuture);
-                final Future<NormalizedNodeMessage> scalaFuture = topologyManager.onRemoteNodeUpdated(new NormalizedNodeMessage(normalizedNodeEntry.getKey(), normalizedNodeEntry.getValue()));
-                scalaFuture.onComplete(new OnComplete<NormalizedNodeMessage>() {
-                    @Override
-                    public void onComplete(Throwable failure, NormalizedNodeMessage success) throws Throwable {
-                        if (failure != null) {
-                            settableFuture.setException(failure);
-                            return;
+            // first cleanup old node
+            final ListenableFuture<Void> deleteFuture = onNodeDeleted(nodeId);
+            final SettableFuture<Node> createFuture = SettableFuture.create();
+            final TopologyManager selfProxy = TypedActor.self();
+            final ActorContext context = TypedActor.context();
+            Futures.addCallback(deleteFuture, new FutureCallback<Void>() {
+                @Override
+                public void onSuccess(Void result) {
+                    LOG.warn("Delete part of update succesfull, triggering create");
+                    // trigger create on all nodes
+                    Futures.addCallback(selfProxy.onNodeCreated(nodeId, node), new FutureCallback<Node>() {
+                        @Override
+                        public void onSuccess(Node result) {
+                            createFuture.set(result);
                         }
-                        final Entry<InstanceIdentifier<?>, DataObject> fromNormalizedNode =
-                                codecRegistry.fromNormalizedNode(success.getIdentifier(), success.getNode());
-                        final Node value = (Node) fromNormalizedNode.getValue();
-
-                        settableFuture.set(value);
-                    }
-                }, TypedActor.context().dispatcher());
-            }
 
-            final ListenableFuture<Node> aggregatedFuture = aggregator.combineUpdateAttempts(futures);
-            Futures.addCallback(aggregatedFuture, new FutureCallback<Node>() {
-                @Override
-                public void onSuccess(final Node result) {
-                    // FIXME make this (writing state data for nodes) optional and customizable
-                    // this should be possible with providing your own NodeWriter implementation, maybe rename this interface?
-                    naSalNodeWriter.update(nodeId, result);
+                        @Override
+                        public void onFailure(Throwable t) {
+                            createFuture.setException(t);
+                        }
+                    }, context.dispatcher());
                 }
 
                 @Override
-                public void onFailure(final Throwable t) {
-                    // If the combined connection attempt failed, set the node to connection failed
-                    naSalNodeWriter.update(nodeId, nodes.get(nodeId).getFailedState(nodeId, node));
-                    // FIXME disconnect those which succeeded
-                    // just issue a delete on delegateTopologyHandler that gets handled on lower level
+                public void onFailure(Throwable t) {
+                    LOG.warn("Delete part of update failed, {}", t);
                 }
-            });
-
-            //combine peer futures
-            return aggregatedFuture;
+            }, context.dispatcher());
+            return createFuture;
         }
 
         // Trigger update on this slave
@@ -272,6 +276,7 @@ public final class BaseTopologyManager
     @Override
     public ListenableFuture<Void> onNodeDeleted(final NodeId nodeId) {
         final ArrayList<ListenableFuture<Void>> futures = new ArrayList<>();
+        created.remove(nodeId);
 
         // Master needs to trigger delete on peers and combine results
         if (isMaster) {
@@ -376,7 +381,7 @@ public final class BaseTopologyManager
                 public void onFailure(final Throwable t) {
                     // If the combined connection attempt failed, set the node to connection failed
                     LOG.debug("Futures aggregation failed");
-                    naSalNodeWriter.update(nodeId, nodes.get(nodeId).getFailedState(nodeId, null));
+                    naSalNodeWriter.update(nodeId, delegateTopologyHandler.getFailedState(nodeId, null));
                     // FIXME disconnect those which succeeded
                     // just issue a delete on delegateTopologyHandler that gets handled on lower level
                 }
@@ -518,7 +523,8 @@ public final class BaseTopologyManager
             final String path = member.address() + PATH + topologyId;
             LOG.debug("Actor at :{} is resolving topology actor for path {}", clusterExtension.selfAddress(), path);
 
-            clusterExtension.system().actorSelection(path).tell(new CustomIdentifyMessage(clusterExtension.selfAddress()), TypedActor.context().self());
+            // first send basic identify message in case our messages have not been loaded through osgi yet to prevent crashing akka.
+            clusterExtension.system().actorSelection(path).tell(new Identify(member.address()), TypedActor.context().self());
         } else if (message instanceof MemberExited) {
             // remove peer
             final Member member = ((MemberExited) message).member();
@@ -546,20 +552,71 @@ public final class BaseTopologyManager
             final String path = member.address() + PATH + topologyId;
             LOG.debug("Actor at :{} is resolving topology actor for path {}", clusterExtension.selfAddress(), path);
 
+            clusterExtension.system().actorSelection(path).tell(new Identify(member.address()), TypedActor.context().self());
+        } else if (message instanceof ActorIdentity) {
+            LOG.debug("Received ActorIdentity message", message);
+            final String path = ((ActorIdentity) message).correlationId() + PATH + topologyId;
+            if (((ActorIdentity) message).getRef() == null) {
+                LOG.debug("ActorIdentity has null actor ref, retrying..", message);
+                final ActorRef self = TypedActor.context().self();
+                final ActorContext context = TypedActor.context();
+                system.scheduler().scheduleOnce(new FiniteDuration(5, TimeUnit.SECONDS), new Runnable() {
+                    @Override
+                    public void run() {
+                        LOG.debug("Retrying identify message from master to node {} , full path {}", ((ActorIdentity) message).correlationId(), path);
+                        context.system().actorSelection(path).tell(new Identify(((ActorIdentity) message).correlationId()), self);
+
+                    }
+                }, system.dispatcher());
+                return;
+            }
+            LOG.debug("Actor at :{} is resolving topology actor for path {}, with a custom message", clusterExtension.selfAddress(), path);
+
             clusterExtension.system().actorSelection(path).tell(new CustomIdentifyMessage(clusterExtension.selfAddress()), TypedActor.context().self());
         } else if (message instanceof CustomIdentifyMessageReply) {
-            LOG.debug("Received a custom identify reply message from: {}", ((CustomIdentifyMessageReply) message).getAddress());
+
+            LOG.warn("Received a custom identify reply message from: {}", ((CustomIdentifyMessageReply) message).getAddress());
             if (!peers.containsKey(((CustomIdentifyMessage) message).getAddress())) {
                 final TopologyManager peer = typedExtension.typedActorOf(new TypedProps<>(TopologyManager.class, BaseTopologyManager.class), actorRef);
                 peers.put(((CustomIdentifyMessageReply) message).getAddress(), peer);
+                if (isMaster) {
+                    resyncPeer(peer);
+                }
             }
         } else if (message instanceof CustomIdentifyMessage) {
-            LOG.debug("Received a custom identify message from: {}", ((CustomIdentifyMessage) message).getAddress());
+            LOG.warn("Received a custom identify message from: {}", ((CustomIdentifyMessage) message).getAddress());
             if (!peers.containsKey(((CustomIdentifyMessage) message).getAddress())) {
                 final TopologyManager peer = typedExtension.typedActorOf(new TypedProps<>(TopologyManager.class, BaseTopologyManager.class), actorRef);
                 peers.put(((CustomIdentifyMessage) message).getAddress(), peer);
+                if (isMaster) {
+                    resyncPeer(peer);
+                }
             }
             actorRef.tell(new CustomIdentifyMessageReply(clusterExtension.selfAddress()), TypedActor.context().self());
         }
     }
+
+    private void resyncPeer(final TopologyManager peer) {
+        final ReadOnlyTransaction rTx = dataBroker.newReadOnlyTransaction();
+        final CheckedFuture<Optional<Topology>, ReadFailedException> read = rTx.read(LogicalDatastoreType.CONFIGURATION, topologyListPath);
+
+        Futures.addCallback(read, new FutureCallback<Optional<Topology>>() {
+            @Override
+            public void onSuccess(Optional<Topology> result) {
+                if (result.isPresent()) {
+                    for (final Node node : result.get().getNode()) {
+                        final Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> entry = codecRegistry.toNormalizedNode(getNodeIid(topologyId), node);
+                        peer.onRemoteNodeCreated(new NormalizedNodeMessage(entry.getKey(), entry.getValue()));
+                        // we dont care about the future from now on since we will be notified by the onConnected event
+                    }
+                }
+            }
+
+            @Override
+            public void onFailure(Throwable t) {
+                LOG.error("Unable to read from datastore");
+            }
+        });
+
+    }
 }
index 55bdb2e2dd071e548c387337b28dd2388e7c74f4..4783404b54149555ddab4affde1fed29dfe95342 100644 (file)
@@ -27,6 +27,7 @@ public class NodeRoleChangeStrategy implements RoleChangeStrategy, EntityOwnersh
     private final EntityOwnershipService entityOwnershipService;
     private final String entityType;
     private final String entityName;
+    private final Entity entity;
     private NodeListener ownershipCandidate;
 
     private EntityOwnershipCandidateRegistration candidateRegistration = null;
@@ -38,6 +39,7 @@ public class NodeRoleChangeStrategy implements RoleChangeStrategy, EntityOwnersh
         this.entityOwnershipService = entityOwnershipService;
         this.entityType = entityType + "/" + entityName;
         this.entityName = entityName;
+        this.entity = new Entity(this.entityType, entityName);
     }
 
     @Override
@@ -48,7 +50,7 @@ public class NodeRoleChangeStrategy implements RoleChangeStrategy, EntityOwnersh
             if (candidateRegistration != null) {
                 unregisterRoleCandidate();
             }
-            candidateRegistration = entityOwnershipService.registerCandidate(new Entity(entityType, entityName));
+            candidateRegistration = entityOwnershipService.registerCandidate(entity);
             ownershipListenerRegistration = entityOwnershipService.registerListener(entityType, this);
         } catch (CandidateAlreadyRegisteredException e) {
             LOG.error("Candidate already registered for election", e);
@@ -59,10 +61,19 @@ public class NodeRoleChangeStrategy implements RoleChangeStrategy, EntityOwnersh
     @Override
     public void unregisterRoleCandidate() {
         LOG.debug("Unregistering role candidate");
-        candidateRegistration.close();
-        candidateRegistration = null;
-        ownershipListenerRegistration.close();
-        ownershipListenerRegistration = null;
+        if (candidateRegistration != null) {
+            candidateRegistration.close();
+            candidateRegistration = null;
+        }
+        if (ownershipListenerRegistration != null) {
+            ownershipListenerRegistration.close();
+            ownershipListenerRegistration = null;
+        }
+    }
+
+    @Override
+    public boolean isCandidateRegistered() {
+        return entityOwnershipService.isCandidateRegistered(entity);
     }
 
     @Override
index ab76cc2d64b01a71a9b9d08fdb984cd026470cb1..ea6e5d5eeb2a0850fde981857d0a711cefa04773 100644 (file)
@@ -27,6 +27,11 @@ public class NoopRoleChangeStrategy implements RoleChangeStrategy {
 
     }
 
+    @Override
+    public boolean isCandidateRegistered() {
+        return false;
+    }
+
     @Override
     public void onRoleChanged(RoleChangeDTO roleChangeDTO) {
 
index de9f7aca39aa45180603f40bfb32cee4ae3a21c9..94cd8799af1122c56a26a038f65751f856fb28f7 100644 (file)
@@ -49,7 +49,7 @@ public class TopologyRoleChangeStrategy implements RoleChangeStrategy, Clustered
     private NodeListener ownershipCandidate;
     private final String entityType;
     // use topologyId as entityName
-    private final String entityName;
+    private final Entity entity;
 
     private EntityOwnershipCandidateRegistration candidateRegistration = null;
     private EntityOwnershipListenerRegistration ownershipListenerRegistration = null;
@@ -63,7 +63,7 @@ public class TopologyRoleChangeStrategy implements RoleChangeStrategy, Clustered
         this.dataBroker = dataBroker;
         this.entityOwnershipService = entityOwnershipService;
         this.entityType = entityType;
-        this.entityName = entityName;
+        this.entity = new Entity(entityType, entityName);
 
         datastoreListenerRegistration = null;
     }
@@ -76,7 +76,7 @@ public class TopologyRoleChangeStrategy implements RoleChangeStrategy, Clustered
             if (candidateRegistration != null) {
                 unregisterRoleCandidate();
             }
-            candidateRegistration = entityOwnershipService.registerCandidate(new Entity(entityType, entityName));
+            candidateRegistration = entityOwnershipService.registerCandidate(entity);
             ownershipListenerRegistration = entityOwnershipService.registerListener(entityType, this);
         } catch (CandidateAlreadyRegisteredException e) {
             LOG.error("Candidate already registered for election", e);
@@ -92,6 +92,11 @@ public class TopologyRoleChangeStrategy implements RoleChangeStrategy, Clustered
         ownershipListenerRegistration = null;
     }
 
+    @Override
+    public boolean isCandidateRegistered() {
+        return entityOwnershipService.isCandidateRegistered(entity);
+    }
+
     @Override
     public void onRoleChanged(RoleChangeDTO roleChangeDTO) {
         if (roleChangeDTO.isOwner()) {
diff --git a/opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/messages/AnnounceMasterMountPoint.java b/opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/messages/AnnounceMasterMountPoint.java
new file mode 100644 (file)
index 0000000..6624a28
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.util.messages;
+
+import java.io.Serializable;
+
+public class AnnounceMasterMountPoint implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    public AnnounceMasterMountPoint() {}
+}
diff --git a/opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/messages/AnnounceMasterMountPointDown.java b/opendaylight/netconf/abstract-topology/src/main/java/org/opendaylight/netconf/topology/util/messages/AnnounceMasterMountPointDown.java
new file mode 100644 (file)
index 0000000..a7c3f87
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.util.messages;
+
+import java.io.Serializable;
+
+public class AnnounceMasterMountPointDown implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    public AnnounceMasterMountPointDown() {
+
+    }
+}
index fb9c4257f5010dc0f3e4963a5333948a686f87e3..4944abfa417f62120677a5680115cb346b1d0331 100644 (file)
             <version>1.6.5</version>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-model-api</artifactId>
+        </dependency>
     </dependencies>
 
     <build>
index 88ed13ca6de1dc3689db3afc70621d6cf3da84cd..8f3f62140f1794278db72bfd30279286c77a52ec 100644 (file)
@@ -32,6 +32,7 @@ import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
 import org.opendaylight.controller.sal.core.api.Provider;
 import org.opendaylight.netconf.client.NetconfClientDispatcher;
+import org.opendaylight.netconf.client.NetconfClientSessionListener;
 import org.opendaylight.netconf.client.conf.NetconfClientConfiguration;
 import org.opendaylight.netconf.client.conf.NetconfReconnectingClientConfiguration;
 import org.opendaylight.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
@@ -78,12 +79,12 @@ public abstract class AbstractNetconfTopology implements NetconfTopology, Bindin
 
     private static final Logger LOG = LoggerFactory.getLogger(AbstractNetconfTopology.class);
 
-    private static final long DEFAULT_REQUEST_TIMEOUT_MILIS = 60000L;
-    private static final int DEFAULT_KEEPALIVE_DELAY = 0;
-    private static final boolean DEFAULT_RECONNECT_ON_CHANGED_SCHEMA = false;
-    private static final int DEFAULT_MAX_CONNECTION_ATTEMPTS = 0;
-    private static final int DEFAULT_BETWEEN_ATTEMPTS_TIMEOUT_MILLIS = 2000;
-    private static final BigDecimal DEFAULT_SLEEP_FACTOR = new BigDecimal(1.5);
+    protected static final long DEFAULT_REQUEST_TIMEOUT_MILIS = 60000L;
+    protected static final int DEFAULT_KEEPALIVE_DELAY = 0;
+    protected static final boolean DEFAULT_RECONNECT_ON_CHANGED_SCHEMA = false;
+    protected static final int DEFAULT_MAX_CONNECTION_ATTEMPTS = 0;
+    protected static final int DEFAULT_BETWEEN_ATTEMPTS_TIMEOUT_MILLIS = 2000;
+    protected static final BigDecimal DEFAULT_SLEEP_FACTOR = new BigDecimal(1.5);
 
     private static FilesystemSchemaSourceCache<YangTextSchemaSource> CACHE = null;
     //keep track of already initialized repositories to avoid adding redundant listeners
@@ -92,14 +93,14 @@ public abstract class AbstractNetconfTopology implements NetconfTopology, Bindin
     protected final String topologyId;
     private final NetconfClientDispatcher clientDispatcher;
     protected final BindingAwareBroker bindingAwareBroker;
-    private final Broker domBroker;
+    protected final Broker domBroker;
     private final EventExecutor eventExecutor;
-    private final ScheduledThreadPool keepaliveExecutor;
-    private final ThreadPool processingExecutor;
-    private final SharedSchemaRepository sharedSchemaRepository;
+    protected final ScheduledThreadPool keepaliveExecutor;
+    protected final ThreadPool processingExecutor;
+    protected final SharedSchemaRepository sharedSchemaRepository;
 
-    private SchemaSourceRegistry schemaRegistry = null;
-    private SchemaContextFactory schemaContextFactory = null;
+    protected SchemaSourceRegistry schemaRegistry = null;
+    protected SchemaContextFactory schemaContextFactory = null;
 
     protected DOMMountPointService mountPointService = null;
     protected DataBroker dataBroker = null;
@@ -181,7 +182,7 @@ public abstract class AbstractNetconfTopology implements NetconfTopology, Bindin
         return Futures.immediateFuture(null);
     }
 
-    private ListenableFuture<NetconfDeviceCapabilities> setupConnection(final NodeId nodeId,
+    protected ListenableFuture<NetconfDeviceCapabilities> setupConnection(final NodeId nodeId,
                                                                         final Node configNode) {
         final NetconfNode netconfNode = configNode.getAugmentation(NetconfNode.class);
 
@@ -191,8 +192,10 @@ public abstract class AbstractNetconfTopology implements NetconfTopology, Bindin
 
         final NetconfConnectorDTO deviceCommunicatorDTO = createDeviceCommunicator(nodeId, netconfNode);
         final NetconfDeviceCommunicator deviceCommunicator = deviceCommunicatorDTO.getCommunicator();
-        final NetconfReconnectingClientConfiguration clientConfig = getClientConfig(deviceCommunicator, netconfNode);
+        final NetconfClientSessionListener netconfClientSessionListener = deviceCommunicatorDTO.getSessionListener();
+        final NetconfReconnectingClientConfiguration clientConfig = getClientConfig(netconfClientSessionListener, netconfNode);
         final ListenableFuture<NetconfDeviceCapabilities> future = deviceCommunicator.initializeRemoteConnection(clientDispatcher, clientConfig);
+
         activeConnectors.put(nodeId, deviceCommunicatorDTO);
 
         Futures.addCallback(future, new FutureCallback<NetconfDeviceCapabilities>() {
@@ -211,7 +214,7 @@ public abstract class AbstractNetconfTopology implements NetconfTopology, Bindin
         return future;
     }
 
-    private NetconfConnectorDTO createDeviceCommunicator(final NodeId nodeId,
+    protected NetconfConnectorDTO createDeviceCommunicator(final NodeId nodeId,
                                                          final NetconfNode node) {
         //setup default values since default value is not supported yet in mdsal
         // TODO remove this when mdsal starts supporting default values
@@ -242,7 +245,7 @@ public abstract class AbstractNetconfTopology implements NetconfTopology, Bindin
         return new NetconfConnectorDTO(new NetconfDeviceCommunicator(remoteDeviceId, device), salFacade);
     }
 
-    public NetconfReconnectingClientConfiguration getClientConfig(final NetconfDeviceCommunicator listener, NetconfNode node) {
+    public NetconfReconnectingClientConfiguration getClientConfig(final NetconfClientSessionListener listener, NetconfNode node) {
 
         //setup default values since default value is not supported yet in mdsal
         // TODO remove this when mdsal starts supporting default values
@@ -357,12 +360,12 @@ public abstract class AbstractNetconfTopology implements NetconfTopology, Bindin
         }
     }
 
-    protected static final class NetconfConnectorDTO {
+    protected static class NetconfConnectorDTO {
 
         private final NetconfDeviceCommunicator communicator;
         private final RemoteDeviceHandler<NetconfSessionPreferences> facade;
 
-        private NetconfConnectorDTO(final NetconfDeviceCommunicator communicator, final RemoteDeviceHandler<NetconfSessionPreferences> facade) {
+        public NetconfConnectorDTO(final NetconfDeviceCommunicator communicator, final RemoteDeviceHandler<NetconfSessionPreferences> facade) {
             this.communicator = communicator;
             this.facade = facade;
         }
@@ -374,6 +377,10 @@ public abstract class AbstractNetconfTopology implements NetconfTopology, Bindin
         public RemoteDeviceHandler<NetconfSessionPreferences> getFacade() {
             return facade;
         }
+
+        public NetconfClientSessionListener getSessionListener() {
+            return communicator;
+        }
     }
 
 }
index c9b5409d20e24c2f970bdea7c8784918be4bf794..a8743ec56dbcbe37123e57732197cc48f531fbaf 100644 (file)
@@ -8,6 +8,8 @@
 
 package org.opendaylight.netconf.topology;
 
+import akka.actor.ActorContext;
+import akka.actor.ActorRef;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
 import org.opendaylight.netconf.sal.connect.api.RemoteDeviceHandler;
@@ -28,7 +30,20 @@ public interface NetconfTopology {
 
     ListenableFuture<Void> disconnectNode(NodeId nodeId);
 
-    void registerMountPoint(NodeId nodeId);
+    /**
+     * Registers a master mount point for the given node.
+     * @param context actor context of the calling topology actor, used to create the mount point actor
+     * @param nodeId id of the node whose mount point should be registered
+     */
+    void registerMountPoint(ActorContext context, NodeId nodeId);
+
+    /**
+     * Registers a slave mount point that forwards to the master identified by the provided ActorRef.
+     * @param context actor context of the calling topology actor, used to create the mount point actor
+     * @param nodeId id of the node whose mount point should be registered
+     * @param masterRef reference to the master mount point's actor
+     */
+    void registerMountPoint(ActorContext context, NodeId nodeId, ActorRef masterRef);
 
     void unregisterMountPoint(NodeId nodeId);
 
index 25b4e2d2f7f1ba9c1923d617654c59066fac16ae..6b64d9513e93110b9ff51326ab6c8477f405bfe3 100644 (file)
@@ -8,6 +8,8 @@
 
 package org.opendaylight.netconf.topology.impl;
 
+import akka.actor.ActorContext;
+import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.TypedActor;
 import akka.actor.TypedActorExtension;
@@ -16,6 +18,7 @@ import akka.japi.Creator;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import io.netty.util.concurrent.EventExecutor;
+import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Collections;
 import javassist.ClassPool;
@@ -27,8 +30,12 @@ import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
 import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.netconf.client.NetconfClientDispatcher;
+import org.opendaylight.netconf.client.NetconfClientSessionListener;
 import org.opendaylight.netconf.sal.connect.api.RemoteDeviceHandler;
+import org.opendaylight.netconf.sal.connect.netconf.NetconfDevice;
+import org.opendaylight.netconf.sal.connect.netconf.NetconfStateSchemas;
 import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
+import org.opendaylight.netconf.sal.connect.netconf.sal.KeepaliveSalFacade;
 import org.opendaylight.netconf.sal.connect.util.RemoteDeviceId;
 import org.opendaylight.netconf.topology.AbstractNetconfTopology;
 import org.opendaylight.netconf.topology.NetconfTopology;
@@ -39,13 +46,18 @@ import org.opendaylight.netconf.topology.TopologyManager;
 import org.opendaylight.netconf.topology.TopologyManagerCallback;
 import org.opendaylight.netconf.topology.TopologyManagerCallback.TopologyManagerCallbackFactory;
 import org.opendaylight.netconf.topology.example.LoggingSalNodeWriter;
+import org.opendaylight.netconf.topology.pipeline.ClusteredNetconfDevice;
+import org.opendaylight.netconf.topology.pipeline.ClusteredNetconfDeviceCommunicator;
+import org.opendaylight.netconf.topology.pipeline.ClusteredNetconfDeviceCommunicator.NetconfClientSessionListenerRegistration;
 import org.opendaylight.netconf.topology.pipeline.TopologyMountPointFacade;
 import org.opendaylight.netconf.topology.pipeline.TopologyMountPointFacade.ConnectionStatusListenerRegistration;
 import org.opendaylight.netconf.topology.util.BaseTopologyManager;
 import org.opendaylight.netconf.topology.util.NodeRoleChangeStrategy;
 import org.opendaylight.netconf.topology.util.NodeWriter;
 import org.opendaylight.netconf.topology.util.TopologyRoleChangeStrategy;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.$YangModuleInfoImpl;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
 import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
 import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
@@ -91,6 +103,8 @@ public class ClusteredNetconfTopology extends AbstractNetconfTopology implements
         LOG.warn("Clustered netconf topo started");
     }
 
+
+
     @Override
     public void onSessionInitiated(final ProviderContext session) {
         dataBroker = session.getSALService(DataBroker.class);
@@ -116,23 +130,60 @@ public class ClusteredNetconfTopology extends AbstractNetconfTopology implements
     public void close() throws Exception {
         // close all existing connectors, delete whole topology in datastore?
         for (NetconfConnectorDTO connectorDTO : activeConnectors.values()) {
-            connectorDTO.getCommunicator().disconnect();
+            connectorDTO.getCommunicator().close();
         }
         activeConnectors.clear();
     }
 
+    @Override
+    protected NetconfConnectorDTO createDeviceCommunicator(final NodeId nodeId,
+                                                           final NetconfNode node) {
+        //setup default values since default value is not supported yet in mdsal
+        // TODO remove this when mdsal starts supporting default values
+        final Long defaultRequestTimeoutMillis = node.getDefaultRequestTimeoutMillis() == null ? DEFAULT_REQUEST_TIMEOUT_MILIS : node.getDefaultRequestTimeoutMillis();
+        final Long keepaliveDelay = node.getKeepaliveDelay() == null ? DEFAULT_KEEPALIVE_DELAY : node.getKeepaliveDelay();
+        final Boolean reconnectOnChangedSchema = node.isReconnectOnChangedSchema() == null ? DEFAULT_RECONNECT_ON_CHANGED_SCHEMA : node.isReconnectOnChangedSchema();
+
+        IpAddress ipAddress = node.getHost().getIpAddress();
+        InetSocketAddress address = new InetSocketAddress(ipAddress.getIpv4Address() != null ?
+                ipAddress.getIpv4Address().getValue() : ipAddress.getIpv6Address().getValue(),
+                node.getPort().getValue());
+        RemoteDeviceId remoteDeviceId = new RemoteDeviceId(nodeId.getValue(), address);
+
+        RemoteDeviceHandler<NetconfSessionPreferences> salFacade =
+                createSalFacade(remoteDeviceId, domBroker, bindingAwareBroker, defaultRequestTimeoutMillis);
+
+        if (keepaliveDelay > 0) {
+            LOG.warn("Adding keepalive facade, for device {}", nodeId);
+            salFacade = new KeepaliveSalFacade(remoteDeviceId, salFacade, keepaliveExecutor.getExecutor(), keepaliveDelay);
+        }
+
+        NetconfDevice.SchemaResourcesDTO schemaResourcesDTO =
+                new NetconfDevice.SchemaResourcesDTO(schemaRegistry, schemaContextFactory, new NetconfStateSchemas.NetconfStateSchemasResolverImpl());
+
+        NetconfDevice device = new ClusteredNetconfDevice(schemaResourcesDTO, remoteDeviceId, salFacade,
+                processingExecutor.getExecutor(), sharedSchemaRepository, actorSystem, topologyId, nodeId.getValue(), TypedActor.context());
+
+        return new NetconfConnectorDTO(new ClusteredNetconfDeviceCommunicator(remoteDeviceId, device, entityOwnershipService), salFacade);
+    }
+
     @Override
     protected RemoteDeviceHandler<NetconfSessionPreferences> createSalFacade(final RemoteDeviceId id, final Broker domBroker, final BindingAwareBroker bindingBroker, long defaultRequestTimeoutMillis) {
-        return new TopologyMountPointFacade(id, domBroker, bindingBroker, defaultRequestTimeoutMillis);
+        return new TopologyMountPointFacade(topologyId, id, domBroker, bindingBroker, defaultRequestTimeoutMillis);
     }
 
     @Override
-    public void registerMountPoint(NodeId nodeId) {
-        ((TopologyMountPointFacade) activeConnectors.get(nodeId).getFacade()).registerMountPoint();
+    public void registerMountPoint(final ActorContext context, final NodeId nodeId) {
+        ((TopologyMountPointFacade) activeConnectors.get(nodeId).getFacade()).registerMountPoint(actorSystem, context);
     }
 
     @Override
-    public void unregisterMountPoint(NodeId nodeId) {
+    public void registerMountPoint(final ActorContext context, final NodeId nodeId, final ActorRef masterRef) {
+        ((TopologyMountPointFacade) activeConnectors.get(nodeId).getFacade()).registerMountPoint(actorSystem, context, masterRef);
+    }
+
+    @Override
+    public void unregisterMountPoint(final NodeId nodeId) {
         Preconditions.checkState(activeConnectors.containsKey(nodeId), "Cannot unregister nonexistent mountpoint");
         ((TopologyMountPointFacade) activeConnectors.get(nodeId).getFacade()).unregisterMountPoint();
     }
@@ -148,6 +199,11 @@ public class ClusteredNetconfTopology extends AbstractNetconfTopology implements
         return Collections.emptySet();
     }
 
+    public NetconfClientSessionListenerRegistration registerNetconfClientSessionListener(final NodeId node, final NetconfClientSessionListener listener) {
+        Preconditions.checkState(activeConnectors.containsKey(node), "Need to connect a node before a session listener can be registered");
+        return ((ClusteredNetconfDeviceCommunicator) activeConnectors.get(node).getCommunicator()).registerNetconfClientSessionListener(listener);
+    }
+
     static class TopologyCallbackFactory implements TopologyManagerCallbackFactory {
 
         private final NetconfTopology netconfTopology;
index a17650a6fb0403a30eeb33f445dd671682c548e2..c4eaae9ad77c3e8c67ee66c7f3378b263052e172 100644 (file)
@@ -8,6 +8,7 @@
 
 package org.opendaylight.netconf.topology.impl;
 
+import akka.actor.ActorContext;
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.TypedActor;
@@ -29,15 +30,23 @@ import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
 import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.netconf.sal.connect.api.RemoteDeviceHandler;
+import org.opendaylight.netconf.api.NetconfMessage;
+import org.opendaylight.netconf.api.NetconfTerminationReason;
+import org.opendaylight.netconf.client.NetconfClientSession;
+import org.opendaylight.netconf.client.NetconfClientSessionListener;
 import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfDeviceCapabilities;
 import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
 import org.opendaylight.netconf.topology.NetconfTopology;
+import org.opendaylight.netconf.topology.NodeManager;
 import org.opendaylight.netconf.topology.NodeManagerCallback;
 import org.opendaylight.netconf.topology.RoleChangeStrategy;
 import org.opendaylight.netconf.topology.TopologyManager;
+import org.opendaylight.netconf.topology.pipeline.ClusteredNetconfDeviceCommunicator.NetconfClientSessionListenerRegistration;
 import org.opendaylight.netconf.topology.pipeline.TopologyMountPointFacade.ConnectionStatusListenerRegistration;
+import org.opendaylight.netconf.topology.util.BaseNodeManager;
 import org.opendaylight.netconf.topology.util.BaseTopologyManager;
+import org.opendaylight.netconf.topology.util.messages.AnnounceMasterMountPoint;
+import org.opendaylight.netconf.topology.util.messages.AnnounceMasterMountPointDown;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeConnectionStatus.ConnectionStatus;
@@ -60,7 +69,7 @@ import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
-public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDeviceHandler<NetconfSessionPreferences>{
+public class NetconfNodeManagerCallback implements NodeManagerCallback, NetconfClientSessionListener{
 
     private static final Logger LOG = LoggerFactory.getLogger(NetconfNodeManagerCallback.class);
 
@@ -92,11 +101,18 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
     private String nodeId;
     private String topologyId;
     private TopologyManager topologyManager;
+    private NodeManager nodeManager;
+    // cached context so that we can use it in callbacks from topology
+    private ActorContext cachedContext;
 
     private Node currentConfig;
     private Node currentOperationalNode;
 
-    private ConnectionStatusListenerRegistration registration = null;
+    private ConnectionStatusListenerRegistration connectionStatusregistration = null;
+    private NetconfClientSessionListenerRegistration sessionListener = null;
+
+    private ActorRef masterDataBrokerRef = null;
+    private boolean connected = false;
 
     public NetconfNodeManagerCallback(final String nodeId,
                                       final String topologyId,
@@ -123,6 +139,18 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
                 topologyManager = TypedActor.get(actorSystem).typedActorOf(new TypedProps<>(TopologyManager.class, BaseTopologyManager.class), actorRef);
             }
         }, actorSystem.dispatcher());
+
+        final Future<ActorRef> nodeRefFuture = actorSystem.actorSelection("/user/" + topologyId + "/" + nodeId).resolveOne(FiniteDuration.create(10L, TimeUnit.SECONDS));
+        nodeRefFuture.onComplete(new OnComplete<ActorRef>() {
+            @Override
+            public void onComplete(Throwable throwable, ActorRef actorRef) throws Throwable {
+                if (throwable != null) {
+                    LOG.warn("Unable to resolve actor for path: {} ", "/user/" + topologyId + "/" + nodeId, throwable);
+                }
+                LOG.debug("Actor ref for path {} resolved", "/user/" + topologyId);
+                nodeManager = TypedActor.get(actorSystem).typedActorOf(new TypedProps<>(NodeManager.class, BaseNodeManager.class), actorRef);
+            }
+        }, actorSystem.dispatcher());
     }
 
 
@@ -138,6 +166,8 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
                                 .setHost(netconfNode.getHost())
                                 .setPort(netconfNode.getPort())
                                 .setConnectionStatus(ConnectionStatus.Connecting)
+                                .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                 .setClusteredConnectionStatus(
                                         new ClusteredConnectionStatusBuilder()
                                                 .setNodeStatus(
@@ -158,16 +188,18 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
     }
 
     @Nonnull @Override public Node getFailedState(@Nonnull final NodeId nodeId,
-                                                  @Nonnull final Node configNode) {
-        final NetconfNode netconfNode = configNode.getAugmentation(NetconfNode.class);
+                                                  @Nullable final Node configNode) {
+        final NetconfNode netconfNode = configNode == null ? currentOperationalNode.getAugmentation(NetconfNode.class) : configNode.getAugmentation(NetconfNode.class);
 
-        return new NodeBuilder()
+        final Node failedNode = new NodeBuilder()
                 .setNodeId(nodeId)
                 .addAugmentation(NetconfNode.class,
                         new NetconfNodeBuilder()
                                 .setHost(netconfNode.getHost())
                                 .setPort(netconfNode.getPort())
                                 .setConnectionStatus(ConnectionStatus.UnableToConnect)
+                                .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                 .setClusteredConnectionStatus(
                                         new ClusteredConnectionStatusBuilder()
                                                 .setNodeStatus(
@@ -179,10 +211,17 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
                                                 .build())
                                 .build())
                 .build();
+
+        if (currentOperationalNode == null) {
+            currentOperationalNode = failedNode;
+        }
+
+        return failedNode;
     }
 
     @Nonnull @Override public ListenableFuture<Node> onNodeCreated(@Nonnull final NodeId nodeId,
                                                                    @Nonnull final Node configNode) {
+        cachedContext = TypedActor.context();
         this.nodeId = nodeId.getValue();
         this.currentConfig = configNode;
         // set initial state before anything happens
@@ -194,7 +233,8 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
         Futures.addCallback(connectionFuture, new FutureCallback<NetconfDeviceCapabilities>() {
             @Override
             public void onSuccess(@Nullable NetconfDeviceCapabilities result) {
-                registration = topologyDispatcher.registerConnectionStatusListener(nodeId, NetconfNodeManagerCallback.this);
+                connectionStatusregistration = topologyDispatcher.registerConnectionStatusListener(nodeId, nodeManager);
+                sessionListener = topologyDispatcher.registerNetconfClientSessionListener(nodeId, NetconfNodeManagerCallback.this);
             }
 
             @Override
@@ -226,8 +266,8 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
                                                         .build())
                                         .setHost(netconfNode.getHost())
                                         .setPort(netconfNode.getPort())
-                                        .setAvailableCapabilities(new AvailableCapabilitiesBuilder().build())
-                                        .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().build())
+                                        .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                        .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                         .build()).build();
                 return currentOperationalNode;
             }
@@ -240,7 +280,10 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
                                                 @Nonnull final Node configNode) {
         // first disconnect this node
         topologyDispatcher.unregisterMountPoint(nodeId);
-        registration.close();
+
+        if (connectionStatusregistration != null) {
+            connectionStatusregistration.close();
+        }
         topologyDispatcher.disconnectNode(nodeId);
 
         // now reinit this connection with new settings
@@ -249,7 +292,7 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
         Futures.addCallback(connectionFuture, new FutureCallback<NetconfDeviceCapabilities>() {
             @Override
             public void onSuccess(@Nullable NetconfDeviceCapabilities result) {
-                registration = topologyDispatcher.registerConnectionStatusListener(nodeId, NetconfNodeManagerCallback.this);
+                connectionStatusregistration = topologyDispatcher.registerConnectionStatusListener(nodeId, NetconfNodeManagerCallback.this);
             }
 
             @Override
@@ -281,10 +324,10 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
                                                         .build())
                                         .setHost(netconfNode.getHost())
                                         .setPort(netconfNode.getPort())
-                                        .setAvailableCapabilities(new AvailableCapabilitiesBuilder().build())
-                                        .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().build())
+                                        .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                        .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                         .build())
-                                .build();
+                        .build();
             }
         });
     }
@@ -292,7 +335,10 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
     @Nonnull @Override public ListenableFuture<Void> onNodeDeleted(@Nonnull final NodeId nodeId) {
         // cleanup and disconnect
         topologyDispatcher.unregisterMountPoint(nodeId);
-        registration.close();
+
+        if(connectionStatusregistration != null) {
+            connectionStatusregistration.close();
+        }
         roleChangeStrategy.unregisterRoleCandidate();
         return topologyDispatcher.disconnectNode(nodeId);
     }
@@ -306,24 +352,23 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
 
     @Override
     public void onRoleChanged(final RoleChangeDTO roleChangeDTO) {
-        if (roleChangeDTO.isOwner() && roleChangeDTO.wasOwner()) {
-            return;
-        }
+        topologyDispatcher.unregisterMountPoint(new NodeId(nodeId));
+
         isMaster = roleChangeDTO.isOwner();
-        //TODO instead of registering mount point, init remote schema repo when its done
-        if (isMaster) {
-            // unregister old mountPoint if ownership changed, register a new one
-            topologyDispatcher.registerMountPoint(new NodeId(nodeId));
-        } else {
-            topologyDispatcher.unregisterMountPoint(new NodeId(nodeId));
-        }
     }
 
     @Override
     public void onDeviceConnected(final SchemaContext remoteSchemaContext, final NetconfSessionPreferences netconfSessionPreferences, final DOMRpcService deviceRpc) {
         // we need to notify the higher level that something happened, get a current status from all other nodes, and aggregate a new result
-        LOG.debug("onDeviceConnected received, registering role candidate");
-        roleChangeStrategy.registerRoleCandidate(this);
+        connected = true;
+        if (isMaster) {
+            LOG.debug("Master is done with schema resolution, registering mount point");
+            topologyDispatcher.registerMountPoint(TypedActor.context(), new NodeId(nodeId));
+        } else if (masterDataBrokerRef != null) {
+            LOG.warn("Device connected, master already present in topology, registering mount point");
+            topologyDispatcher.registerMountPoint(cachedContext, new NodeId(nodeId), masterDataBrokerRef);
+        }
+
         List<String> capabilityList = new ArrayList<>();
         capabilityList.addAll(netconfSessionPreferences.getNetconfDeviceCapabilities().getNonModuleBasedCapabilities());
         capabilityList.addAll(FluentIterable.from(netconfSessionPreferences.getNetconfDeviceCapabilities().getResolvedCapabilities()).transform(AVAILABLE_CAPABILITY_TRANSFORMER).toList());
@@ -354,22 +399,28 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
                                 .setUnavailableCapabilities(unavailableCapabilities)
                                 .build())
                 .build();
-        // TODO need to implement forwarding of this msg to master
         topologyManager.notifyNodeStatusChange(new NodeId(nodeId));
     }
 
     @Override
     public void onDeviceDisconnected() {
         // we need to notify the higher level that something happened, get a current status from all other nodes, and aggregate a new result
-        // no need to remove mountpoint, we should receive onRoleChanged callback after unregistering from election that unregisters the mountpoint
-        LOG.debug("onDeviceDisconnected received, unregistering role candidate");
-        topologyDispatcher.unregisterMountPoint(currentOperationalNode.getNodeId());
-        roleChangeStrategy.unregisterRoleCandidate();
+        LOG.debug("onDeviceDisconnected received, unregistered role candidate");
+        connected = false;
+        if (isMaster) {
+            // set master to false since we are unregistering, the ownershipChanged callback can sometimes lag behind causing multiple nodes to behave as masters
+            isMaster = false;
+            // onRoleChanged() callback can sometimes lag behind, so unregister the mount right when it disconnects
+            topologyDispatcher.unregisterMountPoint(new NodeId(nodeId));
+        }
+
         final NetconfNode netconfNode = currentConfig.getAugmentation(NetconfNode.class);
         currentOperationalNode = new NodeBuilder().setNodeId(new NodeId(nodeId))
                 .addAugmentation(NetconfNode.class,
                         new NetconfNodeBuilder()
                                 .setConnectionStatus(ConnectionStatus.Connecting)
+                                .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                 .setClusteredConnectionStatus(
                                         new ClusteredConnectionStatusBuilder()
                                                 .setNodeStatus(
@@ -382,7 +433,6 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
                                 .setHost(netconfNode.getHost())
                                 .setPort(netconfNode.getPort())
                                 .build()).build();
-        // TODO need to implement forwarding of this msg to master
         topologyManager.notifyNodeStatusChange(new NodeId(nodeId));
     }
 
@@ -390,14 +440,16 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
     public void onDeviceFailed(Throwable throwable) {
         // we need to notify the higher level that something happened, get a current status from all other nodes, and aggregate a new result
         // no need to remove mountpoint, we should receive onRoleChanged callback after unregistering from election that unregisters the mountpoint
-        LOG.debug("onDeviceFailed received");
+        LOG.warn("Netconf node {} failed with {}", nodeId, throwable);
+        connected = false;
         String reason = (throwable != null && throwable.getMessage() != null) ? throwable.getMessage() : UNKNOWN_REASON;
 
-        roleChangeStrategy.unregisterRoleCandidate();
         currentOperationalNode = new NodeBuilder().setNodeId(new NodeId(nodeId))
                 .addAugmentation(NetconfNode.class,
                         new NetconfNodeBuilder()
                                 .setConnectionStatus(ConnectionStatus.UnableToConnect)
+                                .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                 .setClusteredConnectionStatus(
                                         new ClusteredConnectionStatusBuilder()
                                                 .setNodeStatus(
@@ -412,7 +464,6 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
         topologyManager.notifyNodeStatusChange(new NodeId(nodeId));
     }
 
-
     @Override
     public void onNotification(DOMNotification domNotification) {
         //NOOP
@@ -424,7 +475,44 @@ public class NetconfNodeManagerCallback implements NodeManagerCallback, RemoteDe
     }
 
     @Override
-    public void onReceive(Object o, ActorRef actorRef) {
+    public void onReceive(Object message, ActorRef actorRef) {
+        LOG.debug("Netconf node callback received message {}", message);
+        if (message instanceof AnnounceMasterMountPoint) {
+            masterDataBrokerRef = actorRef;
+            // candidate gets registered when mount point is already prepared so we can go ahead and register it
+            if (connected) {
+                topologyDispatcher.registerMountPoint(TypedActor.context(), new NodeId(nodeId), masterDataBrokerRef);
+            } else {
+                LOG.debug("Announce master mount point msg received but mount point is not ready yet");
+            }
+        } else if (message instanceof AnnounceMasterMountPointDown) {
+            LOG.debug("Master mountpoint went down");
+            masterDataBrokerRef = null;
+            topologyDispatcher.unregisterMountPoint(new NodeId(nodeId));
+        }
+    }
 
+    @Override
+    public void onSessionUp(NetconfClientSession netconfClientSession) {
+        //NetconfClientSession is up, we can register role candidate
+        LOG.debug("Netconf client session is up, registering role candidate");
+        roleChangeStrategy.registerRoleCandidate(nodeManager);
+    }
+
+    @Override
+    public void onSessionDown(NetconfClientSession netconfClientSession, Exception e) {
+        LOG.debug("Netconf client session is down, unregistering role candidate");
+        roleChangeStrategy.unregisterRoleCandidate();
+    }
+
+    @Override
+    public void onSessionTerminated(NetconfClientSession netconfClientSession, NetconfTerminationReason netconfTerminationReason) {
+        LOG.debug("Netconf client session is down, unregistering role candidate");
+        roleChangeStrategy.unregisterRoleCandidate();
+    }
+
+    @Override
+    public void onMessage(NetconfClientSession netconfClientSession, NetconfMessage netconfMessage) {
+        //NOOP
     }
 }
\ No newline at end of file
index 1d9e7c91700fb8ea682aa08ab726e42e708c2cd7..4a739f9750642c7166a65388736e6c23bfe8ed65 100644 (file)
@@ -18,7 +18,9 @@ import org.opendaylight.netconf.topology.StateAggregator;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeConnectionStatus.ConnectionStatus;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.AvailableCapabilities;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.ClusteredConnectionStatusBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.UnavailableCapabilities;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.clustered.connection.status.NodeStatus;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
@@ -38,6 +40,8 @@ public class NetconfNodeOperationalDataAggregator implements StateAggregator{
             public void onSuccess(final List<Node> result) {
                 Node base = null;
                 NetconfNode baseAugmentation = null;
+                AvailableCapabilities masterCaps = null;
+                UnavailableCapabilities unavailableMasterCaps = null;
                 final ArrayList<NodeStatus> statusList = new ArrayList<>();
                 for (final Node node : result) {
                     final NetconfNode netconfNode = node.getAugmentation(NetconfNode.class);
@@ -45,6 +49,15 @@ public class NetconfNodeOperationalDataAggregator implements StateAggregator{
                         base = node;
                         baseAugmentation = netconfNode;
                     }
+                    // we need to pull out caps from master, since slave does not go through resolution
+                    if (masterCaps == null) {
+                        masterCaps = netconfNode.getAvailableCapabilities();
+                        unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
+                    }
+                    if (netconfNode.getAvailableCapabilities().getAvailableCapability().size() > masterCaps.getAvailableCapability().size()) {
+                        masterCaps = netconfNode.getAvailableCapabilities();
+                        unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
+                    }
                     LOG.debug(netconfNode.toString());
                     statusList.addAll(netconfNode.getClusteredConnectionStatus().getNodeStatus());
                 }
@@ -52,11 +65,9 @@ public class NetconfNodeOperationalDataAggregator implements StateAggregator{
                 if (base == null) {
                     base = result.get(0);
                     baseAugmentation = result.get(0).getAugmentation(NetconfNode.class);
-                    LOG.warn("All results {}", result.toString());
+                    LOG.debug("All results {}", result.toString());
                 }
 
-                LOG.warn("Base node: {}", base);
-
                 final Node aggregatedNode =
                         new NodeBuilder(base)
                                 .addAugmentation(NetconfNode.class,
@@ -65,8 +76,11 @@ public class NetconfNodeOperationalDataAggregator implements StateAggregator{
                                                         new ClusteredConnectionStatusBuilder()
                                                                 .setNodeStatus(statusList)
                                                                 .build())
+                                                .setAvailableCapabilities(masterCaps)
+                                                .setUnavailableCapabilities(unavailableMasterCaps)
                                                 .build())
                                 .build();
+
                 future.set(aggregatedNode);
             }
 
@@ -88,6 +102,8 @@ public class NetconfNodeOperationalDataAggregator implements StateAggregator{
             public void onSuccess(final List<Node> result) {
                 Node base = null;
                 NetconfNode baseAugmentation = null;
+                AvailableCapabilities masterCaps = null;
+                UnavailableCapabilities unavailableMasterCaps = null;
                 final ArrayList<NodeStatus> statusList = new ArrayList<>();
                 for (final Node node : result) {
                     final NetconfNode netconfNode = node.getAugmentation(NetconfNode.class);
@@ -95,6 +111,15 @@ public class NetconfNodeOperationalDataAggregator implements StateAggregator{
                         base = node;
                         baseAugmentation = netconfNode;
                     }
+                    // we need to pull out caps from master, since slave does not go through resolution
+                    if (masterCaps == null) {
+                        masterCaps = netconfNode.getAvailableCapabilities();
+                        unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
+                    }
+                    if (netconfNode.getAvailableCapabilities().getAvailableCapability().size() > masterCaps.getAvailableCapability().size()) {
+                        masterCaps = netconfNode.getAvailableCapabilities();
+                        unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
+                    }
                     LOG.debug(netconfNode.toString());
                     statusList.addAll(netconfNode.getClusteredConnectionStatus().getNodeStatus());
                 }
@@ -102,7 +127,7 @@ public class NetconfNodeOperationalDataAggregator implements StateAggregator{
                 if (base == null) {
                     base = result.get(0);
                     baseAugmentation = result.get(0).getAugmentation(NetconfNode.class);
-                    LOG.warn("All results {}", result.toString());
+                    LOG.debug("All results {}", result.toString());
                 }
 
                 final Node aggregatedNode =
@@ -113,6 +138,8 @@ public class NetconfNodeOperationalDataAggregator implements StateAggregator{
                                                         new ClusteredConnectionStatusBuilder()
                                                                 .setNodeStatus(statusList)
                                                                 .build())
+                                                .setAvailableCapabilities(masterCaps)
+                                                .setUnavailableCapabilities(unavailableMasterCaps)
                                                 .build())
                                 .build();
                 future.set(aggregatedNode);
index 4329063b8a3486eff734abf59f222f6443d8a6a6..73d238d1d3cf1edde28367fa51e381239d9f5813 100644 (file)
@@ -8,6 +8,8 @@
 
 package org.opendaylight.netconf.topology.impl;
 
+import akka.actor.ActorContext;
+import akka.actor.ActorRef;
 import io.netty.util.concurrent.EventExecutor;
 import java.util.Collection;
 import javax.annotation.Nonnull;
@@ -72,7 +74,12 @@ public class NetconfTopologyImpl extends AbstractNetconfTopology implements Data
     }
 
     @Override
-    public void registerMountPoint(NodeId nodeId) {
+    public void registerMountPoint(ActorContext context, NodeId nodeId) {
+        throw new UnsupportedOperationException("MountPoint registration is not supported in regular topology, this happens automaticaly in the netconf pipeline");
+    }
+
+    @Override
+    public void registerMountPoint(ActorContext context, NodeId nodeId, ActorRef masterRef) {
         throw new UnsupportedOperationException("MountPoint registration is not supported in regular topology, this happens automaticaly in the netconf pipeline");
     }
 
index fecc93f72bce88a046017229ded0a8b33e2c65d0..3a9c8db73201749ebcb01425e91ae6e250806579 100644 (file)
@@ -83,8 +83,10 @@ public class NetconfTopologyManagerCallback implements TopologyManagerCallback {
                 createNodeManager(nodeId);
         nodes.put(nodeId, naBaseNodeManager);
 
-        // put initial state into datastore
-        naSalNodeWriter.init(nodeId, naBaseNodeManager.getInitialState(nodeId, node));
+        // only master should put initial state into datastore
+        if (isMaster) {
+            naSalNodeWriter.init(nodeId, naBaseNodeManager.getInitialState(nodeId, node));
+        }
 
         // trigger connect on this node
         return naBaseNodeManager.onNodeCreated(nodeId, node);
@@ -92,8 +94,10 @@ public class NetconfTopologyManagerCallback implements TopologyManagerCallback {
 
     @Override
     public ListenableFuture<Node> onNodeUpdated(final NodeId nodeId, final Node node) {
-        // put initial state into datastore
-        naSalNodeWriter.init(nodeId, nodes.get(nodeId).getInitialState(nodeId, node));
+        // only master should put initial state into datastore
+        if (isMaster) {
+            naSalNodeWriter.init(nodeId, nodes.get(nodeId).getInitialState(nodeId, node));
+        }
 
         // Trigger onNodeUpdated only on this node
         return nodes.get(nodeId).onNodeUpdated(nodeId, node);
@@ -123,6 +127,9 @@ public class NetconfTopologyManagerCallback implements TopologyManagerCallback {
     @Nonnull
     @Override
     public ListenableFuture<Node> getCurrentStatusForNode(@Nonnull NodeId nodeId) {
+        if (!nodes.containsKey(nodeId)) {
+            nodes.put(nodeId, createNodeManager(nodeId));
+        }
         return nodes.get(nodeId).getCurrentStatusForNode(nodeId);
     }
 
@@ -145,4 +152,16 @@ public class NetconfTopologyManagerCallback implements TopologyManagerCallback {
     public void onReceive(Object o, ActorRef actorRef) {
 
     }
+
+    @Nonnull
+    @Override
+    public Node getInitialState(@Nonnull NodeId nodeId, @Nonnull Node configNode) {
+        return nodes.get(nodeId).getInitialState(nodeId, configNode);
+    }
+
+    @Nonnull
+    @Override
+    public Node getFailedState(@Nonnull NodeId nodeId, @Nonnull Node configNode) {
+        return nodes.get(nodeId).getFailedState(nodeId, configNode);
+    }
 }
index d652b114d01360b0199345a31a2ab4e7007d764d..54ef71f2b626ece1afadd55e6c5e8993a4324a67 100644 (file)
@@ -12,6 +12,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
+import java.util.concurrent.locks.ReentrantLock;
 import javax.annotation.Nonnull;
 import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
@@ -48,6 +49,8 @@ public class TopologyNodeWriter implements NodeWriter{
     private final InstanceIdentifier<NetworkTopology> networkTopologyPath;
     private final KeyedInstanceIdentifier<Topology, TopologyKey> topologyListPath;
 
+    private final ReentrantLock lock = new ReentrantLock(true);
+
     public TopologyNodeWriter(final String topologyId, final DataBroker dataBroker) {
         this.topologyId = topologyId;
         this.txChain = Preconditions.checkNotNull(dataBroker).createTransactionChain(new TransactionChainListener() {
@@ -70,49 +73,65 @@ public class TopologyNodeWriter implements NodeWriter{
 
     @Override
     public void init(@Nonnull NodeId id, @Nonnull Node operationalDataNode) {
-        final WriteTransaction writeTx = txChain.newWriteOnlyTransaction();
-
-        createNetworkTopologyIfNotPresent(writeTx);
-        final InstanceIdentifier<Node> path = createBindingPathForTopology(new NodeKey(id), topologyId);
-
-        LOG.trace("{}: Init device state transaction {} putting if absent operational data started. Putting data on path {}",
-                id.getValue(), writeTx.getIdentifier(), path);
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, path, operationalDataNode);
-        LOG.trace("{}: Init device state transaction {} putting operational data ended.",
-                id.getValue(), writeTx.getIdentifier());
-
-        commitTransaction(writeTx, "init", id);
+        lock.lock();
+        try {
+            final WriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+
+            createNetworkTopologyIfNotPresent(writeTx);
+            final InstanceIdentifier<Node> path = createBindingPathForTopology(new NodeKey(id), topologyId);
+
+            LOG.trace("{}: Init device state transaction {} putting if absent operational data started. Putting data on path {}",
+                    id.getValue(), writeTx.getIdentifier(), path);
+            writeTx.put(LogicalDatastoreType.OPERATIONAL, path, operationalDataNode);
+            LOG.trace("{}: Init device state transaction {} putting operational data ended.",
+                    id.getValue(), writeTx.getIdentifier());
+
+            commitTransaction(writeTx, "init", id);
+        } finally {
+            lock.unlock();
+        }
     }
 
     @Override
     public void update(@Nonnull NodeId id, @Nonnull Node operationalDataNode) {
-        final WriteTransaction writeTx = txChain.newWriteOnlyTransaction();
-
-        final InstanceIdentifier<Node> path = createBindingPathForTopology(new NodeKey(id), topologyId);
-        LOG.trace("{}: Update device state transaction {} merging operational data started. Putting data on path {}",
-                id, writeTx.getIdentifier(), operationalDataNode);
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, path, operationalDataNode);
-        LOG.trace("{}: Update device state transaction {} merging operational data ended.",
-                id, writeTx.getIdentifier());
+        lock.lock();
+        try {
+            final WriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+
+            final InstanceIdentifier<Node> path = createBindingPathForTopology(new NodeKey(id), topologyId);
+            LOG.trace("{}: Update device state transaction {} merging operational data started. Putting data on path {}",
+                    id, writeTx.getIdentifier(), operationalDataNode);
+            writeTx.put(LogicalDatastoreType.OPERATIONAL, path, operationalDataNode);
+            LOG.trace("{}: Update device state transaction {} merging operational data ended.",
+                    id, writeTx.getIdentifier());
+
+            commitTransaction(writeTx, "update", id);
+        } finally {
+            lock.unlock();
+        }
 
-        commitTransaction(writeTx, "update", id);
     }
 
     @Override
     public void delete(@Nonnull NodeId id) {
-        final WriteTransaction writeTx = txChain.newWriteOnlyTransaction();
-
-        final InstanceIdentifier<Node> path = createBindingPathForTopology(new NodeKey(id), topologyId);
-
-        LOG.trace(
-                "{}: Close device state transaction {} removing all data started. Path: {}",
-                id, writeTx.getIdentifier(), path);
-        writeTx.delete(LogicalDatastoreType.OPERATIONAL, path);
-        LOG.trace(
-                "{}: Close device state transaction {} removing all data ended.",
-                id, writeTx.getIdentifier());
-
-        commitTransaction(writeTx, "close", id);
+        lock.lock();
+        try {
+            final WriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+
+            final InstanceIdentifier<Node> path = createBindingPathForTopology(new NodeKey(id), topologyId);
+
+            LOG.trace(
+                    "{}: Close device state transaction {} removing all data started. Path: {}",
+                    id, writeTx.getIdentifier(), path);
+            writeTx.delete(LogicalDatastoreType.OPERATIONAL, path);
+            LOG.trace(
+                    "{}: Close device state transaction {} removing all data ended.",
+                    id, writeTx.getIdentifier());
+
+            commitTransaction(writeTx, "close", id);
+        } finally {
+            lock.unlock();
+        }
     }
 
     private void commitTransaction(final WriteTransaction transaction, final String txType, final NodeId id) {
@@ -134,7 +153,6 @@ public class TopologyNodeWriter implements NodeWriter{
                 throw new IllegalStateException(id.getValue() + "  Transaction(" + txType + ") not committed correctly", t);
             }
         });
-
     }
 
     private void createNetworkTopologyIfNotPresent(final WriteTransaction writeTx) {
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredDeviceSourcesResolver.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredDeviceSourcesResolver.java
new file mode 100644 (file)
index 0000000..6e0c1d1
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline;
+
+import akka.actor.TypedActor;
+import java.util.Set;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import scala.concurrent.Future;
+
+public interface ClusteredDeviceSourcesResolver extends TypedActor.Receiver, TypedActor.PreStart {
+
+    Future<Set<SourceIdentifier>> getResolvedSources();
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredDeviceSourcesResolverImpl.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredDeviceSourcesResolverImpl.java
new file mode 100644 (file)
index 0000000..3a74e40
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+
+package org.opendaylight.netconf.topology.pipeline;
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.TypedActor;
+import akka.actor.TypedProps;
+import akka.cluster.Cluster;
+import akka.cluster.Member;
+import akka.dispatch.Futures;
+import akka.dispatch.OnComplete;
+import java.util.List;
+import java.util.Set;
+import org.opendaylight.controller.cluster.schema.provider.impl.RemoteSchemaProvider;
+import org.opendaylight.netconf.topology.pipeline.messages.AnnounceClusteredDeviceSourcesResolverUp;
+import org.opendaylight.netconf.topology.pipeline.messages.AnnounceMasterOnSameNodeUp;
+import org.opendaylight.netconf.topology.pipeline.messages.AnnounceMasterSourceProviderUp;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceRepresentation;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.PotentialSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistration;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistry;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.Promise;
+
+
+/**
+ * Slave-side {@link ClusteredDeviceSourcesResolver} implementation, run as an Akka
+ * typed actor.  On start it announces itself to the master source provider actor on
+ * every other cluster member; when a master answers, it obtains the master's provided
+ * sources, registers each of them in the local {@link SchemaSourceRegistry} (served
+ * remotely through {@link RemoteSchemaProvider}) and completes
+ * {@link #getResolvedSources()} with the resolved identifiers.
+ */
+public class ClusteredDeviceSourcesResolverImpl implements ClusteredDeviceSourcesResolver {
+
+    // NOTE(review): not final, and keyed to the interface class rather than this
+    // implementation class - confirm both are intentional.
+    private static Logger LOG = LoggerFactory.getLogger(ClusteredDeviceSourcesResolver.class);
+
+    private final String topologyId;
+    private final String nodeId;
+    private final ActorSystem actorSystem;
+    private final SchemaSourceRegistry schemaRegistry;
+    // Externally owned, shared list: registrations created here are appended so the
+    // owner can close them later.
+    private final List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations;
+
+    // Completed with the master's source identifiers, or failed with
+    // MasterSourceProviderOnSameNodeException (see onReceive).
+    private final Promise<Set<SourceIdentifier>> resolvedSourcesPromise;
+    // Typed-actor proxy to the remote master; non-null once the first
+    // AnnounceMasterSourceProviderUp has been processed.
+    private MasterSourceProvider remoteYangTextSourceProvider;
+
+    public ClusteredDeviceSourcesResolverImpl(String topologyId, String nodeId, ActorSystem actorSystem,
+                                              SchemaSourceRegistry schemaRegistry,
+                                              List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations) {
+        this.topologyId = topologyId;
+        this.nodeId = nodeId;
+        this.actorSystem = actorSystem;
+        this.schemaRegistry = schemaRegistry;
+        this.sourceRegistrations = sourceRegistrations;
+        resolvedSourcesPromise = Futures.promise();
+    }
+
+    /**
+     * Announces this resolver to the master source provider actor expected at
+     * {@code <member>/user/<topologyId>/<nodeId>/masterSourceProvider} on every
+     * cluster member other than the local one.
+     */
+    @Override
+    public void preStart(){
+        Cluster cluster = Cluster.get(actorSystem);
+        for(Member node : cluster.state().getMembers()) {
+            if(!node.address().equals(cluster.selfAddress())) {
+                final String path = node.address() + "/user/" + topologyId + "/" + nodeId + "/masterSourceProvider";
+                actorSystem.actorSelection(path).tell(new AnnounceClusteredDeviceSourcesResolverUp(), TypedActor.context().self());
+            }
+        }
+    }
+
+    /**
+     * Handles announcement messages.  Only the first AnnounceMasterSourceProviderUp
+     * is acted upon (remoteYangTextSourceProvider guards against duplicates);
+     * AnnounceMasterOnSameNodeUp fails the promise so callers can fall back to the
+     * local master setup path.
+     */
+    @Override
+    public void onReceive(Object o, ActorRef actorRef) {
+        if(o instanceof AnnounceMasterSourceProviderUp) {
+            if(remoteYangTextSourceProvider == null) {
+                remoteYangTextSourceProvider = TypedActor.get(actorSystem).typedActorOf(
+                        new TypedProps<>(MasterSourceProvider.class,
+                                MasterSourceProviderImpl.class), actorRef);
+                registerProvidedSourcesToSchemaRegistry();
+            }
+        } else if(o instanceof AnnounceMasterOnSameNodeUp) {
+            resolvedSourcesPromise.failure(new MasterSourceProviderOnSameNodeException());
+        }
+    }
+
+    // Asks the master for its provided sources, completes the promise with that
+    // future, and registers every returned identifier locally as a REMOTE_IO-cost
+    // YANG text source served by the master.
+    private void registerProvidedSourcesToSchemaRegistry() {
+        Future<Set<SourceIdentifier>> sourcesFuture = remoteYangTextSourceProvider.getProvidedSources();
+        resolvedSourcesPromise.completeWith(sourcesFuture);
+        final RemoteSchemaProvider remoteProvider = new RemoteSchemaProvider(remoteYangTextSourceProvider, actorSystem.dispatcher());
+
+        sourcesFuture.onComplete(new OnComplete<Set<SourceIdentifier>>() {
+            @Override
+            public void onComplete(Throwable throwable, Set<SourceIdentifier> sourceIdentifiers) throws Throwable {
+                // NOTE(review): throwable is not checked here; on failure
+                // sourceIdentifiers is null and the loop NPEs - confirm the failure
+                // path is handled solely via the promise above.
+                for (SourceIdentifier sourceId : sourceIdentifiers) {
+                   sourceRegistrations.add(schemaRegistry.registerSchemaSource(remoteProvider,
+                           PotentialSchemaSource.create(sourceId, YangTextSchemaSource.class, PotentialSchemaSource.Costs.REMOTE_IO.getValue())));
+                }
+            }
+        }, actorSystem.dispatcher());
+    }
+
+    @Override
+    public Future<Set<SourceIdentifier>> getResolvedSources() {
+        return resolvedSourcesPromise.future();
+    }
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredNetconfDevice.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredNetconfDevice.java
new file mode 100644 (file)
index 0000000..0040c55
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline;
+
+import akka.actor.ActorContext;
+import akka.actor.ActorSystem;
+import akka.actor.TypedActor;
+import akka.actor.TypedProps;
+import akka.dispatch.OnComplete;
+import akka.japi.Creator;
+import com.google.common.base.Optional;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipChange;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.netconf.api.NetconfMessage;
+import org.opendaylight.netconf.sal.connect.api.RemoteDeviceCommunicator;
+import org.opendaylight.netconf.sal.connect.api.RemoteDeviceHandler;
+import org.opendaylight.netconf.sal.connect.netconf.NetconfDevice;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfDeviceCapabilities;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
+import org.opendaylight.netconf.sal.connect.netconf.sal.NetconfDeviceRpc;
+import org.opendaylight.netconf.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
+import org.opendaylight.netconf.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Clustered variant of {@link NetconfDevice}.  Every node initially comes up as a
+ * slave (onRemoteSessionUp -> slaveSetupSchema) and resolves schema sources from the
+ * cluster's master via a ClusteredDeviceSourcesResolver actor; when SAL
+ * initialization succeeds locally, this node becomes the master and spawns a
+ * MasterSourceProvider actor serving its sources.  Entity-ownership changes switch
+ * the role at runtime (see {@link #ownershipChanged}).
+ */
+public class ClusteredNetconfDevice extends NetconfDevice implements EntityOwnershipListener {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ClusteredNetconfDevice.class);
+
+    // NOTE(review): never read or updated anywhere in this class - confirm whether
+    // it is dead or meant to track the master role.
+    private boolean isMaster = false;
+    // Session state captured in onRemoteSessionUp and cleared in onRemoteSessionDown.
+    private NetconfDeviceCommunicator listener;
+    private NetconfSessionPreferences sessionPreferences;
+    private SchemaRepository schemaRepo;
+    private final ActorSystem actorSystem;
+    private final String topologyId;
+    private final String nodeId;
+    // Actor context captured at construction time; used to spawn child actors.
+    private final ActorContext cachedContext;
+
+    // Exactly one of these is live at a time: master role owns masterSourceProvider,
+    // slave role owns resolver (see onRemoteSessionDown).
+    private MasterSourceProvider masterSourceProvider = null;
+    private ClusteredDeviceSourcesResolver resolver = null;
+
+    public ClusteredNetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionPreferences> salFacade,
+                                  final ExecutorService globalProcessingExecutor, SchemaRepository schemaRepo, ActorSystem actorSystem, String topologyId, String nodeId,
+                                  ActorContext cachedContext) {
+        super(schemaResourcesDTO, id, salFacade, globalProcessingExecutor);
+        this.schemaRepo = schemaRepo;
+        this.actorSystem = actorSystem;
+        this.topologyId = topologyId;
+        this.nodeId = nodeId;
+        this.cachedContext = cachedContext;
+    }
+
+    /**
+     * Captures the session state and starts schema resolution in the slave role.
+     * NOTE(review): logged at WARN although this is a normal event - confirm level.
+     */
+    @Override
+    public void onRemoteSessionUp(NetconfSessionPreferences remoteSessionCapabilities, NetconfDeviceCommunicator listener) {
+        LOG.warn("Node {} SessionUp, with capabilities {}", nodeId, remoteSessionCapabilities);
+        this.listener = listener;
+        this.sessionPreferences = remoteSessionCapabilities;
+        slaveSetupSchema();
+    }
+
+
+    /**
+     * Called when SAL initialization succeeds on this node, i.e. this node is the
+     * master: after the superclass handling, spawn a MasterSourceProvider actor
+     * (named "masterSourceProvider") advertising all module sources of the resolved
+     * schema context to slave resolvers.
+     */
+    @Override
+    protected void handleSalInitializationSuccess(SchemaContext result, NetconfSessionPreferences remoteSessionCapabilities, DOMRpcService deviceRpc) {
+        super.handleSalInitializationSuccess(result, remoteSessionCapabilities, deviceRpc);
+
+        // Translate each module identifier into a SourceIdentifier; a module at the
+        // default (magic) revision maps to an absent revision string.
+        final Set<SourceIdentifier> sourceIds = Sets.newHashSet();
+        for(ModuleIdentifier id : result.getAllModuleIdentifiers()) {
+            sourceIds.add(SourceIdentifier.create(id.getName(), (SimpleDateFormatUtil.DEFAULT_DATE_REV == id.getRevision() ? Optional.<String>absent() :
+                    Optional.of(SimpleDateFormatUtil.getRevisionFormat().format(id.getRevision())))));
+        }
+
+        //TODO extract string constant to util class
+        LOG.debug("Creating master source provider");
+        masterSourceProvider = TypedActor.get(cachedContext).typedActorOf(
+                new TypedProps<>(MasterSourceProvider.class,
+                        new Creator<MasterSourceProviderImpl>() {
+                            @Override
+                            public MasterSourceProviderImpl create() throws Exception {
+                                return new MasterSourceProviderImpl(schemaRepo, sourceIds, actorSystem, topologyId, nodeId);
+                            }
+                        }), "masterSourceProvider");
+    }
+
+    /**
+     * Clears session state and stops whichever role actor is alive: the master
+     * source provider if this node was master, otherwise the slave resolver.
+     */
+    @Override
+    public void onRemoteSessionDown() {
+        super.onRemoteSessionDown();
+        listener = null;
+        sessionPreferences = null;
+        if (masterSourceProvider != null) {
+            // if we have master the slave that started on this node should be already killed via PoisonPill, so stop master only now
+            LOG.debug("Stopping master source provider for node {}", nodeId);
+            TypedActor.get(actorSystem).stop(masterSourceProvider);
+            masterSourceProvider = null;
+        } else {
+            LOG.debug("Stopping slave source resolver for node {}", nodeId);
+            TypedActor.get(actorSystem).stop(resolver);
+            resolver = null;
+        }
+    }
+
+    /**
+     * Slave-role schema setup: spawn a ClusteredDeviceSourcesResolver actor (named
+     * "clusteredDeviceSourcesResolver"), and once it resolves the master's sources,
+     * build a schema context from them and finish SAL initialization.  A
+     * MasterSourceProviderOnSameNodeException is swallowed deliberately - in that
+     * case this node is the master and the local path handles initialization.
+     */
+    private void slaveSetupSchema() {
+        //TODO extract string constant to util class
+        resolver = TypedActor.get(cachedContext).typedActorOf(
+                new TypedProps<>(ClusteredDeviceSourcesResolver.class,
+                        new Creator<ClusteredDeviceSourcesResolverImpl>() {
+                            @Override
+                            public ClusteredDeviceSourcesResolverImpl create() throws Exception {
+                                return new ClusteredDeviceSourcesResolverImpl(topologyId, nodeId, actorSystem, schemaRegistry, sourceRegistrations);
+                            }
+                        }), "clusteredDeviceSourcesResolver");
+
+
+        final FutureCallback<SchemaContext> schemaContextFuture = new FutureCallback<SchemaContext>() {
+            @Override
+            public void onSuccess(SchemaContext schemaContext) {
+                LOG.debug("{}: Schema context built successfully.", id);
+
+                // Advertise module-based capabilities derived from the resolved
+                // context plus the session's non-module capabilities.
+                final NetconfDeviceCapabilities deviceCap = sessionPreferences.getNetconfDeviceCapabilities();
+                final Set<QName> providedSourcesQnames = Sets.newHashSet();
+                for(ModuleIdentifier id : schemaContext.getAllModuleIdentifiers()) {
+                    providedSourcesQnames.add(QName.create(id.getQNameModule(), id.getName()));
+                }
+
+                deviceCap.addNonModuleBasedCapabilities(sessionPreferences.getNonModuleCaps());
+                deviceCap.addCapabilities(providedSourcesQnames);
+
+                // Call the superclass directly to avoid this class's master-spawning
+                // override of handleSalInitializationSuccess.
+                ClusteredNetconfDevice.super.handleSalInitializationSuccess(
+                        schemaContext, sessionPreferences, getDeviceSpecificRpc(schemaContext, listener));
+            }
+
+            @Override
+            public void onFailure(Throwable throwable) {
+                LOG.warn("{}: Unexpected error resolving device sources: {}", id, throwable);
+                handleSalInitializationFailure(throwable, listener);
+            }
+        };
+
+        resolver.getResolvedSources().onComplete(
+                new OnComplete<Set<SourceIdentifier>>() {
+                    @Override
+                    public void onComplete(Throwable throwable, Set<SourceIdentifier> sourceIdentifiers) throws Throwable {
+                        if(throwable != null) {
+                            if(throwable instanceof MasterSourceProviderOnSameNodeException) {
+                                //do nothing
+                            } else {
+                                LOG.warn("{}: Unexpected error resolving device sources: {}", id, throwable);
+                                handleSalInitializationFailure(throwable, listener);
+                            }
+                        } else {
+                            LOG.trace("{}: Trying to build schema context from {}", id, sourceIdentifiers);
+                            Futures.addCallback(schemaContextFactory.createSchemaContext(sourceIdentifiers), schemaContextFuture);
+                        }
+                    }
+                }, actorSystem.dispatcher());
+    }
+
+    // Builds an RPC service bound to the given schema context and communicator.
+    private NetconfDeviceRpc getDeviceSpecificRpc(SchemaContext result, RemoteDeviceCommunicator<NetconfMessage> listener) {
+        return new NetconfDeviceRpc(result, listener, new NetconfMessageTransformer(result, true));
+    }
+
+    /**
+     * Role switch driven by entity ownership: becoming owner re-runs the full
+     * (master) session-up path; losing ownership re-runs the slave schema setup.
+     * NOTE(review): sessionPreferences/listener are nulled in onRemoteSessionDown;
+     * an ownership change arriving outside an active session would pass nulls here -
+     * confirm ordering guarantees.
+     */
+    @Override
+    public void ownershipChanged(EntityOwnershipChange ownershipChange) {
+        LOG.debug("Entity ownership change received {}", ownershipChange);
+        if(ownershipChange.isOwner()) {
+            super.onRemoteSessionUp(sessionPreferences, listener);
+        } else if (ownershipChange.wasOwner()) {
+            slaveSetupSchema();
+        }
+    }
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredNetconfDeviceCommunicator.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ClusteredNetconfDeviceCommunicator.java
new file mode 100644 (file)
index 0000000..0b19197
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline;
+
+import java.util.ArrayList;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListenerRegistration;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
+import org.opendaylight.netconf.api.NetconfMessage;
+import org.opendaylight.netconf.api.NetconfTerminationReason;
+import org.opendaylight.netconf.client.NetconfClientSession;
+import org.opendaylight.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.netconf.sal.connect.netconf.NetconfDevice;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.netconf.sal.connect.util.RemoteDeviceId;
+
+/**
+ * {@link NetconfDeviceCommunicator} specialization for clustered deployments.  On
+ * session-up it registers the remote device (cast to ClusteredNetconfDevice) as an
+ * entity-ownership listener for "netconf-node/&lt;device name&gt;", and it fans every
+ * session event out to additionally registered NetconfClientSessionListeners after
+ * the superclass has handled it.
+ */
+public class ClusteredNetconfDeviceCommunicator extends NetconfDeviceCommunicator {
+
+    private final EntityOwnershipService ownershipService;
+
+    // Extra listeners notified after the superclass handling; mutated only via
+    // registerNetconfClientSessionListener() and registration close().
+    private final ArrayList<NetconfClientSessionListener> netconfClientSessionListeners = new ArrayList<>();
+    // Non-null only between onSessionUp and onSessionDown/onSessionTerminated.
+    private EntityOwnershipListenerRegistration ownershipListenerRegistration = null;
+
+    public ClusteredNetconfDeviceCommunicator(RemoteDeviceId id, NetconfDevice remoteDevice, EntityOwnershipService ownershipService) {
+        super(id, remoteDevice);
+        this.ownershipService = ownershipService;
+    }
+
+    /** Forwards the message to the superclass, then to every registered listener. */
+    @Override
+    public void onMessage(NetconfClientSession session, NetconfMessage message) {
+        super.onMessage(session, message);
+        for(NetconfClientSessionListener listener : netconfClientSessionListeners) {
+            listener.onMessage(session, message);
+        }
+    }
+
+    /**
+     * Tears down the ownership registration and notifies listeners.
+     * NOTE(review): NPEs if the session goes down before onSessionUp ever ran
+     * (ownershipListenerRegistration still null) - confirm that cannot happen.
+     */
+    @Override
+    public void onSessionDown(NetconfClientSession session, Exception e) {
+        super.onSessionDown(session, e);
+        ownershipListenerRegistration.close();
+        for(NetconfClientSessionListener listener : netconfClientSessionListeners) {
+            listener.onSessionDown(session, e);
+        }
+    }
+
+    /**
+     * Registers the device as ownership listener for this netconf node entity and
+     * notifies listeners.  Assumes remoteDevice is a ClusteredNetconfDevice (the
+     * cast below); presumably enforced by the wiring that constructs this class -
+     * TODO confirm.
+     */
+    @Override
+    public void onSessionUp(NetconfClientSession session) {
+        super.onSessionUp(session);
+        ownershipListenerRegistration = ownershipService.registerListener("netconf-node/" + id.getName(), (ClusteredNetconfDevice) remoteDevice);
+        for(NetconfClientSessionListener listener : netconfClientSessionListeners) {
+            listener.onSessionUp(session);
+        }
+    }
+
+    /**
+     * Same teardown as onSessionDown for the terminated case.
+     * NOTE(review): same potential NPE on a never-established session.
+     */
+    @Override
+    public void onSessionTerminated(NetconfClientSession session, NetconfTerminationReason reason) {
+        super.onSessionTerminated(session, reason);
+        ownershipListenerRegistration.close();
+        for(NetconfClientSessionListener listener : netconfClientSessionListeners) {
+            listener.onSessionTerminated(session, reason);
+        }
+    }
+
+    /**
+     * Adds a listener to the fan-out list.
+     * @return a registration whose close() removes the listener again
+     */
+    public NetconfClientSessionListenerRegistration registerNetconfClientSessionListener(NetconfClientSessionListener listener) {
+        netconfClientSessionListeners.add(listener);
+        return new NetconfClientSessionListenerRegistration(listener);
+    }
+
+    /** Handle returned from registerNetconfClientSessionListener(); close() unregisters. */
+    public class NetconfClientSessionListenerRegistration {
+
+        private final NetconfClientSessionListener listener;
+
+        public NetconfClientSessionListenerRegistration(NetconfClientSessionListener listener) {
+            this.listener = listener;
+        }
+
+        public void close() {
+            netconfClientSessionListeners.remove(listener);
+        }
+    }
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProvider.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProvider.java
new file mode 100644 (file)
index 0000000..f9726f4
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.netconf.topology.pipeline;
+
+import akka.actor.TypedActor;
+import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
+
+/**
+ * Typed-actor facade for the master node's YANG text schema source provider:
+ * combines the remoting contract ({@link RemoteYangTextSourceProvider}) with actor
+ * lifecycle hooks (PreStart to announce itself, Receiver to answer slave resolvers).
+ */
+public interface MasterSourceProvider
+        extends TypedActor.PreStart, TypedActor.Receiver, RemoteYangTextSourceProvider {
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProviderImpl.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProviderImpl.java
new file mode 100644 (file)
index 0000000..42375dc
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.netconf.topology.pipeline;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
+import akka.actor.TypedActor;
+import akka.cluster.Cluster;
+import akka.cluster.Member;
+import java.util.Set;
+import org.opendaylight.controller.cluster.schema.provider.impl.RemoteYangTextSourceProviderImpl;
+import org.opendaylight.netconf.topology.pipeline.messages.AnnounceClusteredDeviceSourcesResolverUp;
+import org.opendaylight.netconf.topology.pipeline.messages.AnnounceMasterOnSameNodeUp;
+import org.opendaylight.netconf.topology.pipeline.messages.AnnounceMasterSourceProviderUp;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Master-side schema source provider actor.  On start it joins the cluster and
+ * announces itself to every member's slave resolver actor; a resolver on the same
+ * node as the master is told AnnounceMasterOnSameNodeUp and then poison-pilled,
+ * since no remote resolution is needed there.  Source serving itself is inherited
+ * from {@link RemoteYangTextSourceProviderImpl}.
+ */
+public class MasterSourceProviderImpl extends RemoteYangTextSourceProviderImpl
+        implements MasterSourceProvider {
+
+    // NOTE(review): should be final by convention.
+    private static Logger LOG = LoggerFactory.getLogger(MasterSourceProviderImpl.class);
+
+    private final ActorSystem actorSystem;
+    private final String topologyId;
+    private final String nodeId;
+
+    public MasterSourceProviderImpl(SchemaRepository schemaRepo, Set<SourceIdentifier> providedSources, ActorSystem actorSystem, String topologyId, String nodeId) {
+        super(schemaRepo, providedSources);
+        this.actorSystem = actorSystem;
+        this.topologyId = topologyId;
+        this.nodeId = nodeId;
+    }
+
+    /** Answers a slave resolver's announcement with the master's own announcement. */
+    @Override
+    public void onReceive(Object o, ActorRef actorRef) {
+        if(o instanceof AnnounceClusteredDeviceSourcesResolverUp) {
+            LOG.debug("Received source resolver up");
+            actorRef.tell(new AnnounceMasterSourceProviderUp(), TypedActor.context().self());
+        }
+    }
+
+    /**
+     * Joins the cluster (self-join) and notifies the resolver actor expected at
+     * {@code <member>/user/<topologyId>/<nodeId>/clusteredDeviceSourcesResolver} on
+     * every member.  The resolver co-located with this master is told it is on the
+     * same node and then stopped via PoisonPill.
+     */
+    @Override
+    public void preStart() {
+        Cluster cluster = Cluster.get(actorSystem);
+        cluster.join(cluster.selfAddress());
+        LOG.debug("Notifying members master schema source provider is up.");
+        for(Member node : cluster.state().getMembers()) {
+            final String path = node.address() + "/user/" + topologyId + "/" + nodeId + "/clusteredDeviceSourcesResolver";
+            if(node.address().equals(cluster.selfAddress())) {
+                actorSystem.actorSelection(path).tell(new AnnounceMasterOnSameNodeUp(), TypedActor.context().self());
+                actorSystem.actorSelection(path).tell(PoisonPill.getInstance(), TypedActor.context().self());
+            } else {
+                //TODO extract string constant to util class
+                actorSystem.actorSelection(path).tell(new AnnounceMasterSourceProviderUp(), TypedActor.context().self());
+            }
+        }
+    }
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProviderOnSameNodeException.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/MasterSourceProviderOnSameNodeException.java
new file mode 100644 (file)
index 0000000..71f3005
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline;
+
+/**
+ * Signals that the master source provider runs on the same cluster node as the
+ * slave resolver, so remote source resolution is unnecessary; used to fail the
+ * resolver's promise and is deliberately swallowed by the consumer.
+ * NOTE(review): a serializable Exception subclass without an explicit
+ * serialVersionUID - consider adding one.
+ */
+public class MasterSourceProviderOnSameNodeException extends Exception {
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/NetconfDeviceMasterDataBroker.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/NetconfDeviceMasterDataBroker.java
new file mode 100644 (file)
index 0000000..f210820
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline;
+
+import akka.actor.ActorSystem;
+import akka.actor.TypedActor;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collections;
+import java.util.Map;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
+import org.opendaylight.netconf.sal.connect.netconf.sal.NetconfDeviceDataBroker;
+import org.opendaylight.netconf.sal.connect.netconf.sal.tx.ReadWriteTx;
+import org.opendaylight.netconf.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.netconf.topology.pipeline.tx.ProxyReadOnlyTransaction;
+import org.opendaylight.netconf.topology.pipeline.tx.ProxyWriteOnlyTransaction;
+import org.opendaylight.netconf.topology.util.messages.NormalizedNodeMessage;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.Future;
+import scala.concurrent.impl.Promise.DefaultPromise;
+
+/**
+ * Master-side implementation of ProxyNetconfDeviceDataBroker.  Lives on the node
+ * that owns the NETCONF session: the DOMDataBroker methods hand out proxy
+ * transactions that call back into this broker, while read/exists/put/merge/delete/
+ * submit/commit form the remoting surface used by slave-side brokers, delegating to
+ * a local {@link NetconfDeviceDataBroker} and adapting Guava futures to Scala
+ * futures.
+ */
+public class NetconfDeviceMasterDataBroker implements ProxyNetconfDeviceDataBroker {
+
+    private final RemoteDeviceId id;
+
+    private final NetconfDeviceDataBroker delegateBroker;
+    private final ActorSystem actorSystem;
+
+    // Single shared read transaction; see constructor comment.
+    private DOMDataReadOnlyTransaction readTx;
+    // Currently active delegate write transaction.  NOTE(review): a single field
+    // means only one outstanding write tx is supported - confirm callers serialize.
+    private DOMDataWriteTransaction writeTx;
+
+    public NetconfDeviceMasterDataBroker(final ActorSystem actorSystem, final RemoteDeviceId id,
+                                         final SchemaContext schemaContext, final DOMRpcService rpc,
+                                         final NetconfSessionPreferences netconfSessionPreferences, final long requestTimeoutMillis) {
+        this.id = id;
+        delegateBroker = new NetconfDeviceDataBroker(id, schemaContext, rpc, netconfSessionPreferences, requestTimeoutMillis);
+        this.actorSystem = actorSystem;
+
+        // only ever need 1 readTx since it doesnt need to be closed
+        readTx = delegateBroker.newReadOnlyTransaction();
+    }
+
+    /** Returns a proxy read tx that forwards to this broker's remoting methods. */
+    @Override
+    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+        return new ProxyReadOnlyTransaction(actorSystem, id, TypedActor.<NetconfDeviceMasterDataBroker>self());
+    }
+
+    /** Combines a proxy read tx with a fresh proxy write tx. */
+    @Override
+    public DOMDataReadWriteTransaction newReadWriteTransaction() {
+        return new ReadWriteTx(new ProxyReadOnlyTransaction(actorSystem, id, TypedActor.<NetconfDeviceMasterDataBroker>self()),
+                newWriteOnlyTransaction());
+    }
+
+    /** Opens a new delegate write tx (replacing any previous one) behind a proxy. */
+    @Override
+    public DOMDataWriteTransaction newWriteOnlyTransaction() {
+        writeTx = delegateBroker.newWriteOnlyTransaction();
+        return new ProxyWriteOnlyTransaction(actorSystem, TypedActor.<NetconfDeviceMasterDataBroker>self());
+    }
+
+    /** Not supported for netconf mount points. */
+    @Override
+    public ListenerRegistration<DOMDataChangeListener> registerDataChangeListener(LogicalDatastoreType store, YangInstanceIdentifier path, DOMDataChangeListener listener, DataChangeScope triggeringScope) {
+        throw new UnsupportedOperationException(id + ": Data change listeners not supported for netconf mount point");
+    }
+
+    /** Not supported for netconf mount points. */
+    @Override
+    public DOMTransactionChain createTransactionChain(TransactionChainListener listener) {
+        throw new UnsupportedOperationException(id + ": Transaction chains not supported for netconf mount point");
+    }
+
+    @Nonnull
+    @Override
+    public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
+        return Collections.emptyMap();
+    }
+
+    /**
+     * Remoting read: executes on the shared read tx and returns a Scala future of an
+     * optional serializable NormalizedNodeMessage (absent when no data exists).
+     */
+    @Override
+    public Future<Optional<NormalizedNodeMessage>> read(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readFuture = readTx.read(store, path);
+
+        final DefaultPromise<Optional<NormalizedNodeMessage>> promise = new DefaultPromise<>();
+        Futures.addCallback(readFuture, new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
+            @Override
+            public void onSuccess(Optional<NormalizedNode<?, ?>> result) {
+                if (!result.isPresent()) {
+                    promise.success(Optional.<NormalizedNodeMessage>absent());
+                } else {
+                    promise.success(Optional.of(new NormalizedNodeMessage(path, result.get())));
+                }
+            }
+
+            @Override
+            public void onFailure(Throwable t) {
+                promise.failure(t);
+            }
+        });
+        return promise.future();
+    }
+
+    /** Remoting existence check on the shared read tx, adapted to a Scala future. */
+    @Override
+    public Future<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        final CheckedFuture<Boolean, ReadFailedException> existsFuture = readTx.exists(store, path);
+
+        final DefaultPromise<Boolean> promise = new DefaultPromise<>();
+        Futures.addCallback(existsFuture, new FutureCallback<Boolean>() {
+            @Override
+            public void onSuccess(Boolean result) {
+                promise.success(result);
+            }
+
+            @Override
+            public void onFailure(Throwable t) {
+                promise.failure(t);
+            }
+        });
+        return promise.future();
+    }
+
+    /** Remoting put; lazily opens a delegate write tx if none is active. */
+    @Override
+    public void put(final LogicalDatastoreType store, final NormalizedNodeMessage data) {
+        if (writeTx == null) {
+            writeTx = delegateBroker.newWriteOnlyTransaction();
+        }
+        writeTx.put(store, data.getIdentifier(), data.getNode());
+    }
+
+    /** Remoting merge; lazily opens a delegate write tx if none is active. */
+    @Override
+    public void merge(final LogicalDatastoreType store, final NormalizedNodeMessage data) {
+        if (writeTx == null) {
+            writeTx = delegateBroker.newWriteOnlyTransaction();
+        }
+        writeTx.merge(store, data.getIdentifier(), data.getNode());
+    }
+
+    /** Remoting delete; lazily opens a delegate write tx if none is active. */
+    @Override
+    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        if (writeTx == null) {
+            writeTx = delegateBroker.newWriteOnlyTransaction();
+        }
+        writeTx.delete(store, path);
+    }
+
+    /**
+     * Cancels the active write tx.  NOTE(review): unlike put/merge/delete there is no
+     * null guard here; NPEs when no write tx is active - confirm callers' ordering.
+     */
+    @Override
+    public boolean cancel() {
+        return writeTx.cancel();
+    }
+
+    /** Submits the active write tx; same missing-null-guard caveat as cancel(). */
+    @Override
+    public Future<Void> submit() {
+        final CheckedFuture<Void, TransactionCommitFailedException> submitFuture = writeTx.submit();
+        final DefaultPromise<Void> promise = new DefaultPromise<>();
+        Futures.addCallback(submitFuture, new FutureCallback<Void>() {
+            @Override
+            public void onSuccess(Void result) {
+                promise.success(result);
+            }
+
+            @Override
+            public void onFailure(Throwable t) {
+                promise.failure(t);
+            }
+        });
+        return promise.future();
+    }
+
+    /** Deprecated commit path, kept for API compatibility with DOMDataWriteTransaction. */
+    @Override
+    @Deprecated
+    public Future<RpcResult<TransactionStatus>> commit() {
+        final ListenableFuture<RpcResult<TransactionStatus>> commitFuture = writeTx.commit();
+        final DefaultPromise<RpcResult<TransactionStatus>> promise = new DefaultPromise<>();
+        Futures.addCallback(commitFuture, new FutureCallback<RpcResult<TransactionStatus>>() {
+            @Override
+            public void onSuccess(RpcResult<TransactionStatus> result) {
+                promise.success(result);
+            }
+
+            @Override
+            public void onFailure(Throwable t) {
+                promise.failure(t);
+            }
+        });
+        return promise.future();
+    }
+
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/NetconfDeviceSlaveDataBroker.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/NetconfDeviceSlaveDataBroker.java
new file mode 100644 (file)
index 0000000..23d7e10
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline;
+
+import akka.actor.ActorSystem;
+import java.util.Collections;
+import java.util.Map;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.opendaylight.netconf.sal.connect.netconf.sal.tx.ReadWriteTx;
+import org.opendaylight.netconf.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.netconf.topology.pipeline.tx.ProxyReadOnlyTransaction;
+import org.opendaylight.netconf.topology.pipeline.tx.ProxyWriteOnlyTransaction;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+public class NetconfDeviceSlaveDataBroker implements DOMDataBroker{
+
+    private final RemoteDeviceId id;
+    private final ProxyNetconfDeviceDataBroker masterDataBroker;
+    private final ActorSystem actorSystem;
+
+    public NetconfDeviceSlaveDataBroker(final ActorSystem actorSystem, final RemoteDeviceId id, final ProxyNetconfDeviceDataBroker masterDataBroker) {
+        this.id = id;
+        this.masterDataBroker = masterDataBroker;
+        this.actorSystem = actorSystem;
+    }
+
+    @Override
+    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+        return new ProxyReadOnlyTransaction(actorSystem, id, masterDataBroker);
+    }
+
+    @Override
+    public DOMDataReadWriteTransaction newReadWriteTransaction() {
+        return new ReadWriteTx(new ProxyReadOnlyTransaction(actorSystem, id, masterDataBroker), new ProxyWriteOnlyTransaction(actorSystem, masterDataBroker));
+    }
+
+    @Override
+    public DOMDataWriteTransaction newWriteOnlyTransaction() {
+        return new ProxyWriteOnlyTransaction(actorSystem, masterDataBroker);
+    }
+
+    @Override
+    public ListenerRegistration<DOMDataChangeListener> registerDataChangeListener(LogicalDatastoreType store, YangInstanceIdentifier path, DOMDataChangeListener listener, DataChangeScope triggeringScope) {
+        throw new UnsupportedOperationException(id + ": Data change listeners not supported for netconf mount point");
+    }
+
+    @Override
+    public DOMTransactionChain createTransactionChain(TransactionChainListener listener) {
+        throw new UnsupportedOperationException(id + ": Transaction chains not supported for netconf mount point");
+    }
+
+    @Nonnull
+    @Override
+    public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
+        return Collections.emptyMap();
+    }
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ProxyNetconfDeviceDataBroker.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/ProxyNetconfDeviceDataBroker.java
new file mode 100644 (file)
index 0000000..3ef7688
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.netconf.topology.util.messages.NormalizedNodeMessage;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import scala.concurrent.Future;
+
+public interface ProxyNetconfDeviceDataBroker extends DOMDataBroker{
+    Future<Optional<NormalizedNodeMessage>> read(LogicalDatastoreType store, YangInstanceIdentifier path);
+
+    Future<Boolean> exists(LogicalDatastoreType store, YangInstanceIdentifier path);
+
+    void put(LogicalDatastoreType store, NormalizedNodeMessage data);
+
+    void merge(LogicalDatastoreType store, NormalizedNodeMessage data);
+
+    void delete(LogicalDatastoreType store, YangInstanceIdentifier path);
+
+    boolean cancel();
+
+    Future<Void> submit();
+
+    @Deprecated
+    Future<RpcResult<TransactionStatus>> commit();
+}
index ec06c04b92f9e1fc9ac94885ad7b9e8e64ab7fc0..269db6907bc987757b3f642966434cf343bdec11 100644 (file)
@@ -8,6 +8,14 @@
 
 package org.opendaylight.netconf.topology.pipeline;
 
+import akka.actor.ActorContext;
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.TypedActor;
+import akka.actor.TypedProps;
+import akka.cluster.Cluster;
+import akka.cluster.Member;
+import akka.japi.Creator;
 import com.google.common.base.Preconditions;
 import java.util.ArrayList;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
@@ -17,9 +25,10 @@ import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.netconf.sal.connect.api.RemoteDeviceHandler;
 import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
-import org.opendaylight.netconf.sal.connect.netconf.sal.NetconfDeviceDataBroker;
 import org.opendaylight.netconf.sal.connect.netconf.sal.NetconfDeviceNotificationService;
 import org.opendaylight.netconf.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.netconf.topology.util.messages.AnnounceMasterMountPoint;
+import org.opendaylight.netconf.topology.util.messages.AnnounceMasterMountPointDown;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -28,6 +37,9 @@ public class TopologyMountPointFacade implements AutoCloseable, RemoteDeviceHand
 
     private static final Logger LOG = LoggerFactory.getLogger(TopologyMountPointFacade.class);
 
+    private static final String MOUNT_POINT = "mountpoint";
+
+    private final String topologyId;
     private final RemoteDeviceId id;
     private final Broker domBroker;
     private final BindingAwareBroker bindingBroker;
@@ -38,13 +50,17 @@ public class TopologyMountPointFacade implements AutoCloseable, RemoteDeviceHand
     private DOMRpcService deviceRpc = null;
     private final ClusteredNetconfDeviceMountInstanceProxy salProvider;
 
+    private ActorSystem actorSystem;
+    private DOMDataBroker deviceDataBroker = null;
+
     private final ArrayList<RemoteDeviceHandler<NetconfSessionPreferences>> connectionStatusListeners = new ArrayList<>();
 
-    public TopologyMountPointFacade(final RemoteDeviceId id,
+    public TopologyMountPointFacade(final String topologyId,
+                                    final RemoteDeviceId id,
                                     final Broker domBroker,
                                     final BindingAwareBroker bindingBroker,
                                     long defaultRequestTimeoutMillis) {
-
+        this.topologyId = topologyId;
         this.id = id;
         this.domBroker = domBroker;
         this.bindingBroker = bindingBroker;
@@ -62,6 +78,7 @@ public class TopologyMountPointFacade implements AutoCloseable, RemoteDeviceHand
                                   final NetconfSessionPreferences netconfSessionPreferences,
                                   final DOMRpcService deviceRpc) {
         // prepare our prerequisites for mountpoint
+        LOG.debug("Mount point facade onConnected capabilities {}", netconfSessionPreferences);
         this.remoteSchemaContext = remoteSchemaContext;
         this.netconfSessionPreferences = netconfSessionPreferences;
         this.deviceRpc = deviceRpc;
@@ -91,19 +108,69 @@ public class TopologyMountPointFacade implements AutoCloseable, RemoteDeviceHand
         salProvider.getMountInstance().publish(domNotification);
     }
 
-    public void registerMountPoint() {
+    public void registerMountPoint(final ActorSystem actorSystem, final ActorContext context) {
+        if (remoteSchemaContext == null || netconfSessionPreferences == null) {
+            LOG.debug("Master mount point does not have schemas ready yet, delaying registration");
+            return;
+        }
+
         Preconditions.checkNotNull(id);
         Preconditions.checkNotNull(remoteSchemaContext, "Device has no remote schema context yet. Probably not fully connected.");
         Preconditions.checkNotNull(netconfSessionPreferences, "Device has no capabilities yet. Probably not fully connected.");
+        this.actorSystem = actorSystem;
+        final NetconfDeviceNotificationService notificationService = new NetconfDeviceNotificationService();
+
+        LOG.warn("Creating master data broker for device {}", id);
+        deviceDataBroker = TypedActor.get(context).typedActorOf(new TypedProps<>(ProxyNetconfDeviceDataBroker.class, new Creator<NetconfDeviceMasterDataBroker>() {
+            @Override
+            public NetconfDeviceMasterDataBroker create() throws Exception {
+                return new NetconfDeviceMasterDataBroker(actorSystem, id, remoteSchemaContext, deviceRpc, netconfSessionPreferences, defaultRequestTimeoutMillis);
+            }
+        }), MOUNT_POINT);
+        LOG.debug("Master data broker registered on path {}", TypedActor.get(actorSystem).getActorRefFor(deviceDataBroker).path());
+        salProvider.getMountInstance().onTopologyDeviceConnected(remoteSchemaContext, deviceDataBroker, deviceRpc, notificationService);
+        final Cluster cluster = Cluster.get(actorSystem);
+        final Iterable<Member> members = cluster.state().getMembers();
+        final ActorRef deviceDataBrokerRef = TypedActor.get(actorSystem).getActorRefFor(deviceDataBroker);
+        for (final Member member : members) {
+            if (!member.address().equals(cluster.selfAddress())) {
+                final String path = member.address() + "/user/" + topologyId + "/" + id.getName();
+                actorSystem.actorSelection(path).tell(new AnnounceMasterMountPoint(), deviceDataBrokerRef);
+            }
+        }
+    }
 
-        final DOMDataBroker netconfDeviceDataBroker = new NetconfDeviceDataBroker(id, remoteSchemaContext, deviceRpc, netconfSessionPreferences, defaultRequestTimeoutMillis);
+    public void registerMountPoint(final ActorSystem actorSystem, final ActorContext context, final ActorRef masterRef) {
+        if (remoteSchemaContext == null || netconfSessionPreferences == null) {
+            LOG.debug("Slave mount point does not have schemas ready yet, delaying registration");
+            return;
+        }
+
+        Preconditions.checkNotNull(id);
+        Preconditions.checkNotNull(remoteSchemaContext, "Device has no remote schema context yet. Probably not fully connected.");
+        Preconditions.checkNotNull(netconfSessionPreferences, "Device has no capabilities yet. Probably not fully connected.");
+        this.actorSystem = actorSystem;
         final NetconfDeviceNotificationService notificationService = new NetconfDeviceNotificationService();
 
-        salProvider.getMountInstance().onTopologyDeviceConnected(remoteSchemaContext, netconfDeviceDataBroker, deviceRpc, notificationService);
+        final ProxyNetconfDeviceDataBroker masterDataBroker = TypedActor.get(actorSystem).typedActorOf(new TypedProps<>(ProxyNetconfDeviceDataBroker.class, NetconfDeviceMasterDataBroker.class), masterRef);
+        LOG.warn("Creating slave data broker for device {}", id);
+        final DOMDataBroker deviceDataBroker = new NetconfDeviceSlaveDataBroker(actorSystem, id, masterDataBroker);
+        salProvider.getMountInstance().onTopologyDeviceConnected(remoteSchemaContext, deviceDataBroker, deviceRpc, notificationService);
     }
 
     public void unregisterMountPoint() {
         salProvider.getMountInstance().onTopologyDeviceDisconnected();
+        if (deviceDataBroker != null) {
+            LOG.debug("Stopping master data broker for device {}", id.getName());
+            for (final Member member : Cluster.get(actorSystem).state().getMembers()) {
+                if (member.address().equals(Cluster.get(actorSystem).selfAddress())) {
+                    continue;
+                }
+                actorSystem.actorSelection(member.address() + "/user/" + topologyId + "/" + id.getName()).tell(new AnnounceMasterMountPointDown(), null);
+            }
+            TypedActor.get(actorSystem).stop(deviceDataBroker);
+            deviceDataBroker = null;
+        }
     }
 
     public ConnectionStatusListenerRegistration registerConnectionStatusListener(final RemoteDeviceHandler<NetconfSessionPreferences> listener) {
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceClusteredDeviceSourcesResolverUp.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceClusteredDeviceSourcesResolverUp.java
new file mode 100644 (file)
index 0000000..f8328e7
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline.messages;
+
+import java.io.Serializable;
+
/**
 * Serializable, payload-free marker message; its class identity alone carries
 * the signal (per its name: a clustered device-sources resolver came up).
 */
public class AnnounceClusteredDeviceSourcesResolverUp implements Serializable {
    public static final long serialVersionUID = 1L;

    public AnnounceClusteredDeviceSourcesResolverUp() {
    }
}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceMasterOnSameNodeUp.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceMasterOnSameNodeUp.java
new file mode 100644 (file)
index 0000000..793321c
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline.messages;
+
+import java.io.Serializable;
+
/**
 * Serializable, payload-free marker message (per its name: announces that the
 * master is running on the same cluster node).
 */
public class AnnounceMasterOnSameNodeUp implements Serializable {
    // Must be static AND final for the serialization machinery to honor it; the
    // original declaration was missing 'final', so the JVM silently ignored it.
    public static final long serialVersionUID = 1L;

    public AnnounceMasterOnSameNodeUp() {
    }
}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceMasterSourceProviderUp.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/messages/AnnounceMasterSourceProviderUp.java
new file mode 100644 (file)
index 0000000..7bec681
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline.messages;
+
+import java.io.Serializable;
+
/**
 * Serializable, payload-free marker message; its class identity alone carries
 * the signal (per its name: a master source provider became available).
 */
public class AnnounceMasterSourceProviderUp implements Serializable {
    public static final long serialVersionUID = 1L;

    public AnnounceMasterSourceProviderUp() {
    }
}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/NetconfDeviceDataBrokerProxy.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/NetconfDeviceDataBrokerProxy.java
new file mode 100644 (file)
index 0000000..23fe120
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline.tx;
+
/**
 * Marker interface for NETCONF device data-broker proxies. It currently
 * declares no methods of its own; implementations are identified by type only.
 */
public interface NetconfDeviceDataBrokerProxy {
}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/ProxyReadOnlyTransaction.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/ProxyReadOnlyTransaction.java
new file mode 100644 (file)
index 0000000..c67673b
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline.tx;
+
+import akka.actor.ActorSystem;
+import akka.dispatch.OnComplete;
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import javax.annotation.Nullable;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
+import org.opendaylight.netconf.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.netconf.topology.pipeline.ProxyNetconfDeviceDataBroker;
+import org.opendaylight.netconf.topology.util.messages.NormalizedNodeMessage;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import scala.concurrent.Future;
+
+public class ProxyReadOnlyTransaction implements DOMDataReadOnlyTransaction{
+
+    private final RemoteDeviceId id;
+    private final ProxyNetconfDeviceDataBroker delegate;
+    private final ActorSystem actorSystem;
+
+    public ProxyReadOnlyTransaction(final ActorSystem actorSystem, final RemoteDeviceId id, final ProxyNetconfDeviceDataBroker delegate) {
+        this.id = id;
+        this.delegate = delegate;
+        this.actorSystem = actorSystem;
+    }
+
+    @Override
+    public void close() {
+        //NOOP
+    }
+
+    @Override
+    public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        final Future<Optional<NormalizedNodeMessage>> future = delegate.read(store, path);
+        final SettableFuture<Optional<NormalizedNode<?, ?>>> settableFuture = SettableFuture.create();
+        final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> checkedFuture = Futures.makeChecked(settableFuture, new Function<Exception, ReadFailedException>() {
+            @Nullable
+            @Override
+            public ReadFailedException apply(Exception cause) {
+                return new ReadFailedException("Read from transaction failed", cause);
+            }
+        });
+        future.onComplete(new OnComplete<Optional<NormalizedNodeMessage>>() {
+            @Override
+            public void onComplete(Throwable throwable, Optional<NormalizedNodeMessage> normalizedNodeMessage) throws Throwable {
+                if (throwable == null) {
+                    settableFuture.set(normalizedNodeMessage.transform(new Function<NormalizedNodeMessage, NormalizedNode<?, ?>>() {
+                        @Nullable
+                        @Override
+                        public NormalizedNode<?, ?> apply(NormalizedNodeMessage input) {
+                            return input.getNode();
+                        }
+                    }));
+                } else {
+                    settableFuture.setException(throwable);
+                }
+            }
+        }, actorSystem.dispatcher());
+        return checkedFuture;
+    }
+
+    @Override
+    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        final Future<Boolean> existsFuture = delegate.exists(store, path);
+        final SettableFuture<Boolean> settableFuture = SettableFuture.create();
+        final CheckedFuture<Boolean, ReadFailedException> checkedFuture = Futures.makeChecked(settableFuture, new Function<Exception, ReadFailedException>() {
+            @Nullable
+            @Override
+            public ReadFailedException apply(Exception cause) {
+                return new ReadFailedException("Read from transaction failed", cause);
+            }
+        });
+        existsFuture.onComplete(new OnComplete<Boolean>() {
+            @Override
+            public void onComplete(Throwable throwable, Boolean result) throws Throwable {
+                if (throwable == null) {
+                    settableFuture.set(result);
+                } else {
+                    settableFuture.setException(throwable);
+                }
+            }
+        }, actorSystem.dispatcher());
+        return checkedFuture;
+    }
+
+    @Override
+    public Object getIdentifier() {
+        return this;
+    }
+}
diff --git a/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/ProxyWriteOnlyTransaction.java b/opendaylight/netconf/netconf-topology/src/main/java/org/opendaylight/netconf/topology/pipeline/tx/ProxyWriteOnlyTransaction.java
new file mode 100644 (file)
index 0000000..d7a3c87
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.netconf.topology.pipeline.tx;
+
+import akka.actor.ActorSystem;
+import akka.dispatch.OnComplete;
+import com.google.common.base.Function;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import javax.annotation.Nullable;
+import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.netconf.topology.pipeline.ProxyNetconfDeviceDataBroker;
+import org.opendaylight.netconf.topology.util.messages.NormalizedNodeMessage;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import scala.concurrent.Future;
+
+public class ProxyWriteOnlyTransaction implements DOMDataWriteTransaction {
+
+    private final ProxyNetconfDeviceDataBroker delegate;
+    private final ActorSystem actorSystem;
+
+    public ProxyWriteOnlyTransaction(ActorSystem actorSystem, final ProxyNetconfDeviceDataBroker delegate) {
+        this.delegate = delegate;
+        this.actorSystem = actorSystem;
+    }
+
+    @Override
+    public void put (final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode < ?,?>data){
+        delegate.put(store, new NormalizedNodeMessage(path, data));
+    }
+
+    @Override
+    public void merge (final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode < ?,?>data){
+        delegate.merge(store, new NormalizedNodeMessage(path, data));
+    }
+
+    @Override
+    public boolean cancel () {
+        return delegate.cancel();
+    }
+
+    @Override
+    public void delete (final LogicalDatastoreType store, final YangInstanceIdentifier path){
+        delegate.delete(store, path);
+    }
+
+    @Override
+    public CheckedFuture<Void, TransactionCommitFailedException> submit() {
+        final Future<Void> submit = delegate.submit();
+        final SettableFuture<Void> settableFuture = SettableFuture.create();
+        final CheckedFuture<Void, TransactionCommitFailedException> checkedFuture = Futures.makeChecked(settableFuture, new Function<Exception, TransactionCommitFailedException>() {
+            @Nullable
+            @Override
+            public TransactionCommitFailedException apply(Exception input) {
+                return new TransactionCommitFailedException("Transaction commit failed", input);
+            }
+        });
+        submit.onComplete(new OnComplete<Void>() {
+            @Override
+            public void onComplete(Throwable throwable, Void aVoid) throws Throwable {
+                if (throwable == null) {
+                    settableFuture.set(aVoid);
+                } else {
+                    settableFuture.setException(throwable);
+                }
+            }
+        }, actorSystem.dispatcher());
+        return checkedFuture;
+    }
+
+    @Override
+    public ListenableFuture<RpcResult<TransactionStatus>> commit () {
+        final Future<RpcResult<TransactionStatus>> commit = delegate.commit();
+        final SettableFuture<RpcResult<TransactionStatus>> settableFuture = SettableFuture.create();
+        commit.onComplete(new OnComplete<RpcResult<TransactionStatus>>() {
+            @Override
+            public void onComplete(Throwable throwable, RpcResult<TransactionStatus> transactionStatusRpcResult) throws Throwable {
+                if (throwable == null) {
+                    settableFuture.set(transactionStatusRpcResult);
+                } else {
+                    settableFuture.setException(throwable);
+                }
+            }
+        }, actorSystem.dispatcher());
+        return settableFuture;
+    }
+
+    @Override
+    public Object getIdentifier () {
+        return this;
+    }
+}
index 392838c4b8eaeca62c88c90f916dcc5401f5168f..df1b464ee4ecb9731e34d1db38420ec62b22c91f 100644 (file)
@@ -18,9 +18,11 @@ import akka.actor.TypedActor;
 import akka.actor.TypedActorExtension;
 import akka.actor.TypedProps;
 import akka.japi.Creator;
+import com.google.common.base.Function;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -35,6 +37,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import javassist.ClassPool;
 import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -42,9 +45,14 @@ import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
 import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
 import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.netconf.sal.connect.netconf.listener.NetconfSessionPreferences;
 import org.opendaylight.netconf.topology.NodeManagerCallback.NodeManagerCallbackFactory;
 import org.opendaylight.netconf.topology.TopologyManagerCallback.TopologyManagerCallbackFactory;
 import org.opendaylight.netconf.topology.example.ExampleNodeManagerCallback;
@@ -62,10 +70,14 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev15
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeConnectionStatus.ConnectionStatus;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.AvailableCapabilitiesBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.ClusteredConnectionStatusBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.UnavailableCapabilitiesBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.clustered.connection.status.NodeStatus.Status;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.clustered.connection.status.NodeStatusBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.unavailable.capabilities.UnavailableCapability;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
 import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
 import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
@@ -90,6 +102,9 @@ public class ActorTest {
     @Mock
     private DataBroker dataBroker;
 
+    @Mock
+    private ReadOnlyTransaction mockedReadOnlyTx;
+
     private static final BindingNormalizedNodeCodecRegistry CODEC_REGISTRY;
 
     static {
@@ -123,11 +138,22 @@ public class ActorTest {
     @Before
     public void setup() {
         MockitoAnnotations.initMocks(this);
+        final SettableFuture<Optional<Topology>> settableFuture = SettableFuture.create();
+        final CheckedFuture<Optional<Topology>, ReadFailedException> checkedFuture = Futures.makeChecked(settableFuture, new Function<Exception, ReadFailedException>() {
+            @Nullable
+            @Override
+            public ReadFailedException apply(Exception input) {
+                return new ReadFailedException("Dummy future should never return this");
+            }
+        });
+        settableFuture.set(Optional.<Topology>absent());
+        when(mockedReadOnlyTx.read(any(LogicalDatastoreType.class), any(InstanceIdentifier.class))).thenReturn(checkedFuture);
         when(dataBroker.registerDataChangeListener(
                 any(LogicalDatastoreType.class),
                 any(InstanceIdentifier.class),
                 any(DataChangeListener.class),
                 any(DataChangeScope.class))).thenReturn(null);
+        when(dataBroker.newReadOnlyTransaction()).thenReturn(mockedReadOnlyTx);
     }
 
     private void setMaster(final TopologyManager manager) {
@@ -201,6 +227,8 @@ public class ActorTest {
                 }
             });
         }
+        LOG.debug("Waiting for updates to finish");
+        Futures.allAsList(futures).get();
 
 
         final List<ListenableFuture<Void>> deleteFutures = new ArrayList<>();
@@ -377,6 +405,8 @@ public class ActorTest {
                                     .setHost(netconfNode.getHost())
                                     .setPort(netconfNode.getPort())
                                     .setConnectionStatus(ConnectionStatus.Connecting)
+                                    .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                    .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                     .setClusteredConnectionStatus(
                                             new ClusteredConnectionStatusBuilder()
                                                     .setNodeStatus(
@@ -401,6 +431,8 @@ public class ActorTest {
                                     .setHost(netconfNode.getHost())
                                     .setPort(netconfNode.getPort())
                                     .setConnectionStatus(ConnectionStatus.UnableToConnect)
+                                    .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                    .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                     .setClusteredConnectionStatus(
                                             new ClusteredConnectionStatusBuilder()
                                                     .setNodeStatus(
@@ -426,6 +458,8 @@ public class ActorTest {
                                     .setConnectionStatus(ConnectionStatus.Connected)
                                     .setHost(augmentation.getHost())
                                     .setPort(augmentation.getPort())
+                                    .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                    .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                     .setClusteredConnectionStatus(
                                             new ClusteredConnectionStatusBuilder()
                                                     .setNodeStatus(
@@ -451,6 +485,8 @@ public class ActorTest {
                                     .setConnectionStatus(ConnectionStatus.Connected)
                                     .setHost(augmentation.getHost())
                                     .setPort(augmentation.getPort())
+                                    .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                    .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                     .setClusteredConnectionStatus(
                                             new ClusteredConnectionStatusBuilder()
                                                     .setNodeStatus(
@@ -486,6 +522,31 @@ public class ActorTest {
         public void onReceive(Object o, ActorRef actorRef) {
 
         }
+
+        @Override
+        public void onDeviceConnected(SchemaContext remoteSchemaContext, NetconfSessionPreferences netconfSessionPreferences, DOMRpcService deviceRpc) {
+
+        }
+
+        @Override
+        public void onDeviceDisconnected() {
+
+        }
+
+        @Override
+        public void onDeviceFailed(Throwable throwable) {
+
+        }
+
+        @Override
+        public void onNotification(DOMNotification domNotification) {
+
+        }
+
+        @Override
+        public void close() {
+
+        }
     }
 
     public static class TestingTopologyManagerCallback implements TopologyManagerCallback {
@@ -504,6 +565,8 @@ public class ActorTest {
                                     .setConnectionStatus(ConnectionStatus.Connected)
                                     .setHost(new Host(new IpAddress(new Ipv4Address("127.0.0.1"))))
                                     .setPort(new PortNumber(2555))
+                                    .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                    .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                     .build())
                     .build());
         }
@@ -519,6 +582,8 @@ public class ActorTest {
                                     .setConnectionStatus(ConnectionStatus.Connected)
                                     .setHost(new Host(new IpAddress(new Ipv4Address("127.0.0.1"))))
                                     .setPort(new PortNumber(65535))
+                                    .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                    .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
                                     .build())
                     .build());
         }
@@ -544,6 +609,38 @@ public class ActorTest {
         public void onReceive(Object o, ActorRef actorRef) {
 
         }
+
+        @Nonnull
+        @Override
+        public Node getInitialState(@Nonnull NodeId nodeId, @Nonnull Node configNode) {
+            return new NodeBuilder()
+                    .setNodeId(nodeId)
+                    .addAugmentation(NetconfNode.class,
+                            new NetconfNodeBuilder()
+                                    .setConnectionStatus(ConnectionStatus.Connecting)
+                                    .setHost(new Host(new IpAddress(new Ipv4Address("127.0.0.1"))))
+                                    .setPort(new PortNumber(65535))
+                                    .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                    .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
+                                    .build())
+                    .build();
+        }
+
+        @Nonnull
+        @Override
+        public Node getFailedState(@Nonnull NodeId nodeId, @Nonnull Node configNode) {
+            return new NodeBuilder()
+                    .setNodeId(nodeId)
+                    .addAugmentation(NetconfNode.class,
+                            new NetconfNodeBuilder()
+                                    .setConnectionStatus(ConnectionStatus.UnableToConnect)
+                                    .setHost(new Host(new IpAddress(new Ipv4Address("127.0.0.1"))))
+                                    .setPort(new PortNumber(65535))
+                                    .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+                                    .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
+                                    .build())
+                    .build();
+        }
     }
 
     public class TestingSuccesfulStateAggregator implements StateAggregator {
index 7931bb8c392e7136744002269dd14583419cc663..28a1f97122f78d8917768382b598e9e87d99a638 100644 (file)
@@ -8,6 +8,8 @@
 
 package org.opendaylight.netconf.topology;
 
+import akka.actor.ActorContext;
+import akka.actor.ActorRef;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
@@ -108,9 +110,13 @@ public class TestingTopologyDispatcher implements NetconfTopology{
     }
 
     @Override
-    public void registerMountPoint(NodeId nodeId) {
+    public void registerMountPoint(ActorContext context, NodeId nodeId) {
         LOG.debug("Registering mount point for node {}", nodeId.getValue());
+    }
 
+    @Override
+    public void registerMountPoint(ActorContext context, NodeId nodeId, ActorRef masterRef) {
+        LOG.debug("Registering mount point for node {}", nodeId.getValue());
     }
 
     @Override
index 23cb6ad0300a434575731cbb8563dbdddd5a01a6..f6f62fe8a2a63d05daeaa384bf1dc95e394c6063 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.netconf.sal.connect.netconf;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
@@ -64,7 +63,7 @@ import org.slf4j.LoggerFactory;
 /**
  *  This is a mediator between NetconfDeviceCommunicator and NetconfDeviceSalFacade
  */
-public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> {
+public class NetconfDevice implements RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> {
 
     private static final Logger LOG = LoggerFactory.getLogger(NetconfDevice.class);
 
@@ -95,16 +94,16 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
         }
     };
 
-    private final RemoteDeviceId id;
+    protected final RemoteDeviceId id;
     private final boolean reconnectOnSchemasChange;
 
-    private final SchemaContextFactory schemaContextFactory;
+    protected final SchemaContextFactory schemaContextFactory;
     private final RemoteDeviceHandler<NetconfSessionPreferences> salFacade;
     private final ListeningExecutorService processingExecutor;
-    private final SchemaSourceRegistry schemaRegistry;
+    protected final SchemaSourceRegistry schemaRegistry;
     private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
     private final NotificationHandler notificationHandler;
-    private final List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations = Lists.newArrayList();
+    protected final List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations = Lists.newArrayList();
 
     // Message transformer is constructed once the schemas are available
     private MessageTransformer<NetconfMessage> messageTransformer;
@@ -214,8 +213,7 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
         return remoteSessionCapabilities.isNotificationsSupported() && reconnectOnSchemasChange;
     }
 
-    @VisibleForTesting
-    void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionPreferences remoteSessionCapabilities, final DOMRpcService deviceRpc) {
+    protected void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionPreferences remoteSessionCapabilities, final DOMRpcService deviceRpc) {
         messageTransformer = new NetconfMessageTransformer(result, true);
 
         updateTransformer(messageTransformer);
@@ -226,7 +224,7 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
         LOG.info("{}: Netconf connector initialized successfully", id);
     }
 
-    private void handleSalInitializationFailure(final Throwable t, final RemoteDeviceCommunicator<NetconfMessage> listener) {
+    protected void handleSalInitializationFailure(final Throwable t, final RemoteDeviceCommunicator<NetconfMessage> listener) {
         LOG.error("{}: Initialization in sal failed, disconnecting from device", id, t);
         listener.close();
         onRemoteSessionDown();
@@ -460,7 +458,7 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
             Futures.addCallback(schemaBuilderFuture, RecursiveSchemaBuilderCallback);
         }
 
-        private NetconfDeviceRpc getDeviceSpecificRpc(final SchemaContext result) {
+        protected NetconfDeviceRpc getDeviceSpecificRpc(final SchemaContext result) {
             return new NetconfDeviceRpc(result, listener, new NetconfMessageTransformer(result, true));
         }
 
index d174d9546abec51ac1ba482a79875511712e7eed..27b05ee5ef36d8378a99a4614730aeca2a501f2b 100644 (file)
@@ -48,9 +48,9 @@ public class NetconfDeviceCommunicator implements NetconfClientSessionListener,
 
     private static final Logger LOG = LoggerFactory.getLogger(NetconfDeviceCommunicator.class);
 
-    private final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice;
+    protected final RemoteDevice<NetconfSessionPreferences, NetconfMessage, NetconfDeviceCommunicator> remoteDevice;
     private final Optional<NetconfSessionPreferences> overrideNetconfCapabilities;
-    private final RemoteDeviceId id;
+    protected final RemoteDeviceId id;
     private final Lock sessionLock = new ReentrantLock();
 
     // TODO implement concurrent message limit