Also includes a couple of other minor bugfixes.
Change-Id: Id1e412a748225f4194f0b9c5e39d45f6bf58db4c
Signed-off-by: Tomas Cere <tcere@cisco.com>
.setHost(netconfNode.getHost())
.setPort(netconfNode.getPort())
.setConnectionStatus(ConnectionStatus.Connecting)
+ .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+ .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
.setClusteredConnectionStatus(
new ClusteredConnectionStatusBuilder()
.setNodeStatus(
.setHost(netconfNode.getHost())
.setPort(netconfNode.getPort())
.setConnectionStatus(ConnectionStatus.UnableToConnect)
+ .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+ .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
.setClusteredConnectionStatus(
new ClusteredConnectionStatusBuilder()
.setNodeStatus(
.build())
.setHost(netconfNode.getHost())
.setPort(netconfNode.getPort())
- .setAvailableCapabilities(new AvailableCapabilitiesBuilder().build())
- .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().build())
+ .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+ .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
.build()).build();
return currentOperationalNode;
}
.build())
.setHost(netconfNode.getHost())
.setPort(netconfNode.getPort())
- .setAvailableCapabilities(new AvailableCapabilitiesBuilder().build())
- .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().build())
+ .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+ .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
.build())
.build();
}
topologyDispatcher.unregisterMountPoint(new NodeId(nodeId));
isMaster = roleChangeDTO.isOwner();
- if (isMaster) {
- LOG.warn("Gained ownership of node - registering master mount point");
- topologyDispatcher.registerMountPoint(TypedActor.context(), new NodeId(nodeId));
- } else {
- // even though mount point is ready, we dont know who the master mount point will be since we havent received the announce msg
- // after we receive the message we can go ahead and register the mount point
- if (connected && masterDataBrokerRef != null) {
- topologyDispatcher.registerMountPoint(TypedActor.context(), new NodeId(nodeId), masterDataBrokerRef);
- } else {
- LOG.debug("Mount point is ready, still waiting for master mount point");
- }
- }
}
@Override
public void onDeviceConnected(final SchemaContext remoteSchemaContext, final NetconfSessionPreferences netconfSessionPreferences, final DOMRpcService deviceRpc) {
// we need to notify the higher level that something happened, get a current status from all other nodes, and aggregate a new result
connected = true;
- if (!isMaster && masterDataBrokerRef != null) {
- // if we're not master but one is present already, we need to register mountpoint
+ if (isMaster) {
+ LOG.debug("Master is done with schema resolution, registering mount point");
+ topologyDispatcher.registerMountPoint(TypedActor.context(), new NodeId(nodeId));
+ } else if (masterDataBrokerRef != null) {
LOG.warn("Device connected, master already present in topology, registering mount point");
topologyDispatcher.registerMountPoint(cachedContext, new NodeId(nodeId), masterDataBrokerRef);
}
+
List<String> capabilityList = new ArrayList<>();
capabilityList.addAll(netconfSessionPreferences.getNetconfDeviceCapabilities().getNonModuleBasedCapabilities());
capabilityList.addAll(FluentIterable.from(netconfSessionPreferences.getNetconfDeviceCapabilities().getResolvedCapabilities()).transform(AVAILABLE_CAPABILITY_TRANSFORMER).toList());
LOG.debug("onDeviceDisconnected received, unregistered role candidate");
connected = false;
if (isMaster) {
- // announce that master mount point is going down
-// for (final Member member : clusterExtension.state().getMembers()) {
-// actorSystem.actorSelection(member.address() + "/user/" + topologyId + "/" + nodeId).tell(new AnnounceMasterMountPointDown(), null);
-// }
// set master to false since we are unregistering, the ownershipChanged callback can sometimes lag behind causing multiple nodes behaving as masters
isMaster = false;
// onRoleChanged() callback can sometimes lag behind, so unregister the mount right when it disconnects
.addAugmentation(NetconfNode.class,
new NetconfNodeBuilder()
.setConnectionStatus(ConnectionStatus.Connecting)
+ .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+ .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
.setClusteredConnectionStatus(
new ClusteredConnectionStatusBuilder()
.setNodeStatus(
public void onDeviceFailed(Throwable throwable) {
// we need to notify the higher level that something happened, get a current status from all other nodes, and aggregate a new result
// no need to remove mountpoint, we should receive onRoleChanged callback after unregistering from election that unregisters the mountpoint
- LOG.debug("onDeviceFailed received");
+ LOG.warn("Netconf node {} failed with {}", nodeId, throwable);
connected = false;
String reason = (throwable != null && throwable.getMessage() != null) ? throwable.getMessage() : UNKNOWN_REASON;
.addAugmentation(NetconfNode.class,
new NetconfNodeBuilder()
.setConnectionStatus(ConnectionStatus.UnableToConnect)
+ .setAvailableCapabilities(new AvailableCapabilitiesBuilder().setAvailableCapability(new ArrayList<String>()).build())
+ .setUnavailableCapabilities(new UnavailableCapabilitiesBuilder().setUnavailableCapability(new ArrayList<UnavailableCapability>()).build())
.setClusteredConnectionStatus(
new ClusteredConnectionStatusBuilder()
.setNodeStatus(
@Override
public void onReceive(Object message, ActorRef actorRef) {
- LOG.warn("Netconf node callback received message {}", message);
+ LOG.debug("Netconf node callback received message {}", message);
if (message instanceof AnnounceMasterMountPoint) {
masterDataBrokerRef = actorRef;
// candidate gets registered when the mount point is already prepared, so we can go ahead and register it
- if (roleChangeStrategy.isCandidateRegistered()) {
+ if (connected) {
topologyDispatcher.registerMountPoint(TypedActor.context(), new NodeId(nodeId), masterDataBrokerRef);
} else {
- LOG.warn("Announce master mount point msg received but mount point is not ready yet");
+ LOG.debug("Announce master mount point msg received but mount point is not ready yet");
}
} else if (message instanceof AnnounceMasterMountPointDown) {
- LOG.warn("Master mountpoint went down");
+ LOG.debug("Master mountpoint went down");
masterDataBrokerRef = null;
topologyDispatcher.unregisterMountPoint(new NodeId(nodeId));
}
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.NetconfNodeConnectionStatus.ConnectionStatus;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.AvailableCapabilities;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.ClusteredConnectionStatusBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.UnavailableCapabilities;
import org.opendaylight.yang.gen.v1.urn.opendaylight.netconf.node.topology.rev150114.netconf.node.connection.status.clustered.connection.status.NodeStatus;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
public void onSuccess(final List<Node> result) {
Node base = null;
NetconfNode baseAugmentation = null;
+ AvailableCapabilities masterCaps = null;
+ UnavailableCapabilities unavailableMasterCaps = null;
final ArrayList<NodeStatus> statusList = new ArrayList<>();
for (final Node node : result) {
final NetconfNode netconfNode = node.getAugmentation(NetconfNode.class);
base = node;
baseAugmentation = netconfNode;
}
+ // we need to pull out caps from master, since slave does not go through resolution
+ if (masterCaps == null) {
+ masterCaps = netconfNode.getAvailableCapabilities();
+ unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
+ }
+ if (netconfNode.getAvailableCapabilities().getAvailableCapability().size() > masterCaps.getAvailableCapability().size()) {
+ masterCaps = netconfNode.getAvailableCapabilities();
+ unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
+ }
LOG.debug(netconfNode.toString());
statusList.addAll(netconfNode.getClusteredConnectionStatus().getNodeStatus());
}
if (base == null) {
base = result.get(0);
baseAugmentation = result.get(0).getAugmentation(NetconfNode.class);
- LOG.warn("All results {}", result.toString());
+ LOG.debug("All results {}", result.toString());
}
- LOG.warn("Base node: {}", base);
-
final Node aggregatedNode =
new NodeBuilder(base)
.addAugmentation(NetconfNode.class,
new ClusteredConnectionStatusBuilder()
.setNodeStatus(statusList)
.build())
+ .setAvailableCapabilities(masterCaps)
+ .setUnavailableCapabilities(unavailableMasterCaps)
.build())
.build();
+
future.set(aggregatedNode);
}
public void onSuccess(final List<Node> result) {
Node base = null;
NetconfNode baseAugmentation = null;
+ AvailableCapabilities masterCaps = null;
+ UnavailableCapabilities unavailableMasterCaps = null;
final ArrayList<NodeStatus> statusList = new ArrayList<>();
for (final Node node : result) {
final NetconfNode netconfNode = node.getAugmentation(NetconfNode.class);
base = node;
baseAugmentation = netconfNode;
}
+ // we need to pull out caps from master, since slave does not go through resolution
+ if (masterCaps == null) {
+ masterCaps = netconfNode.getAvailableCapabilities();
+ unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
+ }
+ if (netconfNode.getAvailableCapabilities().getAvailableCapability().size() > masterCaps.getAvailableCapability().size()) {
+ masterCaps = netconfNode.getAvailableCapabilities();
+ unavailableMasterCaps = netconfNode.getUnavailableCapabilities();
+ }
LOG.debug(netconfNode.toString());
statusList.addAll(netconfNode.getClusteredConnectionStatus().getNodeStatus());
}
if (base == null) {
base = result.get(0);
baseAugmentation = result.get(0).getAugmentation(NetconfNode.class);
- LOG.warn("All results {}", result.toString());
+ LOG.debug("All results {}", result.toString());
}
final Node aggregatedNode =
new ClusteredConnectionStatusBuilder()
.setNodeStatus(statusList)
.build())
+ .setAvailableCapabilities(masterCaps)
+ .setUnavailableCapabilities(unavailableMasterCaps)
.build())
.build();
future.set(aggregatedNode);
final NetconfSessionPreferences netconfSessionPreferences,
final DOMRpcService deviceRpc) {
// prepare our prerequisites for mountpoint
+ LOG.debug("Mount point facade onConnected capabilities {}", netconfSessionPreferences);
this.remoteSchemaContext = remoteSchemaContext;
this.netconfSessionPreferences = netconfSessionPreferences;
this.deviceRpc = deviceRpc;
}
public void registerMountPoint(final ActorSystem actorSystem, final ActorContext context) {
+ if (remoteSchemaContext == null || netconfSessionPreferences == null) {
+ LOG.debug("Master mount point does not have schemas ready yet, delaying registration");
+ return;
+ }
+
Preconditions.checkNotNull(id);
Preconditions.checkNotNull(remoteSchemaContext, "Device has no remote schema context yet. Probably not fully connected.");
Preconditions.checkNotNull(netconfSessionPreferences, "Device has no capabilities yet. Probably not fully connected.");
return new NetconfDeviceMasterDataBroker(actorSystem, id, remoteSchemaContext, deviceRpc, netconfSessionPreferences, defaultRequestTimeoutMillis);
}
}), MOUNT_POINT);
- LOG.warn("Master data broker registered on path {}", TypedActor.get(actorSystem).getActorRefFor(deviceDataBroker).path());
+ LOG.debug("Master data broker registered on path {}", TypedActor.get(actorSystem).getActorRefFor(deviceDataBroker).path());
salProvider.getMountInstance().onTopologyDeviceConnected(remoteSchemaContext, deviceDataBroker, deviceRpc, notificationService);
final Cluster cluster = Cluster.get(actorSystem);
final Iterable<Member> members = cluster.state().getMembers();
}
public void registerMountPoint(final ActorSystem actorSystem, final ActorContext context, final ActorRef masterRef) {
+ if (remoteSchemaContext == null || netconfSessionPreferences == null) {
+ LOG.debug("Slave mount point does not have schemas ready yet, delaying registration");
+ return;
+ }
+
Preconditions.checkNotNull(id);
Preconditions.checkNotNull(remoteSchemaContext, "Device has no remote schema context yet. Probably not fully connected.");
Preconditions.checkNotNull(netconfSessionPreferences, "Device has no capabilities yet. Probably not fully connected.");
this.actorSystem = actorSystem;
final NetconfDeviceNotificationService notificationService = new NetconfDeviceNotificationService();
- LOG.warn("Creating a proxy for master data broker");
final ProxyNetconfDeviceDataBroker masterDataBroker = TypedActor.get(actorSystem).typedActorOf(new TypedProps<>(ProxyNetconfDeviceDataBroker.class, NetconfDeviceMasterDataBroker.class), masterRef);
LOG.warn("Creating slave data broker for device {}", id);
final DOMDataBroker deviceDataBroker = new NetconfDeviceSlaveDataBroker(actorSystem, id, masterDataBroker);
public void unregisterMountPoint() {
salProvider.getMountInstance().onTopologyDeviceDisconnected();
if (deviceDataBroker != null) {
- LOG.warn("Stopping master data broker for device {}", id.getName());
+ LOG.debug("Stopping master data broker for device {}", id.getName());
for (final Member member : Cluster.get(actorSystem).state().getMembers()) {
if (member.address().equals(Cluster.get(actorSystem).selfAddress())) {
continue;