<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
mavenBundle("ch.qos.logback", "logback-classic", "1.0.9"),
// List all the bundles on which the test case depends
mavenBundle("org.opendaylight.controller", "sal",
- "0.4.0-SNAPSHOT"),
+ "0.5.0-SNAPSHOT"),
mavenBundle("org.opendaylight.controller",
"sal.implementation", "0.4.0-SNAPSHOT"),
Assert.assertTrue(stat.getCode().equals(StatusCode.NOTACCEPTABLE));
}
-}
\ No newline at end of file
+}
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal</artifactId>\r
- <version>0.4.0-SNAPSHOT</version>\r
+ <version>0.5.0-SNAPSHOT</version>\r
</dependency>\r
</dependencies>\r
</project>\r
<Import-Package>
org.opendaylight.controller.sal.core,
org.opendaylight.controller.sal.utils,
+ org.opendaylight.controller.sal.topology,
org.opendaylight.controller.hosttracker,
org.opendaylight.controller.topologymanager,
org.opendaylight.controller.sal.packet.address,
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import java.util.concurrent.Future;
import org.apache.felix.dm.Component;
-import org.apache.taglibs.standard.lang.jstl.DivideOperator;
import org.opendaylight.controller.clustering.services.CacheConfigException;
import org.opendaylight.controller.clustering.services.CacheExistException;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.sal.core.UpdateType;
import org.opendaylight.controller.sal.packet.address.DataLinkAddress;
import org.opendaylight.controller.sal.packet.address.EthernetAddress;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.sal.utils.GlobalConstants;
import org.opendaylight.controller.sal.utils.HexEncode;
import org.opendaylight.controller.sal.utils.NodeCreator;
import org.slf4j.LoggerFactory;
/**
- * @file HostTracker.java
- * This class tracks the location of IP Hosts as to which Switch, Port, VLAN, they are
- * connected to, as well as their MAC address. This is done dynamically as well as statically.
- * The dynamic mechanism consists of listening to ARP messages as well sending ARP requests.
- * Static mechanism consists of Northbound APIs to add or remove the hosts from the local
- * database. ARP aging is also implemented to age out dynamically learned hosts. Interface
- * methods are provided for other applications to
- * 1. Query the local database for a single host
- * 2. Get a list of all hosts
- * 3. Get notification if a host is learned/added or removed the database
+ * @file HostTracker.java This class tracks the location of IP Hosts as to which
+ * Switch, Port, VLAN, they are connected to, as well as their MAC
+ * address. This is done dynamically as well as statically. The dynamic
+ * mechanism consists of listening to ARP messages as well sending ARP
+ * requests. Static mechanism consists of Northbound APIs to add or remove
+ * the hosts from the local database. ARP aging is also implemented to age
+ * out dynamically learned hosts. Interface methods are provided for other
+ * applications to 1. Query the local database for a single host 2. Get a
+ * list of all hosts 3. Get notification if a host is learned/added or
+ * removed the database
*/
public class HostTracker implements IfIptoHost, IfHostListener,
.getLogger(HostTracker.class);
private IHostFinder hostFinder;
private ConcurrentMap<InetAddress, HostNodeConnector> hostsDB;
- /* Following is a list of hosts which have been requested by NB APIs to be added,
- * but either the switch or the port is not sup, so they will be added here until
- * both come up
+ /*
+ * Following is a list of hosts which have been requested by NB APIs to be
+ * added, but either the switch or the port is not sup, so they will be
+ * added here until both come up
*/
private ConcurrentMap<NodeConnector, HostNodeConnector> inactiveStaticHosts;
private Set<IfNewHostNotify> newHostNotify = Collections
}
}
- //This list contains the hosts for which ARP requests are being sent periodically
+ // This list contains the hosts for which ARP requests are being sent
+ // periodically
private List<ARPPending> ARPPendingList = new ArrayList<HostTracker.ARPPending>();
/*
- * This list below contains the hosts which were initially in ARPPendingList above,
- * but ARP response didn't come from there hosts after multiple attempts over 8
- * seconds. The assumption is that the response didn't come back due to one of the
- * following possibilities:
- * 1. The L3 interface wasn't created for this host in the controller. This would
- * cause arphandler not to know where to send the ARP
- * 2. The host facing port is down
- * 3. The IP host doesn't exist or is not responding to ARP requests
- *
- * Conditions 1 and 2 above can be recovered if ARP is sent when the relevant L3
- * interface is added or the port facing host comes up. Whenever L3 interface is
- * added or host facing port comes up, ARP will be sent to hosts in this list.
- *
+ * This list below contains the hosts which were initially in ARPPendingList
+ * above, but ARP response didn't come from there hosts after multiple
+ * attempts over 8 seconds. The assumption is that the response didn't come
+ * back due to one of the following possibilities: 1. The L3 interface
+ * wasn't created for this host in the controller. This would cause
+ * arphandler not to know where to send the ARP 2. The host facing port is
+ * down 3. The IP host doesn't exist or is not responding to ARP requests
+ *
+ * Conditions 1 and 2 above can be recovered if ARP is sent when the
+ * relevant L3 interface is added or the port facing host comes up. Whenever
+ * L3 interface is added or host facing port comes up, ARP will be sent to
+ * hosts in this list.
+ *
* We can't recover from condition 3 above
*/
private ArrayList<ARPPending> failedARPReqList = new ArrayList<HostTracker.ARPPending>();
}
@SuppressWarnings("deprecation")
- private void allocateCache() {
+ private void allocateCache() {
if (this.clusterContainerService == null) {
- logger
- .error("un-initialized clusterContainerService, can't create cache");
+ logger.error("un-initialized clusterContainerService, can't create cache");
return;
}
logger.debug("Creating Cache for HostTracker");
try {
- this.clusterContainerService.createCache("hostTrackerAH", EnumSet
- .of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
- this.clusterContainerService.createCache("hostTrackerIH", EnumSet
- .of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+ this.clusterContainerService.createCache("hostTrackerAH",
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+ this.clusterContainerService.createCache("hostTrackerIH",
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
} catch (CacheConfigException cce) {
- logger
- .error("Cache couldn't be created for HostTracker - check cache mode");
+ logger.error("Cache couldn't be created for HostTracker - check cache mode");
} catch (CacheExistException cce) {
- logger
- .error("Cache for HostTracker already exists, destroy and recreate");
+ logger.error("Cache for HostTracker already exists, destroy and recreate");
}
logger.debug("Cache successfully created for HostTracker");
}
@SuppressWarnings({ "unchecked", "deprecation" })
private void retrieveCache() {
if (this.clusterContainerService == null) {
- logger
- .error("un-initialized clusterContainerService, can't retrieve cache");
+ logger.error("un-initialized clusterContainerService, can't retrieve cache");
return;
}
logger.debug("Retrieving cache for HostTrackerAH");
}
@SuppressWarnings("deprecation")
- private void destroyCache() {
+ private void destroyCache() {
if (this.clusterContainerService == null) {
logger.error("un-initialized clusterMger, can't destroy cache");
return;
for (Entry<NodeConnector, HostNodeConnector> entry : inactiveStaticHosts
.entrySet()) {
if (entry.getValue().equalsByIP(networkAddress)) {
- logger
- .debug(
- "getHostFromInactiveDB(): Inactive Host found for IP:{} ",
- networkAddress.getHostAddress());
+ logger.debug(
+ "getHostFromInactiveDB(): Inactive Host found for IP:{} ",
+ networkAddress.getHostAddress());
return entry;
}
}
public HostNodeConnector hostFind(InetAddress networkAddress) {
/*
- * Sometimes at boot with containers configured in the startup
- * we hit this path (from TIF) when hostFinder has not been set yet
- * Caller already handles the null return
+ * Sometimes at boot with containers configured in the startup we hit
+ * this path (from TIF) when hostFinder has not been set yet Caller
+ * already handles the null return
*/
if (hostFinder == null) {
HostNodeConnector host = hostQuery(networkAddress);
if (host != null) {
- logger.debug("hostFind(): Host found for IP: {}", networkAddress
- .getHostAddress());
+ logger.debug("hostFind(): Host found for IP: {}",
+ networkAddress.getHostAddress());
return host;
}
/* host is not found, initiate a discovery */
hostFinder.find(networkAddress);
/* Also add this host to ARPPending List for any potential retries */
AddtoARPPendingList(networkAddress);
- logger
- .debug(
- "hostFind(): Host Not Found for IP: {}, Inititated Host Discovery ...",
- networkAddress.getHostAddress());
+ logger.debug(
+ "hostFind(): Host Not Found for IP: {}, Inititated Host Discovery ...",
+ networkAddress.getHostAddress());
return null;
}
arphost.setHostIP(networkAddr);
arphost.setSent_count((short) 1);
ARPPendingList.add(arphost);
- logger.debug("Host Added to ARPPending List, IP: {}", networkAddr
- .toString());
+ logger.debug("Host Added to ARPPending List, IP: {}",
+ networkAddr.toString());
}
private void removePendingARPFromList(int index) {
if (index >= ARPPendingList.size()) {
- logger
- .warn(
- "removePendingARPFromList(): index greater than the List. Size:{}, Index:{}",
- ARPPendingList.size(), index);
+ logger.warn(
+ "removePendingARPFromList(): index greater than the List. Size:{}, Index:{}",
+ ARPPendingList.size(), index);
return;
}
ARPPending arphost = ARPPendingList.remove(index);
for (int i = 0; i < ARPPendingList.size(); i++) {
arphost = ARPPendingList.get(i);
if (arphost.getHostIP().equals(networkAddr)) {
- /* An ARP was sent for this host. The address is learned,
- * remove the request
+ /*
+ * An ARP was sent for this host. The address is learned, remove
+ * the request
*/
removePendingARPFromList(i);
logger.debug("Host Removed from ARPPending List, IP: {}",
for (int i = 0; i < failedARPReqList.size(); i++) {
arphost = failedARPReqList.get(i);
if (arphost.getHostIP().equals(networkAddr)) {
- /* An ARP was sent for this host. The address is learned,
- * remove the request
+ /*
+ * An ARP was sent for this host. The address is learned, remove
+ * the request
*/
failedARPReqList.remove(i);
logger.debug("Host Removed from FailedARPReqList List, IP: {}",
private void learnNewHost(HostNodeConnector host) {
host.initArpSendCountDown();
hostsDB.put(host.getNetworkAddress(), host);
- logger.debug("New Host Learned: MAC: {} IP: {}", HexEncode
- .bytesToHexString(host.getDataLayerAddressBytes()), host
- .getNetworkAddress().getHostAddress());
+ logger.debug("New Host Learned: MAC: {} IP: {}",
+ HexEncode.bytesToHexString(host.getDataLayerAddressBytes()),
+ host.getNetworkAddress().getHostAddress());
}
// Remove known Host
.getHostAddress());
hostsDB.remove(key);
} else {
- logger
- .error(
- "removeKnownHost(): Host for IP address {} not found in hostsDB",
- key.getHostAddress());
+ logger.error(
+ "removeKnownHost(): Host for IP address {} not found in hostsDB",
+ key.getHostAddress());
}
}
/* Check for Host Move case */
if (hostMoved(host)) {
/*
- * Host has been moved from one location (switch,port, MAC, or VLAN).
- * Remove the existing host with its previous location parameters,
- * inform the applications, and add it as a new Host
+ * Host has been moved from one location (switch,port, MAC, or
+ * VLAN). Remove the existing host with its previous location
+ * parameters, inform the applications, and add it as a new Host
*/
HostNodeConnector removedHost = hostsDB.get(host
.getNetworkAddress());
new NotifyHostThread(host).start();
}
- // Notify whoever is interested that a new host was learned (dynamically or statically)
+ // Notify whoever is interested that a new host was learned (dynamically or
+ // statically)
private void notifyHostLearnedOrRemoved(HostNodeConnector host, boolean add) {
// Update listeners if any
if (newHostNotify != null) {
}
}
} else {
- logger
- .error("notifyHostLearnedOrRemoved(): New host notify is null");
+ logger.error("notifyHostLearnedOrRemoved(): New host notify is null");
}
- // Topology update is for some reason outside of listeners registry logic
+ // Topology update is for some reason outside of listeners registry
+ // logic
Node node = host.getnodeconnectorNode();
Host h = null;
NodeConnector p = host.getnodeConnector();
try {
- DataLinkAddress dla = new EthernetAddress(host
- .getDataLayerAddressBytes());
- h = new org.opendaylight.controller.sal.core.Host(dla, host
- .getNetworkAddress());
+ DataLinkAddress dla = new EthernetAddress(
+ host.getDataLayerAddressBytes());
+ h = new org.opendaylight.controller.sal.core.Host(dla,
+ host.getNetworkAddress());
} catch (ConstructionException ce) {
p = null;
h = null;
switchManager.setNodeProp(node, tier);
topologyManager.updateHostLink(p, h, UpdateType.ADDED, null);
/*
- * This is a temporary fix for Cisco Live's Hadoop Demonstration.
- * The concept of Tiering must be revisited based on other application requirements
- * and the design might warrant a separate module (as it involves tracking the topology/
- * host changes & updating the Tiering numbers in an effective manner).
+ * This is a temporary fix for Cisco Live's Hadoop
+ * Demonstration. The concept of Tiering must be revisited based
+ * on other application requirements and the design might
+ * warrant a separate module (as it involves tracking the
+ * topology/ host changes & updating the Tiering numbers in an
+ * effective manner).
*/
updateSwitchTiers(node, 1);
/*
- * The following 2 lines are added for testing purposes.
- * We can remove it once the North-Bound APIs are available for testing.
-
- ArrayList<ArrayList<String>> hierarchies = getHostNetworkHierarchy(host.getNetworkAddress());
- logHierarchies(hierarchies);
+ * The following 2 lines are added for testing purposes. We can
+ * remove it once the North-Bound APIs are available for
+ * testing.
+ *
+ * ArrayList<ArrayList<String>> hierarchies =
+ * getHostNetworkHierarchy(host.getNetworkAddress());
+ * logHierarchies(hierarchies);
*/
} else {
- // No need to reset the tiering if no other hosts are currently connected
- // If this switch was discovered to be an access switch, it still is even if the host is down
+ // No need to reset the tiering if no other hosts are currently
+ // connected
+ // If this switch was discovered to be an access switch, it
+ // still is even if the host is down
Tier tier = new Tier(0);
switchManager.setNodeProp(node, tier);
topologyManager.updateHostLink(p, h, UpdateType.REMOVED, null);
}
/**
- * When a new Host is learnt by the hosttracker module, it places the directly connected Node
- * in Tier-1 & using this function, updates the Tier value for all other Nodes in the network
- * hierarchy.
- *
- * This is a recursive function and it takes care of updating the Tier value for all the connected
- * and eligible Nodes.
- *
- * @param n Node that represents one of the Vertex in the Topology Graph.
- * @param currentTier The Tier on which n belongs
+ * When a new Host is learnt by the hosttracker module, it places the
+ * directly connected Node in Tier-1 & using this function, updates the Tier
+ * value for all other Nodes in the network hierarchy.
+ *
+ * This is a recursive function and it takes care of updating the Tier value
+ * for all the connected and eligible Nodes.
+ *
+ * @param n
+ * Node that represents one of the Vertex in the Topology Graph.
+ * @param currentTier
+ * The Tier on which n belongs
*/
private void updateSwitchTiers(Node n, int currentTier) {
Map<Node, Set<Edge>> ndlinks = topologyManager.getNodeEdges();
}
ArrayList<Node> needsVisiting = new ArrayList<Node>();
for (Edge lt : links) {
- if (!lt.getHeadNodeConnector().getType().equals(
- NodeConnector.NodeConnectorIDType.OPENFLOW)) {
+ if (!lt.getHeadNodeConnector().getType()
+ .equals(NodeConnector.NodeConnectorIDType.OPENFLOW)) {
// We don't want to work on Node that are not openflow
// for now
continue;
}
/**
- * Internal convenience routine to check the eligibility of a Switch for a Tier update.
- * Any Node with Tier=0 or a Tier value that is greater than the new Tier Value is eligible
- * for the update.
- *
- * @param n Node for which the Tier update eligibility is checked
- * @param tier new Tier Value
+ * Internal convenience routine to check the eligibility of a Switch for a
+ * Tier update. Any Node with Tier=0 or a Tier value that is greater than
+ * the new Tier Value is eligible for the update.
+ *
+ * @param n
+ * Node for which the Tier update eligibility is checked
+ * @param tier
+ * new Tier Value
* @return <code>true</code> if the Node is eligible for Tier Update
* <code>false</code> otherwise
*/
}
/**
- * Internal convenience routine to clear all the Tier values to 0.
- * This cleanup is performed during cases such as Topology Change where the existing Tier values
- * might become incorrect
+ * Internal convenience routine to clear all the Tier values to 0. This
+ * cleanup is performed during cases such as Topology Change where the
+ * existing Tier values might become incorrect
*/
private void clearTiers() {
Set<Node> nodes = null;
}
/**
- * getHostNetworkHierarchy is the Back-end routine for the North-Bound API that returns
- * the Network Hierarchy for a given Host. This API is typically used by applications like
- * Hadoop for Rack Awareness functionality.
- *
- * @param hostAddress IP-Address of the host/node.
- * @return Network Hierarchies represented by an Array of Array (of Switch-Ids as String).
+ * getHostNetworkHierarchy is the Back-end routine for the North-Bound API
+ * that returns the Network Hierarchy for a given Host. This API is
+ * typically used by applications like Hadoop for Rack Awareness
+ * functionality.
+ *
+ * @param hostAddress
+ * IP-Address of the host/node.
+ * @return Network Hierarchies represented by an Array of Array (of
+ * Switch-Ids as String).
*/
public List<List<String>> getHostNetworkHierarchy(InetAddress hostAddress) {
HostNodeConnector host = hostQuery(hostAddress);
}
/**
- * dpidToHostNameHack is a hack function for Cisco Live Hadoop Demo.
- * Mininet is used as the network for Hadoop Demos & in order to give a meaningful
- * rack-awareness switch names, the DPID is organized in ASCII Characters and
- * retrieved as string.
- *
- * @param dpid Switch DataPath Id
+ * dpidToHostNameHack is a hack function for Cisco Live Hadoop Demo. Mininet
+ * is used as the network for Hadoop Demos & in order to give a meaningful
+ * rack-awareness switch names, the DPID is organized in ASCII Characters
+ * and retrieved as string.
+ *
+ * @param dpid
+ * Switch DataPath Id
* @return Ascii String represented by the DPID.
*/
private String dpidToHostNameHack(long dpid) {
/**
* A convenient recursive routine to obtain the Hierarchy of Switches.
- *
- * @param node Current Node in the Recursive routine.
- * @param currHierarchy Array of Nodes that make this hierarchy on which the Current Switch belong
- * @param fullHierarchy Array of multiple Hierarchies that represent a given host.
+ *
+ * @param node
+ * Current Node in the Recursive routine.
+ * @param currHierarchy
+ * Array of Nodes that make this hierarchy on which the Current
+ * Switch belong
+ * @param fullHierarchy
+ * Array of multiple Hierarchies that represent a given host.
*/
@SuppressWarnings("unchecked")
private void updateCurrentHierarchy(Node node,
ArrayList<String> currHierarchy, List<List<String>> fullHierarchy) {
- //currHierarchy.add(String.format("%x", currSw.getId()));
+ // currHierarchy.add(String.format("%x", currSw.getId()));
currHierarchy.add(dpidToHostNameHack((Long) node.getID()));
ArrayList<String> currHierarchyClone = (ArrayList<String>) currHierarchy
- .clone(); //Shallow copy as required
+ .clone(); // Shallow copy as required
Map<Node, Set<Edge>> ndlinks = topologyManager.getNodeEdges();
if (ndlinks == null) {
- logger
- .debug(
- "updateCurrentHierarchy(): topologyManager returned null ndlinks for node: {}",
- node);
+ logger.debug(
+ "updateCurrentHierarchy(): topologyManager returned null ndlinks for node: {}",
+ node);
return;
}
Node n = NodeCreator.createOFNode((Long) node.getID());
return;
}
for (Edge lt : links) {
- if (!lt.getHeadNodeConnector().getType().equals(
- NodeConnector.NodeConnectorIDType.OPENFLOW)) {
+ if (!lt.getHeadNodeConnector().getType()
+ .equals(NodeConnector.NodeConnectorIDType.OPENFLOW)) {
// We don't want to work on Node that are not openflow
// for now
continue;
ArrayList<String> buildHierarchy = currHierarchy;
if (currHierarchy.size() > currHierarchyClone.size()) {
buildHierarchy = (ArrayList<String>) currHierarchyClone
- .clone(); //Shallow copy as required
+ .clone(); // Shallow copy as required
fullHierarchy.add(buildHierarchy);
}
updateCurrentHierarchy(dstNode, buildHierarchy, fullHierarchy);
}
}
- @Override
- public void edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
+ private void edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
Long srcNid = null;
Short srcPort = null;
Long dstNid = null;
// At this point we know we got an openflow update, so
// lets fill everything accordingly.
- srcNid = (Long) e.getTailNodeConnector().getNode()
- .getID();
+ srcNid = (Long) e.getTailNodeConnector().getNode().getID();
srcPort = (Short) e.getTailNodeConnector().getID();
- dstNid = (Long) e.getHeadNodeConnector().getNode()
- .getID();
+ dstNid = (Long) e.getHeadNodeConnector().getNode().getID();
dstPort = (Short) e.getHeadNodeConnector().getID();
// Now lets update the added flag
}
}
- logger.debug("HostTracker Topology linkUpdate handling src:{}[port {}] dst:{}[port {}] added: {}",
- new Object[] { srcNid, srcPort, dstNid, dstPort, added });
+ logger.debug(
+ "HostTracker Topology linkUpdate handling src:{}[port {}] dst:{}[port {}] added: {}",
+ new Object[] { srcNid, srcPort, dstNid, dstPort, added });
clearTiers();
for (Entry<InetAddress, HostNodeConnector> entry : hostsDB.entrySet()) {
HostNodeConnector host = entry.getValue();
}
}
+ @Override
+ public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
+ for (int i = 0; i < topoedgeupdateList.size(); i++) {
+ Edge e = topoedgeupdateList.get(i).getEdge();
+ Set<Property> p = topoedgeupdateList.get(i).getProperty();
+ UpdateType type = topoedgeupdateList.get(i).getUpdateType();
+ edgeUpdate(e, type, p);
+ }
+ }
+
public void subnetNotify(Subnet sub, boolean add) {
logger.debug("Received subnet notification: {} add={}", sub, add);
if (add) {
for (int i = 0; i < ARPPendingList.size(); i++) {
arphost = ARPPendingList.get(i);
if (arphost.getSent_count() < switchManager.getHostRetryCount()) {
- /* No reply has been received of first ARP Req, send the next one */
+ /*
+ * No reply has been received of first ARP Req, send the
+ * next one
+ */
hostFinder.find(arphost.getHostIP());
arphost.sent_count++;
logger.debug("ARP Sent from ARPPending List, IP: {}",
arphost.getHostIP().getHostAddress());
} else if (arphost.getSent_count() >= switchManager
.getHostRetryCount()) {
- /* Two ARP requests have been sent without
- * receiving a reply, remove this from the
- * pending list
+ /*
+ * Two ARP requests have been sent without receiving a
+ * reply, remove this from the pending list
*/
removePendingARPFromList(i);
- logger
- .debug(
- "ARP reply not received after two attempts, removing from Pending List IP: {}",
- arphost.getHostIP().getHostAddress());
+ logger.debug(
+ "ARP reply not received after two attempts, removing from Pending List IP: {}",
+ arphost.getHostIP().getHostAddress());
/*
- * Add this host to a different list which will be processed on link
- * up events
+ * Add this host to a different list which will be processed
+ * on link up events
*/
logger.debug("Adding the host to FailedARPReqList IP: {}",
arphost.getHostIP().getHostAddress());
failedARPReqList.add(arphost);
} else {
- logger
- .error(
- "Inavlid arp_sent count for entery at index: {}",
- i);
+ logger.error(
+ "Inavlid arp_sent count for entery at index: {}", i);
}
}
}
}
if (hostsDB == null) {
/* hostsDB is not allocated yet */
- logger
- .error("ARPRefreshHandler(): hostsDB is not allocated yet:");
+ logger.error("ARPRefreshHandler(): hostsDB is not allocated yet:");
return;
}
for (Entry<InetAddress, HostNodeConnector> entry : hostsDB
if (arp_cntdown > switchManager.getHostRetryCount()) {
host.setArpSendCountDown(arp_cntdown);
} else if (arp_cntdown <= 0) {
- /* No ARP Reply received in last 2 minutes, remove this host and inform applications*/
+ /*
+ * No ARP Reply received in last 2 minutes, remove this host
+ * and inform applications
+ */
removeKnownHost(entry.getKey());
notifyHostLearnedOrRemoved(host, false);
} else if (arp_cntdown <= switchManager.getHostRetryCount()) {
- /* Use the services of arphandler to check if host is still there */
- logger.trace("ARP Probing ({}) for {}({})", new Object[] {
- arp_cntdown,
- host.getNetworkAddress().getHostAddress(),
- HexEncode.bytesToHexString(host
- .getDataLayerAddressBytes()) });
+ /*
+ * Use the services of arphandler to check if host is still
+ * there
+ */
+ logger.trace(
+ "ARP Probing ({}) for {}({})",
+ new Object[] {
+ arp_cntdown,
+ host.getNetworkAddress().getHostAddress(),
+ HexEncode.bytesToHexString(host
+ .getDataLayerAddressBytes()) });
host.setArpSendCountDown(arp_cntdown);
hostFinder.probe(host);
}
}
/**
- * Inform the controller IP to MAC binding of a host and its
- * connectivity to an openflow switch in terms of Node, port, and
- * VLAN.
- *
- * @param networkAddr IP address of the host
- * @param dataLayer Address MAC address of the host
- * @param nc NodeConnector to which host is connected
- * @param port Port of the switch to which host is connected
- * @param vlan Vlan of which this host is member of
- *
- * @return Status The status object as described in {@code Status}
- * indicating the result of this action.
+ * Inform the controller IP to MAC binding of a host and its connectivity to
+ * an openflow switch in terms of Node, port, and VLAN.
+ *
+ * @param networkAddr
+ * IP address of the host
+ * @param dataLayer
+ * Address MAC address of the host
+ * @param nc
+ * NodeConnector to which host is connected
+ * @param port
+ * Port of the switch to which host is connected
+ * @param vlan
+ * Vlan of which this host is member of
+ *
+ * @return Status The status object as described in {@code Status}
+ * indicating the result of this action.
*/
public Status addStaticHostReq(InetAddress networkAddr,
byte[] dataLayerAddress, NodeConnector nc, short vlan) {
if (dataLayerAddress.length != 6) {
- return new Status(StatusCode.BADREQUEST, "Invalid MAC address");
+ return new Status(StatusCode.BADREQUEST, "Invalid MAC address");
}
HostNodeConnector host = null;
try {
host = new HostNodeConnector(dataLayerAddress, networkAddr, nc,
- vlan);
+ vlan);
if (hostExists(host)) {
- // This host is already learned either via ARP or through a northbound request
+ // This host is already learned either via ARP or through a
+ // northbound request
HostNodeConnector transHost = hostsDB.get(networkAddr);
transHost.setStaticHost(true);
return new Status(StatusCode.SUCCESS, null);
}
host.setStaticHost(true);
/*
- * Before adding host, Check if the switch and the port have already come up
+ * Before adding host, Check if the switch and the port have already
+ * come up
*/
if (switchManager.isNodeConnectorEnabled(nc)) {
learnNewHost(host);
notifyHostLearnedOrRemoved(host, true);
} else {
inactiveStaticHosts.put(nc, host);
- logger
- .debug(
- "Switch or switchport is not up, adding host {} to inactive list",
- networkAddr.getHostName());
+ logger.debug(
+ "Switch or switchport is not up, adding host {} to inactive list",
+ networkAddr.getHostName());
}
return new Status(StatusCode.SUCCESS, null);
} catch (ConstructionException e) {
- return new Status(StatusCode.INTERNALERROR, "Host could not be created");
+ return new Status(StatusCode.INTERNALERROR,
+ "Host could not be created");
}
}
/**
- * Update the controller IP to MAC binding of a host and its
- * connectivity to an openflow switch in terms of
- * switch id, switch port, and VLAN.
- *
- * @param networkAddr IP address of the host
- * @param dataLayer Address MAC address of the host
- * @param nc NodeConnector to which host is connected
- * @param port Port of the switch to which host is connected
- * @param vlan Vlan of which this host is member of
- *
- * @return boolean true if the host was added successfully,
- * false otherwise
+ * Update the controller IP to MAC binding of a host and its connectivity to
+ * an openflow switch in terms of switch id, switch port, and VLAN.
+ *
+ * @param networkAddr
+ * IP address of the host
+ * @param dataLayer
+ * Address MAC address of the host
+ * @param nc
+ * NodeConnector to which host is connected
+ * @param port
+ * Port of the switch to which host is connected
+ * @param vlan
+ * Vlan of which this host is member of
+ *
+ * @return boolean true if the host was added successfully, false otherwise
*/
public boolean updateHostReq(InetAddress networkAddr,
- byte[] dataLayerAddress, NodeConnector nc,
- short vlan) {
+ byte[] dataLayerAddress, NodeConnector nc, short vlan) {
if (nc == null) {
return false;
}
HostNodeConnector host = null;
try {
host = new HostNodeConnector(dataLayerAddress, networkAddr, nc,
- vlan);
+ vlan);
if (!hostExists(host)) {
if ((inactiveStaticHosts.get(nc)) != null) {
inactiveStaticHosts.replace(nc, host);
/**
* Remove from the controller IP to MAC binding of a host and its
* connectivity to an openflow switch
- *
- * @param networkAddr IP address of the host
- *
- * @return boolean true if the host was removed successfully,
- * false otherwise
+ *
+ * @param networkAddr
+ * IP address of the host
+ *
+ * @return boolean true if the host was removed successfully, false
+ * otherwise
*/
public Status removeStaticHostReq(InetAddress networkAddress) {
if (host != null) {
// Validation check
if (!host.isStaticHost()) {
- return new Status(StatusCode.FORBIDDEN,
- "Host " + networkAddress.getHostName() +
- " is not static");
+ return new Status(StatusCode.FORBIDDEN, "Host "
+ + networkAddress.getHostName() + " is not static");
}
// Remove and notify
notifyHostLearnedOrRemoved(host, false);
host = entry.getValue();
// Validation check
if (!host.isStaticHost()) {
- return new Status(StatusCode.FORBIDDEN,
- "Host " + networkAddress.getHostName() +
- " is not static");
+ return new Status(StatusCode.FORBIDDEN, "Host "
+ + networkAddress.getHostName() + " is not static");
}
this.removeHostFromInactiveDB(networkAddress);
return new Status(StatusCode.SUCCESS, null);
switch (type) {
case REMOVED:
long sid = (Long) node.getID();
- logger.debug("Received removedSwitch for sw id {}", HexEncode
- .longToHexString(sid));
+ logger.debug("Received removedSwitch for sw id {}",
+ HexEncode.longToHexString(sid));
for (Entry<InetAddress, HostNodeConnector> entry : hostsDB
.entrySet()) {
HostNodeConnector host = entry.getValue();
@Override
public Status addStaticHost(String networkAddress, String dataLayerAddress,
- NodeConnector nc, String vlan) {
+ NodeConnector nc, String vlan) {
try {
InetAddress ip = InetAddress.getByName(networkAddress);
if (nc == null) {
- return new Status(StatusCode.BADREQUEST, "Invalid NodeId");
+ return new Status(StatusCode.BADREQUEST, "Invalid NodeId");
}
return addStaticHostReq(ip,
- HexEncode
- .bytesFromHexString(dataLayerAddress),
- nc,
+ HexEncode.bytesFromHexString(dataLayerAddress), nc,
Short.valueOf(vlan));
} catch (UnknownHostException e) {
- logger.error("",e);
+ logger.error("", e);
return new Status(StatusCode.BADREQUEST, "Invalid Address");
}
}
address = InetAddress.getByName(networkAddress);
return removeStaticHostReq(address);
} catch (UnknownHostException e) {
- logger.error("",e);
+ logger.error("", e);
return new Status(StatusCode.BADREQUEST, "Invalid Address");
}
}
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init(Component c) {
Dictionary<?, ?> props = c.getServiceProperties();
}
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
- *
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
+ *
*/
void destroy() {
destroyCache();
}
/**
- * Function called by dependency manager after "init ()" is called
- * and after the services provided by the class are registered in
- * the service registry
- *
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
+ *
*/
void start() {
}
/**
- * Function called by the dependency manager before the services
- * exported by the component are unregistered, this will be
- * followed by a "destroy ()" calls
- *
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
+ *
*/
void stop() {
}
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal</artifactId>\r
- <version>0.4.0-SNAPSHOT</version>\r
+ <version>0.5.0-SNAPSHOT</version>\r
</dependency>\r
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
\r
// List all the bundles on which the test case depends\r
mavenBundle("org.opendaylight.controller", "sal",\r
- "0.4.0-SNAPSHOT"),\r
+ "0.5.0-SNAPSHOT"),\r
mavenBundle("org.opendaylight.controller", "sal.implementation",\r
"0.4.0-SNAPSHOT"),\r
\r
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.codehaus.enunciate</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller.thirdparty</groupId>
package org.opendaylight.controller.protocol_plugin.openflow.internal;
import java.util.Dictionary;
+import java.util.List;
import java.util.Set;
+import java.util.ArrayList;
import org.apache.felix.dm.Component;
import org.opendaylight.controller.protocol_plugin.openflow.IRefreshInternalProvider;
import org.opendaylight.controller.sal.core.UpdateType;
import org.opendaylight.controller.sal.topology.IPluginInTopologyService;
import org.opendaylight.controller.sal.topology.IPluginOutTopologyService;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
public class TopologyServices implements ITopologyServiceShimListener,
IPluginInTopologyService {
@Override
public void edgeUpdate(Edge edge, UpdateType type, Set<Property> props) {
if (this.salTopoService != null) {
- this.salTopoService.edgeUpdate(edge, type, props);
+ List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
+ TopoEdgeUpdate teu = new TopoEdgeUpdate(edge, props, type);
+ topoedgeupdateList.add(teu);
+ this.salTopoService.edgeUpdate(topoedgeupdateList);
}
}
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>junit</groupId>
org.slf4j,
org.opendaylight.controller.sal.routing,
org.opendaylight.controller.sal.core,
+ org.opendaylight.controller.sal.topology,
org.opendaylight.controller.sal.utils,
org.opendaylight.controller.sal.reader,
org.apache.commons.collections15,
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.sal.core.UpdateType;
import org.opendaylight.controller.sal.reader.IReadService;
import org.opendaylight.controller.sal.routing.IListenRoutingUpdates;
+
import org.opendaylight.controller.sal.routing.IRouting;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.switchmanager.ISwitchManager;
import org.opendaylight.controller.topologymanager.ITopologyManager;
import org.opendaylight.controller.topologymanager.ITopologyManagerAware;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
+import java.util.ArrayList;
import java.util.Set;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
.getLogger(DijkstraImplementation.class);
private ConcurrentMap<Short, Graph<Node, Edge>> topologyBWAware;
private ConcurrentMap<Short, DijkstraShortestPath<Node, Edge>> sptBWAware;
- DijkstraShortestPath<Node, Edge> mtp; //Max Throughput Path
+ DijkstraShortestPath<Node, Edge> mtp; // Max Throughput Path
private Set<IListenRoutingUpdates> routingAware;
private ISwitchManager switchManager;
private ITopologyManager topologyManager;
Bandwidth.BandwidthPropName);
long srcLinkSpeed = 0, dstLinkSpeed = 0;
- if ((bwSrc == null) || ((srcLinkSpeed = bwSrc.getValue()) == 0)) {
- log.debug("srcNC: {} - Setting srcLinkSpeed to Default!",srcNC);
- srcLinkSpeed = DEFAULT_LINK_SPEED;
+ if ((bwSrc == null)
+ || ((srcLinkSpeed = bwSrc.getValue()) == 0)) {
+ log.debug(
+ "srcNC: {} - Setting srcLinkSpeed to Default!",
+ srcNC);
+ srcLinkSpeed = DEFAULT_LINK_SPEED;
}
-
- if ((bwDst == null) || ((dstLinkSpeed = bwDst.getValue()) == 0)) {
- log.debug("dstNC: {} - Setting dstLinkSpeed to Default!",dstNC);
+
+ if ((bwDst == null)
+ || ((dstLinkSpeed = bwDst.getValue()) == 0)) {
+ log.debug(
+ "dstNC: {} - Setting dstLinkSpeed to Default!",
+ dstNC);
dstLinkSpeed = DEFAULT_LINK_SPEED;
}
long avlDstThruPut = dstLinkSpeed
- readService.getTransmitRate(dstNC);
- //Use lower of the 2 available thruput as the available thruput
+ // Use lower of the 2 available thruput as the available
+ // thruput
long avlThruPut = avlSrcThruPut < avlDstThruPut ? avlSrcThruPut
: avlDstThruPut;
if (avlThruPut <= 0) {
- log.debug("Edge {}: Available Throughput {} <= 0!",
- e, avlThruPut);
+ log.debug("Edge {}: Available Throughput {} <= 0!", e,
+ avlThruPut);
return (double) -1;
}
return (double) (Bandwidth.BW1Pbps / avlThruPut);
};
}
Short baseBW = Short.valueOf((short) 0);
- //Initialize mtp also using the default topo
+ // Initialize mtp also using the default topo
Graph<Node, Edge> g = this.topologyBWAware.get(baseBW);
if (g == null) {
log.error("Default Topology Graph is null");
try {
path = mtp.getMaxThroughputPath(src, dst);
} catch (IllegalArgumentException ie) {
- log.debug("A vertex is yet not known between {} {}", src.toString(),
- dst.toString());
+ log.debug("A vertex is yet not known between {} {}",
+ src.toString(), dst.toString());
return null;
}
Path res;
try {
res = new Path(path);
} catch (ConstructionException e) {
- log.debug("A vertex is yet not known between {} {}", src.toString(),
- dst.toString());
+ log.debug("A vertex is yet not known between {} {}",
+ src.toString(), dst.toString());
return null;
}
return res;
try {
path = spt.getPath(src, dst);
} catch (IllegalArgumentException ie) {
- log.debug("A vertex is yet not known between {} {}", src.toString(),
- dst.toString());
+ log.debug("A vertex is yet not known between {} {}",
+ src.toString(), dst.toString());
return null;
}
Path res;
try {
res = new Path(path);
} catch (ConstructionException e) {
- log.debug("A vertex is yet not known between {} {}", src.toString(),
- dst.toString());
+ log.debug("A vertex is yet not known between {} {}",
+ src.toString(), dst.toString());
return null;
}
return res;
@Override
public synchronized void clearMaxThroughput() {
if (mtp != null) {
- mtp.reset(); //reset maxthruput path
+ mtp.reset(); // reset maxthruput path
}
}
- @SuppressWarnings( { "rawtypes", "unchecked" })
+ @SuppressWarnings({ "rawtypes", "unchecked" })
private synchronized boolean updateTopo(Edge edge, Short bw, boolean added) {
Graph<Node, Edge> topo = this.topologyBWAware.get(bw);
DijkstraShortestPath<Node, Edge> spt = this.sptBWAware.get(bw);
edgePresentInGraph = topo.containsEdge(edge);
if (edgePresentInGraph == false) {
try {
- topo.addEdge(new Edge(src, dst), src
- .getNode(), dst
- .getNode(), EdgeType.DIRECTED);
+ topo.addEdge(new Edge(src, dst), src.getNode(),
+ dst.getNode(), EdgeType.DIRECTED);
} catch (ConstructionException e) {
- log.error("",e);
+ log.error("", e);
return edgePresentInGraph;
}
}
} else {
- //Remove the edge
+ // Remove the edge
try {
topo.removeEdge(new Edge(src, dst));
} catch (ConstructionException e) {
- log.error("",e);
+ log.error("", e);
return edgePresentInGraph;
}
return edgePresentInGraph;
}
- @Override
- public void edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
+ private boolean edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
String srcType = null;
String dstType = null;
if (e == null || type == null) {
log.error("Edge or Update type are null!");
- return;
+ return false;
} else {
srcType = e.getTailNodeConnector().getType();
dstType = e.getHeadNodeConnector().getType();
if (srcType.equals(NodeConnector.NodeConnectorIDType.PRODUCTION)) {
log.debug("Skip updates for {}", e);
- return;
+ return false;
}
if (dstType.equals(NodeConnector.NodeConnectorIDType.PRODUCTION)) {
log.debug("Skip updates for {}", e);
- return;
+ return false;
}
}
// Update BW topo
updateTopo(e, (short) bw.getValue(), add);
}
- if (this.routingAware != null) {
- for (IListenRoutingUpdates ra : this.routingAware) {
- try {
- ra.recalculateDone();
- } catch (Exception ex) {
- log.error("Exception on routingAware listener call", e);
- }
+ }
+ return newEdge;
+ }
+
+ @Override
+ public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
+ boolean callListeners = false;
+ for (int i = 0; i < topoedgeupdateList.size(); i++) {
+ Edge e = topoedgeupdateList.get(i).getEdge();
+ Set<Property> p = topoedgeupdateList.get(i).getProperty();
+ UpdateType type = topoedgeupdateList.get(i).getUpdateType();
+ if ((edgeUpdate(e, type, p)) && (!callListeners)) {
+ callListeners = true;
+ }
+ }
+ if ((callListeners) && (this.routingAware != null)) {
+ for (IListenRoutingUpdates ra : this.routingAware) {
+ try {
+ ra.recalculateDone();
+ } catch (Exception ex) {
+ log.error("Exception on routingAware listener call", ex);
}
}
}
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public void init() {
- log.debug("Routing init() is called");
- this.topologyBWAware = (ConcurrentMap<Short, Graph<Node, Edge>>) new ConcurrentHashMap();
- this.sptBWAware = (ConcurrentMap<Short, DijkstraShortestPath<Node, Edge>>) new ConcurrentHashMap();
- // Now create the default topology, which doesn't consider the
- // BW, also create the corresponding Dijkstra calculation
- Graph<Node, Edge> g = new SparseMultigraph();
- Short sZero = Short.valueOf((short) 0);
- this.topologyBWAware.put(sZero, g);
- this.sptBWAware.put(sZero, new DijkstraShortestPath(g));
- // Topologies for other BW will be added on a needed base
+ log.debug("Routing init() is called");
+ this.topologyBWAware = (ConcurrentMap<Short, Graph<Node, Edge>>) new ConcurrentHashMap();
+ this.sptBWAware = (ConcurrentMap<Short, DijkstraShortestPath<Node, Edge>>) new ConcurrentHashMap();
+ // Now create the default topology, which doesn't consider the
+ // BW, also create the corresponding Dijkstra calculation
+ Graph<Node, Edge> g = new SparseMultigraph();
+ Short sZero = Short.valueOf((short) 0);
+ this.topologyBWAware.put(sZero, g);
+ this.sptBWAware.put(sZero, new DijkstraShortestPath(g));
+ // Topologies for other BW will be added on a needed base
}
+
/**
* Function called by the dependency manager when at least one dependency
* become unsatisfied or when the component is shutting down because for
* example bundle is being stopped.
- *
+ *
*/
void destroy() {
- log.debug("Routing destroy() is called");
+ log.debug("Routing destroy() is called");
}
/**
- * Function called by dependency manager after "init ()" is called
- * and after the services provided by the class are registered in
- * the service registry
- *
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
+ *
*/
- void start() {
- log.debug("Routing start() is called");
- // build the routing database from the topology if it exists.
- Map<Edge, Set<Property>> edges = topologyManager.getEdges();
- if (edges.isEmpty()) {
- return;
- }
- log.debug("Creating routing database from the topology");
- for (Iterator<Map.Entry<Edge,Set<Property>>> i = edges.entrySet().iterator(); i.hasNext();) {
- Map.Entry<Edge, Set<Property>> entry = i.next();
- Edge e = entry.getKey();
- Set<Property> props = entry.getValue();
- edgeUpdate(e, UpdateType.ADDED, props);
- }
- }
+ void start() {
+ log.debug("Routing start() is called");
+ // build the routing database from the topology if it exists.
+ Map<Edge, Set<Property>> edges = topologyManager.getEdges();
+ if (edges.isEmpty()) {
+ return;
+ }
+ List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
+ log.debug("Creating routing database from the topology");
+ for (Iterator<Map.Entry<Edge, Set<Property>>> i = edges.entrySet()
+ .iterator(); i.hasNext();) {
+ Map.Entry<Edge, Set<Property>> entry = i.next();
+ Edge e = entry.getKey();
+ Set<Property> props = entry.getValue();
+ TopoEdgeUpdate topoedgeupdate = new TopoEdgeUpdate(e, props,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(topoedgeupdate);
+ }
+ edgeUpdate(topoedgeupdateList);
+ }
/**
* Function called by the dependency manager before the services exported by
* the component are unregistered, this will be followed by a "destroy ()"
* calls
- *
+ *
*/
- public void stop() {
- log.debug("Routing stop() is called");
- }
+ public void stop() {
+ log.debug("Routing stop() is called");
+ }
@Override
public void edgeOverUtilized(Edge edge) {
this.readService = null;
}
}
-
+
public void setTopologyManager(ITopologyManager tm) {
- this.topologyManager = tm;
+ this.topologyManager = tm;
}
-
+
public void unsetTopologyManager(ITopologyManager tm) {
- if (this.topologyManager == tm) {
- this.topologyManager = null;
- }
+ if (this.topologyManager == tm) {
+ this.topologyManager = null;
+ }
}
}
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.routing.dijkstra_implementation;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.opendaylight.controller.sal.core.Path;
import org.opendaylight.controller.sal.core.Property;
import org.opendaylight.controller.sal.core.UpdateType;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
import org.opendaylight.controller.sal.utils.NodeCreator;
+import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
public class DijkstraTest {
protected static final Logger logger = LoggerFactory
- .getLogger(DijkstraTest.class);
+ .getLogger(DijkstraTest.class);
+
@Test
public void testSinglePathRouteNoBw() {
DijkstraImplementation imp = new DijkstraImplementation();
Node node1 = NodeCreator.createOFNode((long) 1);
Node node2 = NodeCreator.createOFNode((long) 2);
Node node3 = NodeCreator.createOFNode((long) 3);
+ List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
NodeConnector nc11 = NodeConnectorCreator.createOFNodeConnector(
(short) 1, node1);
NodeConnector nc21 = NodeConnectorCreator.createOFNodeConnector(
try {
edge1 = new Edge(nc11, nc21);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
Set<Property> props = new HashSet<Property>();
props.add(new Bandwidth(0));
- imp.edgeUpdate(edge1, UpdateType.ADDED, props);
+ TopoEdgeUpdate teu1 = new TopoEdgeUpdate(edge1, props, UpdateType.ADDED);
+ topoedgeupdateList.add(teu1);
+
NodeConnector nc22 = NodeConnectorCreator.createOFNodeConnector(
(short) 2, node2);
NodeConnector nc31 = NodeConnectorCreator.createOFNodeConnector(
try {
edge2 = new Edge(nc22, nc31);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
Set<Property> props2 = new HashSet<Property>();
- props.add(new Bandwidth(0));
- imp.edgeUpdate(edge2, UpdateType.ADDED, props2);
+ props2.add(new Bandwidth(0));
+ TopoEdgeUpdate teu2 = new TopoEdgeUpdate(edge2, props2,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu2);
+ imp.edgeUpdate(topoedgeupdateList);
Path res = imp.getRoute(node1, node3);
List<Edge> expectedPath = (List<Edge>) new LinkedList<Edge>();
try {
expectedRes = new Path(expectedPath);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
if (!res.equals(expectedRes)) {
System.out.println("Actual Res is " + res);
@Test
public void testShortestPathRouteNoBw() {
DijkstraImplementation imp = new DijkstraImplementation();
+ List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
imp.init();
Node node1 = NodeCreator.createOFNode((long) 1);
Node node2 = NodeCreator.createOFNode((long) 2);
try {
edge1 = new Edge(nc11, nc21);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
Set<Property> props = new HashSet<Property>();
props.add(new Bandwidth(0));
- imp.edgeUpdate(edge1, UpdateType.ADDED, props);
+ TopoEdgeUpdate teu1 = new TopoEdgeUpdate(edge1, props, UpdateType.ADDED);
+ topoedgeupdateList.add(teu1);
NodeConnector nc22 = NodeConnectorCreator.createOFNodeConnector(
(short) 2, node2);
try {
edge2 = new Edge(nc22, nc31);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
Set<Property> props2 = new HashSet<Property>();
- props.add(new Bandwidth(0));
- imp.edgeUpdate(edge2, UpdateType.ADDED, props2);
+ props2.add(new Bandwidth(0));
+ TopoEdgeUpdate teu2 = new TopoEdgeUpdate(edge2, props2,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu2);
NodeConnector nc12 = NodeConnectorCreator.createOFNodeConnector(
(short) 2, node1);
try {
edge3 = new Edge(nc12, nc32);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
Set<Property> props3 = new HashSet<Property>();
- props.add(new Bandwidth(0));
- imp.edgeUpdate(edge3, UpdateType.ADDED, props3);
+ props3.add(new Bandwidth(0));
+ TopoEdgeUpdate teu3 = new TopoEdgeUpdate(edge3, props3,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu3);
+ imp.edgeUpdate(topoedgeupdateList);
Path res = imp.getRoute(node1, node3);
try {
expectedRes = new Path(expectedPath);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
if (!res.equals(expectedRes)) {
System.out.println("Actual Res is " + res);
public void testShortestPathRouteNoBwAfterLinkDelete() {
DijkstraImplementation imp = new DijkstraImplementation();
imp.init();
+ List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
Node node1 = NodeCreator.createOFNode((long) 1);
Node node2 = NodeCreator.createOFNode((long) 2);
Node node3 = NodeCreator.createOFNode((long) 3);
try {
edge1 = new Edge(nc11, nc21);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
Set<Property> props = new HashSet<Property>();
props.add(new Bandwidth(0));
- imp.edgeUpdate(edge1, UpdateType.ADDED, props);
+ TopoEdgeUpdate teu1 = new TopoEdgeUpdate(edge1, props, UpdateType.ADDED);
+ topoedgeupdateList.add(teu1);
NodeConnector nc22 = NodeConnectorCreator.createOFNodeConnector(
(short) 2, node2);
try {
edge2 = new Edge(nc22, nc31);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
Set<Property> props2 = new HashSet<Property>();
- props.add(new Bandwidth(0));
- imp.edgeUpdate(edge2, UpdateType.ADDED, props2);
+ props2.add(new Bandwidth(0));
+ TopoEdgeUpdate teu2 = new TopoEdgeUpdate(edge2, props2,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu2);
NodeConnector nc12 = NodeConnectorCreator.createOFNodeConnector(
(short) 2, node1);
try {
edge3 = new Edge(nc12, nc32);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
Set<Property> props3 = new HashSet<Property>();
- props.add(new Bandwidth(0));
- imp.edgeUpdate(edge3, UpdateType.ADDED, props3);
+ props3.add(new Bandwidth(0));
+ TopoEdgeUpdate teu3 = new TopoEdgeUpdate(edge3, props3,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu3);
+ TopoEdgeUpdate teu4 = new TopoEdgeUpdate(edge3, props3,
+ UpdateType.REMOVED);
+ topoedgeupdateList.add(teu4);
- imp.edgeUpdate(edge3, UpdateType.REMOVED, props3);
+ imp.edgeUpdate(topoedgeupdateList);
Path res = imp.getRoute(node1, node3);
List<Edge> expectedPath = (List<Edge>) new LinkedList<Edge>();
try {
expectedRes = new Path(expectedPath);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
if (!res.equals(expectedRes)) {
System.out.println("Actual Res is " + res);
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import org.opendaylight.controller.sal.core.Node;
import org.opendaylight.controller.sal.core.NodeConnector;
import org.opendaylight.controller.sal.core.Path;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
import org.opendaylight.controller.sal.utils.NodeCreator;
+
import java.util.LinkedList;
import java.util.List;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
public class MaxThruputTest {
protected static final Logger logger = LoggerFactory
- .getLogger(MaxThruputTest.class);
+ .getLogger(MaxThruputTest.class);
Map<Edge, Number> LinkCostMap = new HashMap<Edge, Number>();
@Test
try {
edge1 = new Edge(nc11, nc21);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge1, 10);
Edge edge2 = null;
try {
edge2 = new Edge(nc21, nc11);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge2, 10);
try {
edge3 = new Edge(nc22, nc31);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge3, 30);
Edge edge4 = null;
try {
edge4 = new Edge(nc31, nc22);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge4, 30);
try {
edge5 = new Edge(nc32, nc41);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge5, 10);
Edge edge6 = null;
try {
edge6 = new Edge(nc41, nc32);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge6, 10);
try {
edge7 = new Edge(nc12, nc51);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge7, 20);
Edge edge8 = null;
try {
edge8 = new Edge(nc51, nc12);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge8, 20);
try {
edge9 = new Edge(nc52, nc61);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge9, 20);
Edge edge10 = null;
try {
edge10 = new Edge(nc61, nc52);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge10, 20);
try {
edge11 = new Edge(nc62, nc42);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge11, 20);
Edge edge12 = null;
try {
edge12 = new Edge(nc42, nc62);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
LinkCostMap.put(edge12, 20);
- imp.edgeUpdate(edge1, UpdateType.ADDED, null);
- imp.edgeUpdate(edge2, UpdateType.ADDED, null);
- imp.edgeUpdate(edge3, UpdateType.ADDED, null);
- imp.edgeUpdate(edge4, UpdateType.ADDED, null);
- imp.edgeUpdate(edge5, UpdateType.ADDED, null);
- imp.edgeUpdate(edge6, UpdateType.ADDED, null);
- imp.edgeUpdate(edge7, UpdateType.ADDED, null);
- imp.edgeUpdate(edge8, UpdateType.ADDED, null);
- imp.edgeUpdate(edge9, UpdateType.ADDED, null);
- imp.edgeUpdate(edge10, UpdateType.ADDED, null);
- imp.edgeUpdate(edge11, UpdateType.ADDED, null);
- imp.edgeUpdate(edge12, UpdateType.ADDED, null);
+ List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
+ TopoEdgeUpdate teu1 = new TopoEdgeUpdate(edge1, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu1);
+ TopoEdgeUpdate teu2 = new TopoEdgeUpdate(edge2, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu2);
+ TopoEdgeUpdate teu3 = new TopoEdgeUpdate(edge3, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu3);
+ TopoEdgeUpdate teu4 = new TopoEdgeUpdate(edge4, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu4);
+ TopoEdgeUpdate teu5 = new TopoEdgeUpdate(edge5, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu5);
+ TopoEdgeUpdate teu6 = new TopoEdgeUpdate(edge6, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu6);
+ TopoEdgeUpdate teu7 = new TopoEdgeUpdate(edge7, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu7);
+ TopoEdgeUpdate teu8 = new TopoEdgeUpdate(edge8, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu8);
+ TopoEdgeUpdate teu9 = new TopoEdgeUpdate(edge9, null, UpdateType.ADDED);
+ topoedgeupdateList.add(teu9);
+ TopoEdgeUpdate teu10 = new TopoEdgeUpdate(edge10, null,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu10);
+ TopoEdgeUpdate teu11 = new TopoEdgeUpdate(edge11, null,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu11);
+ TopoEdgeUpdate teu12 = new TopoEdgeUpdate(edge12, null,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu12);
+
+ imp.edgeUpdate(topoedgeupdateList);
imp.initMaxThroughput(LinkCostMap);
try {
expectedRes = new Path(expectedPath);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
if (!res.equals(expectedRes)) {
System.out.println("Actual Res is " + res);
try {
expectedRes = new Path(expectedPath);
} catch (ConstructionException e) {
- logger.error("",e);
+ logger.error("", e);
}
if (!res.equals(expectedRes)) {
System.out.println("Actual Res is " + res);
\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal</artifactId>\r
- <version>0.4.0-SNAPSHOT</version>\r
+ <version>0.5.0-SNAPSHOT</version>\r
<packaging>bundle</packaging>\r
\r
<build>\r
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
package org.opendaylight.controller.sal.topology;
-import java.util.Set;
+import java.util.List;
import org.opendaylight.controller.sal.core.Edge;
-import org.opendaylight.controller.sal.core.Property;
-import org.opendaylight.controller.sal.core.UpdateType;
/**
* @file IListenTopoUpdates.java
/**
* Topology notifications provided by SAL toward the application
- *
+ *
*/
public interface IListenTopoUpdates {
/**
* Called to update on Edge in the topology graph
- *
- * @param e Edge being updated
- * @param type Type of update
- * @param props Properties of the edge, like BandWidth and/or Latency etc.
+ *
+ * @param topoedgeupdateList
+ * List of topoedgeupdates Each topoedgeupdate includes edge, its
+ * Properties ( BandWidth and/or Latency etc) and update type.
*/
- public void edgeUpdate(Edge e, UpdateType type, Set<Property> props);
+ public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList);
/**
- * Called when an Edge utilization is above the safety threshold
- * configured on the controller
- *
- * @param edge The edge which bandwidth usage is above the safety level
+ * Called when an Edge utilization is above the safety threshold configured
+ * on the controller
+ *
+ * @param edge
+ * The edge which bandwidth usage is above the safety level
*/
public void edgeOverUtilized(Edge edge);
/**
* Called when the Edge utilization is back to normal, below the safety
* threshold level configured on the controller
- *
+ *
* @param edge
*/
public void edgeUtilBackToNormal(Edge edge);
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
package org.opendaylight.controller.sal.topology;
+import java.util.List;
import java.util.Set;
import org.opendaylight.controller.sal.core.Edge;
/**
* Methods that are invoked from Protocol Plugin toward SAL
- *
+ *
*/
public interface IPluginOutTopologyService {
+
/**
* Called to update on Edge in the topology graph
- *
- * @param e Edge being updated
- * @param type Type of update
- * @param props Properties of the edge, like BandWidth and/or Latency etc.
+ *
+ * @param topoedgeupdateList
+ * List of topoedgeupdates Each topoedgeupdate includes edge, its
+ * Properties ( BandWidth and/or Latency etc) and update type.
*/
- public void edgeUpdate(Edge e, UpdateType type, Set<Property> props);
+ public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList);
/**
- * Called when an Edge utilization is above the safety threshold
- * configured on the controller
+ * Called when an Edge utilization is above the safety threshold configured
+ * on the controller
*
* @param edge
*/
/**
* Called when the Edge utilization is back to normal, below the safety
* threshold level configured on the controller
- *
+ *
* @param edge
*/
public void edgeUtilBackToNormal(Edge edge);
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.topology;
+
+import java.util.Set;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.ReflectionToStringBuilder;
+import org.opendaylight.controller.sal.core.Edge;
+import org.opendaylight.controller.sal.core.Property;
+import org.opendaylight.controller.sal.core.UpdateType;
+
+/**
+ * The class represents an Edge, the Edge's Property Set and its UpdateType.
+ */
+
+public class TopoEdgeUpdate {
+ // Value object set once in the constructor and exposed only through
+ // getters. NOTE(review): fields are not declared final and
+ // equals/hashCode are reflection-based, so instances must not be
+ // mutated while used as hash keys -- confirm no caller does so.
+ private Edge edge;
+ private Set<Property> props;
+ private UpdateType type;
+
+ // e: the edge being reported; p: its property set, which MAY be null
+ // (callers in the tests pass null); t: the update kind (e.g. ADDED).
+ public TopoEdgeUpdate(Edge e, Set<Property> p, UpdateType t) {
+ edge = e;
+ props = p;
+ type = t;
+ }
+
+ public Edge getEdge() {
+ return edge;
+ }
+
+ // Returns the property set supplied at construction; may be null.
+ public Set<Property> getProperty() {
+ return props;
+ }
+
+ public UpdateType getUpdateType() {
+ return type;
+ }
+
+ @Override
+ public int hashCode() {
+ // Reflection-based hash over all fields (commons-lang3).
+ return HashCodeBuilder.reflectionHashCode(this);
+ }
+
+ @Override
+ public String toString() {
+ return "TopoEdgeUpdate[" + ReflectionToStringBuilder.toString(this)
+ + "]";
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ // Reflection-based field-by-field equality (commons-lang3);
+ // consistent with the reflection-based hashCode above.
+ return EqualsBuilder.reflectionEquals(this, obj);
+ }
+}
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal</artifactId>\r
- <version>0.4.0-SNAPSHOT</version>\r
+ <version>0.5.0-SNAPSHOT</version>\r
</dependency>\r
</dependencies>\r
</project>\r
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
package org.opendaylight.controller.sal.implementation.internal;
+import java.util.ArrayList;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import java.util.Collections;
import org.opendaylight.controller.sal.topology.IPluginInTopologyService;
import org.opendaylight.controller.sal.topology.IPluginOutTopologyService;
import org.opendaylight.controller.sal.topology.ITopologyService;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init() {
}
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
- *
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
+ *
*/
void destroy() {
// Make sure to clear all the data structure we use to track
}
@Override
- public void edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
+ public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
+ // Fan the batched edge updates out unchanged to every registered
+ // listener. Iteration is synchronized on updateService --
+ // presumably to guard against concurrent (un)registration of
+ // listeners; confirm against the set/unset methods of this class.
synchronized (this.updateService) {
for (IListenTopoUpdates s : this.updateService) {
- s.edgeUpdate(e, type, props);
+ s.edgeUpdate(topoedgeupdateList);
}
}
}
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
mavenBundle("ch.qos.logback", "logback-classic", "1.0.9"),
// List all the bundles on which the test case depends
mavenBundle("org.opendaylight.controller", "sal",
- "0.4.0-SNAPSHOT"),
+ "0.5.0-SNAPSHOT"),
mavenBundle("org.opendaylight.controller",
"sal.implementation", "0.4.0-SNAPSHOT"),
mavenBundle("org.opendaylight.controller", "statisticsmanager",
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.ObjectInputStream;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.Dictionary;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
+import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.opendaylight.controller.sal.core.Node.NodeIDType;
import org.opendaylight.controller.sal.topology.IListenTopoUpdates;
import org.opendaylight.controller.sal.topology.ITopologyService;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.sal.utils.StatusCode;
import org.opendaylight.controller.sal.utils.GlobalConstants;
import org.opendaylight.controller.sal.utils.IObjectReader;
private static String ROOT = GlobalConstants.STARTUPHOME.toString();
private String userLinksFileName = null;
private ConcurrentMap<String, TopologyUserLinkConfig> userLinks;
-
+
void nonClusterObjectCreate() {
- edgesDB = new ConcurrentHashMap<Edge, Set<Property>>();
- hostsDB = new ConcurrentHashMap<NodeConnector, ImmutablePair<Host, Set<Property>>>();
- userLinks = new ConcurrentHashMap<String, TopologyUserLinkConfig>();
- nodeConnectorsDB = new ConcurrentHashMap<NodeConnector, Set<Property>>();
+ edgesDB = new ConcurrentHashMap<Edge, Set<Property>>();
+ hostsDB = new ConcurrentHashMap<NodeConnector, ImmutablePair<Host, Set<Property>>>();
+ userLinks = new ConcurrentHashMap<String, TopologyUserLinkConfig>();
+ nodeConnectorsDB = new ConcurrentHashMap<NodeConnector, Set<Property>>();
}
-
void setTopologyManagerAware(ITopologyManagerAware s) {
if (this.topologyManagerAware != null) {
- log.debug("Adding ITopologyManagerAware: {}", s);
+ log.debug("Adding ITopologyManagerAware: {}", s);
this.topologyManagerAware.add(s);
}
}
void unsetTopologyManagerAware(ITopologyManagerAware s) {
if (this.topologyManagerAware != null) {
- log.debug("Removing ITopologyManagerAware: {}", s);
+ log.debug("Removing ITopologyManagerAware: {}", s);
this.topologyManagerAware.remove(s);
}
}
void setTopoService(ITopologyService s) {
- log.debug("Adding ITopologyService: {}", s);
+ log.debug("Adding ITopologyService: {}", s);
this.topoService = s;
}
void unsetTopoService(ITopologyService s) {
if (this.topoService == s) {
- log.debug("Removing ITopologyService: {}", s);
+ log.debug("Removing ITopologyService: {}", s);
this.topoService = null;
}
}
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
- *
+ *
*/
void init(Component c) {
String containerName = null;
}
/**
- * Function called after the topology manager has registered the
- * service in OSGi service registry.
- *
+ * Function called after the topology manager has registered the service in
+ * OSGi service registry.
+ *
*/
void started() {
// SollicitRefresh MUST be called here else if called at init
}
/**
- * Function called by the dependency manager when at least one
- * dependency become unsatisfied or when the component is shutting
- * down because for example bundle is being stopped.
- *
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
+ *
*/
void destroy() {
if (this.clusterContainerService == null) {
// Publish the save config event to the cluster nodes
/**
* Get the CLUSTERING SERVICES WORKING BEFORE TRYING THIS
-
- configSaveEvent.put(new Date().getTime(), SAVE);
+ *
+ * configSaveEvent.put(new Date().getTime(), SAVE);
*/
return saveConfigInternal();
}
public Status saveConfigInternal() {
- Status retS;
+ Status retS;
ObjectWriter objWriter = new ObjectWriter();
- retS = objWriter.write(
- new ConcurrentHashMap<String, TopologyUserLinkConfig>(
+ retS = objWriter
+ .write(new ConcurrentHashMap<String, TopologyUserLinkConfig>(
userLinks), userLinksFileName);
if (retS.isSuccess()) {
}
/**
- * The Map returned is a copy of the current topology hence if the
- * topology changes the copy doesn't
- *
- * @return A Map representing the current topology expressed as
- * edges of the network
+ * The Map returned is a copy of the current topology hence if the topology
+ * changes the copy doesn't
+ *
+ * @return A Map representing the current topology expressed as edges of the
+ * network
*/
@Override
public Map<Edge, Set<Property>> getEdges() {
for (Edge key : this.edgesDB.keySet()) {
// Sets of props are copied because the composition of
// those properties could change with time
- HashSet<Property> prop = new HashSet<Property>(this.edgesDB
- .get(key));
+ HashSet<Property> prop = new HashSet<Property>(
+ this.edgesDB.get(key));
// We can simply reuse the key because the object is
// immutable so doesn't really matter that we are
// referencing the only owned by a different table, the
// TODO remove with spring-dm removal
/**
- * @param set the topologyAware to set
+ * @param set
+ * the topologyAware to set
*/
public void setTopologyAware(Set<Object> set) {
for (Object s : set) {
if (this.hostsDB == null) {
return;
}
-
+
switch (t) {
case ADDED:
case CHANGED:
}
}
- @Override
- public void edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
+ private TopoEdgeUpdate edgeUpdate(Edge e, UpdateType type,
+ Set<Property> props) {
switch (type) {
case ADDED:
// Make sure the props are non-null
props = (Set<Property>) new HashSet(props);
}
- // Now make sure thre is the creation timestamp for the
+ // Now make sure there is the creation timestamp for the
// edge, if not there timestamp with the first update
boolean found_create = false;
for (Property prop : props) {
}
}
- // Now lest make sure new properties are non-null
+ // Now lets make sure new properties are non-null
// Make sure the props are non-null
if (props == null) {
props = (Set<Property>) new HashSet();
log.trace("Edge {} {}", e.toString(), type.name());
break;
}
+ return new TopoEdgeUpdate(e, props, type);
+ }
+
+ @Override
+ public void edgeUpdate(List<TopoEdgeUpdate> topoedgeupdateList) {
+ // Phase 1: run every incoming update through the private
+ // edgeUpdate(Edge, UpdateType, Set<Property>) handler, collecting
+ // the (possibly property-normalized) TopoEdgeUpdate results.
+ List<TopoEdgeUpdate> teuList = new ArrayList<TopoEdgeUpdate>();
+ for (int i = 0; i < topoedgeupdateList.size(); i++) {
+ Edge e = topoedgeupdateList.get(i).getEdge();
+ Set<Property> p = topoedgeupdateList.get(i).getProperty();
+ UpdateType type = topoedgeupdateList.get(i).getUpdateType();
+ TopoEdgeUpdate teu = edgeUpdate(e, type, p);
+ teuList.add(teu);
+ }
// Now update the listeners
for (ITopologyManagerAware s : this.topologyManagerAware) {
+ // Phase 2: deliver the processed batch; a listener exception is
+ // logged and swallowed so one bad listener cannot block delivery
+ // to the remaining listeners.
try {
- s.edgeUpdate(e, type, props);
+ s.edgeUpdate(teuList);
} catch (Exception exc) {
log.error("Exception on callback", exc);
}
}
+
}
private Edge getReverseLinkTuple(TopologyUserLinkConfig link) {
- TopologyUserLinkConfig rLink = new TopologyUserLinkConfig(
- link.getName(), link.getDstNodeIDType(), link.getDstSwitchId(),
- link.getDstNodeConnectorIDType(), link.getDstPort(),
- link.getSrcNodeIDType(), link.getSrcSwitchId(),
- link.getSrcNodeConnectorIDType(), link.getSrcPort());
+ TopologyUserLinkConfig rLink = new TopologyUserLinkConfig(
+ link.getName(), link.getDstNodeIDType(), link.getDstSwitchId(),
+ link.getDstNodeConnectorIDType(), link.getDstPort(),
+ link.getSrcNodeIDType(), link.getSrcSwitchId(),
+ link.getSrcNodeConnectorIDType(), link.getSrcPort());
return getLinkTuple(rLink);
}
private Edge getLinkTuple(TopologyUserLinkConfig link) {
Edge linkTuple = null;
- // if atleast 1 link exists for the srcPort and atleast 1 link exists for the dstPort
+ // if atleast 1 link exists for the srcPort and atleast 1 link exists
+ // for the dstPort
// that makes it ineligible for the Manual link addition
// This is just an extra protection to avoid mis-programming.
boolean srcLinkExists = false;
boolean dstLinkExists = false;
- //TODO check a way to validate the port with inventory services
- //if (srcSw.getPorts().contains(srcPort) &&
- //dstSw.getPorts().contains(srcPort) &&
+ // TODO check a way to validate the port with inventory services
+ // if (srcSw.getPorts().contains(srcPort) &&
+ // dstSw.getPorts().contains(srcPort) &&
if (!srcLinkExists && !dstLinkExists) {
Node sNode = null;
Node dNode = null;
String dstNodeIDType = link.getDstNodeIDType();
String dstNodeConnectorIDType = link.getDstNodeConnectorIDType();
try {
- if (srcNodeIDType.equals(NodeIDType.OPENFLOW)) {
+ if (srcNodeIDType.equals(NodeIDType.OPENFLOW)) {
sNode = new Node(srcNodeIDType, link.getSrcSwitchIDLong());
- } else {
- sNode = new Node(srcNodeIDType, link.getSrcSwitchId());
- }
+ } else {
+ sNode = new Node(srcNodeIDType, link.getSrcSwitchId());
+ }
- if (dstNodeIDType.equals(NodeIDType.OPENFLOW)) {
+ if (dstNodeIDType.equals(NodeIDType.OPENFLOW)) {
dNode = new Node(dstNodeIDType, link.getDstSwitchIDLong());
- } else {
- dNode = new Node(dstNodeIDType, link.getDstSwitchId());
- }
-
- if (srcNodeConnectorIDType.equals(NodeConnectorIDType.OPENFLOW)) {
+ } else {
+ dNode = new Node(dstNodeIDType, link.getDstSwitchId());
+ }
+
+ if (srcNodeConnectorIDType.equals(NodeConnectorIDType.OPENFLOW)) {
Short srcPort = Short.valueOf((short) 0);
- if (!link.isSrcPortByName()) {
- srcPort = Short.parseShort(link.getSrcPort());
- }
- sPort = new NodeConnector(srcNodeConnectorIDType,
- srcPort, sNode);
- } else {
- sPort = new NodeConnector(srcNodeConnectorIDType,
- link.getSrcPort(), sNode);
- }
-
- if (dstNodeConnectorIDType.equals(NodeConnectorIDType.OPENFLOW)) {
+ if (!link.isSrcPortByName()) {
+ srcPort = Short.parseShort(link.getSrcPort());
+ }
+ sPort = new NodeConnector(srcNodeConnectorIDType, srcPort,
+ sNode);
+ } else {
+ sPort = new NodeConnector(srcNodeConnectorIDType,
+ link.getSrcPort(), sNode);
+ }
+
+ if (dstNodeConnectorIDType.equals(NodeConnectorIDType.OPENFLOW)) {
Short dstPort = Short.valueOf((short) 0);
- if (!link.isDstPortByName()) {
- dstPort = Short.parseShort(link.getDstPort());
- }
- dPort = new NodeConnector(dstNodeConnectorIDType,
- dstPort, dNode);
- } else {
- dPort = new NodeConnector(dstNodeConnectorIDType,
- link.getDstPort(), dNode);
- }
+ if (!link.isDstPortByName()) {
+ dstPort = Short.parseShort(link.getDstPort());
+ }
+ dPort = new NodeConnector(dstNodeConnectorIDType, dstPort,
+ dNode);
+ } else {
+ dPort = new NodeConnector(dstNodeConnectorIDType,
+ link.getDstPort(), dNode);
+ }
linkTuple = new Edge(sPort, dPort);
} catch (ConstructionException cex) {
- log.warn("Caught exception ", cex);
+ log.warn("Caught exception ", cex);
}
return linkTuple;
}
@Override
public Status addUserLink(TopologyUserLinkConfig link) {
if (!link.isValid()) {
- return new Status(StatusCode.BADREQUEST,
- "Configuration Invalid. Please check the parameters");
+ return new Status(StatusCode.BADREQUEST,
+ "Configuration Invalid. Please check the parameters");
}
if (userLinks.get(link.getName()) != null) {
- return new Status(StatusCode.CONFLICT,
- "Link with name : " + link.getName()
+ return new Status(StatusCode.CONFLICT, "Link with name : "
+ + link.getName()
+ " already exists. Please use another name");
}
if (userLinks.containsValue(link)) {
link.setStatus(TopologyUserLinkConfig.STATUS.SUCCESS);
} catch (Exception e) {
return new Status(StatusCode.INTERNALERROR,
- "Exception while adding custom link : " +
- e.getMessage());
+ "Exception while adding custom link : "
+ + e.getMessage());
}
}
return new Status(StatusCode.SUCCESS, null);
@Override
public Status deleteUserLink(String linkName) {
if (linkName == null) {
- return new Status(StatusCode.BADREQUEST,
- "A valid linkName is required to Delete a link");
+ return new Status(StatusCode.BADREQUEST,
+ "A valid linkName is required to Delete a link");
}
TopologyUserLinkConfig link = userLinks.get(linkName);
userLinks.remove(linkName);
if (linkTuple != null) {
try {
- //oneTopology.deleteUserConfiguredLink(linkTuple);
+ // oneTopology.deleteUserConfiguredLink(linkTuple);
} catch (Exception e) {
- log
- .warn("Harmless : Exception while Deleting User Configured link {} {}",
- link, e.toString());
+ log.warn(
+ "Harmless : Exception while Deleting User Configured link {} {}",
+ link, e.toString());
}
linkTuple = getReverseLinkTuple(link);
try {
- //oneTopology.deleteUserConfiguredLink(linkTuple);
+ // oneTopology.deleteUserConfiguredLink(linkTuple);
} catch (Exception e) {
- log
- .warn("Harmless : Exception while Deleting User Configured Reverse link {} {}",
- link, e.toString());
+ log.warn(
+ "Harmless : Exception while Deleting User Configured Reverse link {} {}",
+ link, e.toString());
}
}
return new Status(StatusCode.SUCCESS, null);
public String getHelp() {
StringBuffer help = new StringBuffer();
help.append("---Topology Manager---\n");
- help.append("\t addTopo name <NodeIDType> <src-sw-id> <NodeConnectorIDType> <port-number> <NodeIDType> <dst-sw-id> <NodeConnectorIDType> <port-number>\n");
+ help.append("\t addTopo name <NodeIDType> <src-sw-id> <NodeConnectorIDType> <port-number> <NodeIDType> <dst-sw-id> <NodeConnectorIDType> <port-number>\n");
help.append("\t delTopo name\n");
help.append("\t printTopo\n");
help.append("\t printNodeEdges\n");
public void _printTopo(CommandInterpreter ci) {
for (String name : this.userLinks.keySet()) {
- TopologyUserLinkConfig linkConfig = userLinks.get(name);
+ TopologyUserLinkConfig linkConfig = userLinks.get(name);
ci.println("Name : " + name);
ci.println(linkConfig);
- ci.println("Edge " + getLinkTuple(linkConfig));
- ci.println("Reverse Edge " + getReverseLinkTuple(linkConfig));
+ ci.println("Edge " + getLinkTuple(linkConfig));
+ ci.println("Reverse Edge " + getReverseLinkTuple(linkConfig));
}
}
ci.println("Null destination port number");
return;
}
- TopologyUserLinkConfig config = new TopologyUserLinkConfig(name,
- srcNodeIDType, dpid, srcNodeConnectorIDType, port,
- dstNodeIDType, ddpid, dstNodeConnectorIDType, dport);
+ TopologyUserLinkConfig config = new TopologyUserLinkConfig(name,
+ srcNodeIDType, dpid, srcNodeConnectorIDType, port,
+ dstNodeIDType, ddpid, dstNodeConnectorIDType, dport);
ci.println(this.addUserLink(config));
}
}
public void _printNodeEdges(CommandInterpreter ci) {
- Map<Node, Set<Edge>> nodeEdges = getNodeEdges();
- if (nodeEdges == null) {
- return;
- }
- Set<Node> nodeSet = nodeEdges.keySet();
- if (nodeSet == null) {
- return;
- }
+ Map<Node, Set<Edge>> nodeEdges = getNodeEdges();
+ if (nodeEdges == null) {
+ return;
+ }
+ Set<Node> nodeSet = nodeEdges.keySet();
+ if (nodeSet == null) {
+ return;
+ }
ci.println(" Node Edge");
- for (Node node : nodeSet) {
- Set<Edge> edgeSet = nodeEdges.get(node);
- if (edgeSet == null) {
- continue;
- }
- for (Edge edge : edgeSet) {
- ci.println(node + " " + edge);
- }
+ for (Node node : nodeSet) {
+ Set<Edge> edgeSet = nodeEdges.get(node);
+ if (edgeSet == null) {
+ continue;
+ }
+ for (Edge edge : edgeSet) {
+ ci.println(node + " " + edge);
+ }
}
}
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import java.net.InetAddress;
import java.net.UnknownHostException;
+import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import org.opendaylight.controller.sal.core.State;
import org.opendaylight.controller.sal.core.UpdateType;
import org.opendaylight.controller.sal.packet.address.EthernetAddress;
+import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
import org.opendaylight.controller.sal.utils.StatusCode;
import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
import org.opendaylight.controller.sal.utils.NodeCreator;
import org.opendaylight.controller.topologymanager.TopologyUserLinkConfig;
-public class TopologyManagerImplTest {
-
- /*
- * Sets the node, edges and properties for edges here:
- * Edge <SwitchId : NodeConnectorId> :
- * <1:1>--><11:11>; <1:2>--><11:12>;
- * <3:3>--><13:13>; <3:4>--><13:14>;
- * <5:5>--><15:15>; <5:6>--><15:16>;
- * Method used by two tests: testGetNodeEdges and testGetEdges
- * @param topoManagerImpl
- * @throws ConstructionException
- */
- public void setNodeEdges(TopologyManagerImpl topoManagerImpl) throws ConstructionException {
- topoManagerImpl.nonClusterObjectCreate();
-
- State state;
- Bandwidth bw;
- Latency l;
-
- Set<Property> props = new HashSet<Property>();
- state = new State(State.EDGE_UP);
- bw = new Bandwidth(Bandwidth.BW100Gbps);
- l = new Latency(Latency.LATENCY100ns);
- props.add(state);
- props.add(bw);
- props.add(l);
-
- for (short i = 1; i < 6; i=(short) (i+2)) {
- NodeConnector headnc1 = NodeConnectorCreator.createOFNodeConnector(i, NodeCreator.createOFNode((long)i));
- NodeConnector tailnc1 = NodeConnectorCreator.createOFNodeConnector((short)(i+10), NodeCreator.createOFNode((long)(i+10)));
- Edge e1 = new Edge(headnc1, tailnc1);
- topoManagerImpl.edgeUpdate(e1, UpdateType.ADDED, props);
-
- NodeConnector headnc2 = NodeConnectorCreator.createOFNodeConnector((short) (i+1), headnc1.getNode());
- NodeConnector tailnc2 = NodeConnectorCreator.createOFNodeConnector((short)(i+11), tailnc1.getNode());
- Edge e2 = new Edge(headnc2, tailnc2);
- topoManagerImpl.edgeUpdate(e2, UpdateType.ADDED, props);
- }
- }
-
- @Test
- public void testGetNodeEdges() throws ConstructionException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
- setNodeEdges(topoManagerImpl);
-
- Map<Node, Set<Edge>> nodeEdgeMap = topoManagerImpl.getNodeEdges();
- for (Iterator<Map.Entry<Node,Set<Edge>>> i = nodeEdgeMap.entrySet().iterator(); i.hasNext();) {
- Map.Entry<Node, Set<Edge>> entry = i.next();
- Node node = entry.getKey();
- Long nodeId = ((Long) node.getID()).longValue();
- Assert.assertTrue((node.getType().equals(NodeIDType.OPENFLOW)));
-
- Set<Edge> edges = entry.getValue();
- for (Edge edge : edges) {
- Long headNcId = ((Short)edge.getHeadNodeConnector().getID()).longValue();
- Long tailNcId = ((Short) edge.getTailNodeConnector().getID()).longValue();
- if (nodeId == 1 || nodeId == 3 || nodeId == 5) {
- Assert.assertTrue((headNcId.equals(nodeId) && tailNcId.equals(nodeId + 10)) ||
- (headNcId.equals(nodeId + 10) && tailNcId.equals(nodeId)) ||
- (headNcId.equals(nodeId + 1) && tailNcId.equals(nodeId + 11)) ||
- (headNcId.equals(nodeId + 11) && tailNcId.equals(nodeId + 1)));
- } else if (nodeId == 11 || nodeId == 13 || nodeId == 15) {
- Assert.assertTrue((headNcId.equals(nodeId) && tailNcId.equals(nodeId - 10)) ||
- (headNcId.equals(nodeId) && tailNcId.equals(nodeId - 10)) ||
- (headNcId.equals(nodeId - 9) && tailNcId.equals(nodeId + 1)) ||
- (headNcId.equals(nodeId + 1) && tailNcId.equals(nodeId - 9)));
- }
- }
- i.remove();
- }
- Assert.assertTrue(nodeEdgeMap.isEmpty());
- }
-
- @Test
- public void testGetEdges() throws ConstructionException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
- setNodeEdges(topoManagerImpl);
-
- Map<Edge, Set<Property>> edgeProperty = topoManagerImpl.getEdges();
-
- for (Iterator <Map.Entry<Edge, Set<Property>>> i = edgeProperty.entrySet().iterator() ; i.hasNext();) {
- Map.Entry<Edge, Set<Property>> entry = i.next();
- Edge e = entry.getKey();
- NodeConnector headnc = e.getHeadNodeConnector();
- NodeConnector tailnc = e.getTailNodeConnector();
-
- Long headNodeId = (Long) headnc.getNode().getID();
-
- Long headNcId = ((Short)headnc.getID()).longValue();
- Long tailNcId = ((Short)tailnc.getID()).longValue();
-
- if (headNodeId == 1 || headNodeId == 3 || headNodeId == 5) {
- Assert.assertTrue((headNcId.equals(headNodeId) && tailNcId.equals(headNodeId + 10)) ||
- (headNcId.equals(headNodeId + 10) && tailNcId.equals(headNodeId)) ||
- (headNcId.equals(headNodeId + 1) && tailNcId.equals(headNodeId + 11)) ||
- (headNcId.equals(headNodeId + 11) && tailNcId.equals(headNodeId + 1)));
- } else if (headNodeId == 11 || headNodeId == 13 || headNodeId == 15) {
- Assert.assertTrue((headNcId.equals(headNodeId) && tailNcId.equals(headNodeId - 10)) ||
- (headNcId.equals(headNodeId) && tailNcId.equals(headNodeId - 10)) ||
- (headNcId.equals(headNodeId - 9) && tailNcId.equals(headNodeId + 1)) ||
- (headNcId.equals(headNodeId + 1) && tailNcId.equals(headNodeId - 9)));
- }
-
- Set<Property> prop = entry.getValue();
- for (Property p : prop) {
- String pName;
- long pValue;
- if (p instanceof Bandwidth) {
- Bandwidth b = (Bandwidth)p;
- pName = Bandwidth.BandwidthPropName;
- pValue = b.getValue();
- Assert.assertTrue(pName.equals(p.getName()) && pValue == Bandwidth.BW100Gbps );
- continue;
- }
- if (p instanceof Latency) {
- Latency l = (Latency)p;
- pName = Latency.LatencyPropName;
- pValue = l.getValue();
- Assert.assertTrue(pName.equals(p.getName()) && pValue == Latency.LATENCY100ns);
- continue;
- }
- if (p instanceof State) {
- State state = (State)p;
- pName = State.StatePropName;
- pValue = state.getValue();
- Assert.assertTrue(pName.equals(p.getName()) && pValue == State.EDGE_UP);
- continue;
- }
- }
- i.remove();
- }
- Assert.assertTrue(edgeProperty.isEmpty());
- }
-
-
- @Test
- public void testAddDeleteUserLink () {
- TopologyUserLinkConfig link1 = new TopologyUserLinkConfig("default1", "OF", "1", "OF", "2", "OF", "1", "OF", "2");
- TopologyUserLinkConfig link2 = new TopologyUserLinkConfig("default1", "OF", "10", "OF", "20", "OF", "10", "OF", "20");
- TopologyUserLinkConfig link3 = new TopologyUserLinkConfig("default2", "OF", "1", "OF", "2", "OF", "1", "OF", "2");
- TopologyUserLinkConfig link4 = new TopologyUserLinkConfig("default20", "OF", "10", "OF", "20", "OF", "10", "OF", "20");
-
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
- topoManagerImpl.nonClusterObjectCreate();
-
- Assert.assertTrue (topoManagerImpl.addUserLink(link1).isSuccess());
- Assert.assertTrue (topoManagerImpl.addUserLink(link2).getCode() == StatusCode.CONFLICT);
- Assert.assertTrue (topoManagerImpl.addUserLink(link3).getCode() == StatusCode.CONFLICT);
- Assert.assertTrue (topoManagerImpl.addUserLink(link4).isSuccess());
-
- Assert.assertTrue (topoManagerImpl.deleteUserLink(null).getCode() == StatusCode.BADREQUEST);
- Assert.assertTrue (topoManagerImpl.deleteUserLink(link1.getName()).isSuccess());
- Assert.assertTrue (topoManagerImpl.deleteUserLink(link4.getName()).isSuccess());
- Assert.assertTrue (topoManagerImpl.getUserLinks().isEmpty());
-
- }
-
- @Test
- public void testGetUserLink () {
- TopologyUserLinkConfig[] link = new TopologyUserLinkConfig[5];
- TopologyUserLinkConfig[] reverseLink = new TopologyUserLinkConfig[5];
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
- topoManagerImpl.nonClusterObjectCreate();
-
- String name = null;
- String srcNodeIDType = null;
- String srcSwitchId = null;
- String srcNodeConnectorIDType = null;
- String srcPort = null;
- String dstNodeIDType = null;
- String dstSwitchId = null;
- String dstNodeConnectorIDType = null;
- String dstPort = null;
-
- /*Creating userlinks and checking for their validity*/
- link[0] = new TopologyUserLinkConfig(name, srcNodeIDType, srcSwitchId,
- srcNodeConnectorIDType, srcPort, dstNodeIDType, dstSwitchId,
- dstNodeConnectorIDType, dstPort);
- Assert.assertTrue(link[0].isValid() == false);
-
- srcSwitchId = "1";
- link[0] = new TopologyUserLinkConfig(name, srcNodeIDType, srcSwitchId,
- srcNodeConnectorIDType, srcPort, dstNodeIDType, dstSwitchId,
- dstNodeConnectorIDType, dstPort);
- Assert.assertTrue(link[0].isValid() == false);
-
- dstSwitchId = "2";
- link[0] = new TopologyUserLinkConfig(name, srcNodeIDType, srcSwitchId,
- srcNodeConnectorIDType, srcPort, dstNodeIDType, dstSwitchId,
- dstNodeConnectorIDType, dstPort);
- Assert.assertTrue(link[0].isValid() == false);
-
-
- Integer i;
-
- for (i = 0; i < 5; i++) {
- link[i] = new TopologyUserLinkConfig(name, srcNodeIDType,
- srcSwitchId, srcNodeConnectorIDType, srcPort,
- dstNodeIDType, dstSwitchId, dstNodeConnectorIDType, dstPort);
-
- name = Integer.toString(i + 1);
- srcSwitchId = Integer.toString(i + 1);
- srcPort = Integer.toString(i + 1);
- dstSwitchId = Integer.toString((i + 1)*10);
- dstPort = Integer.toString((i + 1)*10);
-
- link[i].setName(name);
- link[i].setSrcSwitchId(srcSwitchId);
- link[i].setSrcPort(srcPort);
- link[i].setDstSwitchId(dstSwitchId);
- link[i].setDstPort(dstPort);
-
- Assert.assertTrue(link[i].isValid() == false);
-
- link[i].setSrcNodeIDType("OF");
- link[i].setSrcNodeConnectorIDType("OF");
-
- Assert.assertTrue(link[i].isValid() == false);
-
- link[i].setDstNodeIDType("OF");
- link[i].setDstNodeConnectorIDType("OF");
-
- Assert.assertTrue(link[i].isValid() == true);
-
- reverseLink[i] = new TopologyUserLinkConfig(name, dstNodeIDType,
- dstSwitchId, dstNodeConnectorIDType, dstPort,
- srcNodeIDType, srcSwitchId, srcNodeConnectorIDType, srcPort);
-
- topoManagerImpl.addUserLink(link[i]);
- }
- ConcurrentMap<String, TopologyUserLinkConfig> userLinks = topoManagerImpl.getUserLinks();
- TopologyUserLinkConfig resultLink;
-
- for (i = 0; i < 5; i++) {
- resultLink = userLinks.get(((Integer)(i + 1)).toString());
-
- Assert.assertTrue(resultLink.getName().equals(reverseLink[i].getName()));
- Assert.assertTrue(resultLink.getDstSwitchId().equals(reverseLink[i].getSrcSwitchId()));
- Assert.assertTrue(resultLink.getDstPort().equals(reverseLink[i].getSrcPort()));
- Assert.assertTrue(resultLink.getSrcSwitchId().equals(reverseLink[i].getDstSwitchId()));
- Assert.assertTrue(resultLink.getSrcPort().equals(reverseLink[i].getDstPort()));
- }
- }
-
- @Test
- public void testHostLinkMethods() throws ConstructionException, UnknownHostException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
- topoManagerImpl.nonClusterObjectCreate();
- int hostCounter = 0;
-
- State state;
- Bandwidth bw;
- Latency l;
- Set<Property> props = new HashSet<Property>();
- state = new State(State.EDGE_UP);
- bw = new Bandwidth(Bandwidth.BW100Gbps);
- l = new Latency(Latency.LATENCY100ns);
- props.add(state);
- props.add(bw);
- props.add(l);
-
- EthernetAddress ea;
- InetAddress ip;
- Host[] h = new Host[5];
- NodeConnector[] nc = new NodeConnector[5];
-
- /* Adding host, nodeConnector to hostsDB for the i = 0,1,2,3. No host
- * added for i = 4
- */
- for (int i = 0; i < 5; i++) {
- if (hostCounter < 4) {
- ea = new EthernetAddress(new byte[]{(byte)0x0, (byte)0x0,
- (byte)0x0, (byte)0x0,
- (byte)0x0, (byte)i});
- String stringIP = new StringBuilder().append(i + 1).append(".").append(i+10).append(".").append(i+20).append(".").append(i+30).toString();
- ip = InetAddress.getByName(stringIP);
- h[hostCounter] = new Host(ea, ip);
- } else {
- h[hostCounter] = null;
- }
- hostCounter++;
- nc[i] = NodeConnectorCreator.createOFNodeConnector((short)(i + 1), NodeCreator.createOFNode((long)(i + 1)));
- topoManagerImpl.updateHostLink(nc[i], h[i], UpdateType.ADDED, props);
- }
-
- for (int i = 0; i < 5; i++) {
- Host host = topoManagerImpl.getHostAttachedToNodeConnector(nc[i]);
- if (i == 4)
- Assert.assertTrue(host == null);
- else
- Assert.assertTrue(host.equals(h[i]));
- }
-
- Set<NodeConnector> ncSet = topoManagerImpl.getNodeConnectorWithHost();
- for (int i = 0; i < 5; i++) {
- Assert.assertTrue(ncSet.remove(nc[i]));
- }
- Assert.assertTrue(ncSet.isEmpty());
- }
-
- @Test
- public void testGetNodesWithNodeConnectorHost() throws ConstructionException, UnknownHostException {
- TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
- topoManagerImpl.nonClusterObjectCreate();
- int hostCounter = 0;
-
- State state;
- Bandwidth bw;
- Latency l;
- Set<Property> props = new HashSet<Property>();
- state = new State(State.EDGE_UP);
- bw = new Bandwidth(Bandwidth.BW100Gbps);
- l = new Latency(Latency.LATENCY100ns);
- props.add(state);
- props.add(bw);
- props.add(l);
-
- EthernetAddress ea;
- InetAddress ip;
- Host[] h = new Host[5];
- NodeConnector[] nc = new NodeConnector[5];
-
- /*Adding host, nodeconnector, properties of edge to hostsDB for the first three nodes only*/
- for (int i = 1; i < 6; i++) {
- if (i < 4) {
- ea = new EthernetAddress(new byte[]{(byte)0x0, (byte)0x0,
- (byte)0x0, (byte)0x0,
- (byte)0x0, (byte)i});
- String stringIP = new StringBuilder().append(i).append(".").append(i+10).append(".").append(i+20).append(".").append(i+30).toString();
- ip = InetAddress.getByName(stringIP);
- h[hostCounter] = new Host(ea, ip);
- }
- else {
- h[hostCounter] = null;
- }
- hostCounter++;
- nc[i - 1] = NodeConnectorCreator.createOFNodeConnector((short)i, NodeCreator.createOFNode((long)i));
- topoManagerImpl.updateHostLink(nc[i - 1], h[i - 1], UpdateType.ADDED, props);
- }
-
- /*Get the nodes which have host connected to its nodeConnector*/
- Map<Node, Set<NodeConnector>> nodeNCmap = topoManagerImpl.getNodesWithNodeConnectorHost();
- for (int i = 1; i < 6; i++) {
- Node node = nc[i - 1].getNode();
- Set<NodeConnector> ncSet = nodeNCmap.get(nc[i - 1].getNode());
-
- Assert.assertTrue(ncSet == nodeNCmap.remove(node));
- }
-
- Assert.assertTrue(nodeNCmap.isEmpty());
- }
-}
+public class TopologyManagerImplTest {
+
+ /*
+ * Sets the node, edges and properties for edges here: Edge <SwitchId :
+ * NodeConnectorId> : <1:1>--><11:11>; <1:2>--><11:12>; <3:3>--><13:13>;
+ * <3:4>--><13:14>; <5:5>--><15:15>; <5:6>--><15:16>; Method used by two
+ * tests: testGetNodeEdges and testGetEdges
+ *
+ * @param topoManagerImpl
+ *
+ * @throws ConstructionException
+ */
+ public void setNodeEdges(TopologyManagerImpl topoManagerImpl)
+ throws ConstructionException {
+ topoManagerImpl.nonClusterObjectCreate();
+
+ State state;
+ Bandwidth bw;
+ Latency l;
+
+ Set<Property> props = new HashSet<Property>();
+ state = new State(State.EDGE_UP);
+ bw = new Bandwidth(Bandwidth.BW100Gbps);
+ l = new Latency(Latency.LATENCY100ns);
+ props.add(state);
+ props.add(bw);
+ props.add(l);
+
+ for (short i = 1; i < 6; i = (short) (i + 2)) {
+ List<TopoEdgeUpdate> topoedgeupdateList = new ArrayList<TopoEdgeUpdate>();
+ NodeConnector headnc1 = NodeConnectorCreator.createOFNodeConnector(
+ i, NodeCreator.createOFNode((long) i));
+ NodeConnector tailnc1 = NodeConnectorCreator
+ .createOFNodeConnector((short) (i + 10),
+ NodeCreator.createOFNode((long) (i + 10)));
+ Edge e1 = new Edge(headnc1, tailnc1);
+ TopoEdgeUpdate teu1 = new TopoEdgeUpdate(e1, props,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu1);
+
+ NodeConnector headnc2 = NodeConnectorCreator.createOFNodeConnector(
+ (short) (i + 1), headnc1.getNode());
+ NodeConnector tailnc2 = NodeConnectorCreator.createOFNodeConnector(
+ (short) (i + 11), tailnc1.getNode());
+ Edge e2 = new Edge(headnc2, tailnc2);
+ TopoEdgeUpdate teu2 = new TopoEdgeUpdate(e2, props,
+ UpdateType.ADDED);
+ topoedgeupdateList.add(teu2);
+ topoManagerImpl.edgeUpdate(topoedgeupdateList);
+
+ }
+
+ }
+
+ @Test
+ public void testGetNodeEdges() throws ConstructionException {
+ TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+ setNodeEdges(topoManagerImpl);
+
+ Map<Node, Set<Edge>> nodeEdgeMap = topoManagerImpl.getNodeEdges();
+ for (Iterator<Map.Entry<Node, Set<Edge>>> i = nodeEdgeMap.entrySet()
+ .iterator(); i.hasNext();) {
+ Map.Entry<Node, Set<Edge>> entry = i.next();
+ Node node = entry.getKey();
+ Long nodeId = ((Long) node.getID()).longValue();
+ Assert.assertTrue((node.getType().equals(NodeIDType.OPENFLOW)));
+
+ Set<Edge> edges = entry.getValue();
+ for (Edge edge : edges) {
+ Long headNcId = ((Short) edge.getHeadNodeConnector().getID())
+ .longValue();
+ Long tailNcId = ((Short) edge.getTailNodeConnector().getID())
+ .longValue();
+ if (nodeId == 1 || nodeId == 3 || nodeId == 5) {
+ Assert.assertTrue((headNcId.equals(nodeId) && tailNcId
+ .equals(nodeId + 10))
+ || (headNcId.equals(nodeId + 10) && tailNcId
+ .equals(nodeId))
+ || (headNcId.equals(nodeId + 1) && tailNcId
+ .equals(nodeId + 11))
+ || (headNcId.equals(nodeId + 11) && tailNcId
+ .equals(nodeId + 1)));
+ } else if (nodeId == 11 || nodeId == 13 || nodeId == 15) {
+ Assert.assertTrue((headNcId.equals(nodeId) && tailNcId
+ .equals(nodeId - 10))
+ || (headNcId.equals(nodeId - 10) && tailNcId
+ .equals(nodeId))
+ || (headNcId.equals(nodeId - 9) && tailNcId
+ .equals(nodeId + 1))
+ || (headNcId.equals(nodeId + 1) && tailNcId
+ .equals(nodeId - 9)));
+ }
+ }
+ i.remove();
+ }
+ Assert.assertTrue(nodeEdgeMap.isEmpty());
+ }
+
+ @Test
+ public void testGetEdges() throws ConstructionException {
+ TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+ setNodeEdges(topoManagerImpl);
+
+ Map<Edge, Set<Property>> edgeProperty = topoManagerImpl.getEdges();
+
+ for (Iterator<Map.Entry<Edge, Set<Property>>> i = edgeProperty
+ .entrySet().iterator(); i.hasNext();) {
+ Map.Entry<Edge, Set<Property>> entry = i.next();
+ Edge e = entry.getKey();
+ NodeConnector headnc = e.getHeadNodeConnector();
+ NodeConnector tailnc = e.getTailNodeConnector();
+
+ Long headNodeId = (Long) headnc.getNode().getID();
+
+ Long headNcId = ((Short) headnc.getID()).longValue();
+ Long tailNcId = ((Short) tailnc.getID()).longValue();
+
+ if (headNodeId == 1 || headNodeId == 3 || headNodeId == 5) {
+ Assert.assertTrue((headNcId.equals(headNodeId) && tailNcId
+ .equals(headNodeId + 10))
+ || (headNcId.equals(headNodeId + 10) && tailNcId
+ .equals(headNodeId))
+ || (headNcId.equals(headNodeId + 1) && tailNcId
+ .equals(headNodeId + 11))
+ || (headNcId.equals(headNodeId + 11) && tailNcId
+ .equals(headNodeId + 1)));
+ } else if (headNodeId == 11 || headNodeId == 13 || headNodeId == 15) {
+ Assert.assertTrue((headNcId.equals(headNodeId) && tailNcId
+ .equals(headNodeId - 10))
+ || (headNcId.equals(headNodeId - 10) && tailNcId
+ .equals(headNodeId))
+ || (headNcId.equals(headNodeId - 9) && tailNcId
+ .equals(headNodeId + 1))
+ || (headNcId.equals(headNodeId + 1) && tailNcId
+ .equals(headNodeId - 9)));
+ }
+
+ Set<Property> prop = entry.getValue();
+ for (Property p : prop) {
+ String pName;
+ long pValue;
+ if (p instanceof Bandwidth) {
+ Bandwidth b = (Bandwidth) p;
+ pName = Bandwidth.BandwidthPropName;
+ pValue = b.getValue();
+ Assert.assertTrue(pName.equals(p.getName())
+ && pValue == Bandwidth.BW100Gbps);
+ continue;
+ }
+ if (p instanceof Latency) {
+ Latency l = (Latency) p;
+ pName = Latency.LatencyPropName;
+ pValue = l.getValue();
+ Assert.assertTrue(pName.equals(p.getName())
+ && pValue == Latency.LATENCY100ns);
+ continue;
+ }
+ if (p instanceof State) {
+ State state = (State) p;
+ pName = State.StatePropName;
+ pValue = state.getValue();
+ Assert.assertTrue(pName.equals(p.getName())
+ && pValue == State.EDGE_UP);
+ continue;
+ }
+ }
+ i.remove();
+ }
+ Assert.assertTrue(edgeProperty.isEmpty());
+ }
+
+ @Test
+ public void testAddDeleteUserLink() {
+ TopologyUserLinkConfig link1 = new TopologyUserLinkConfig("default1",
+ "OF", "1", "OF", "2", "OF", "1", "OF", "2");
+ TopologyUserLinkConfig link2 = new TopologyUserLinkConfig("default1",
+ "OF", "10", "OF", "20", "OF", "10", "OF", "20");
+ TopologyUserLinkConfig link3 = new TopologyUserLinkConfig("default2",
+ "OF", "1", "OF", "2", "OF", "1", "OF", "2");
+ TopologyUserLinkConfig link4 = new TopologyUserLinkConfig("default20",
+ "OF", "10", "OF", "20", "OF", "10", "OF", "20");
+
+ TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+ topoManagerImpl.nonClusterObjectCreate();
+
+ Assert.assertTrue(topoManagerImpl.addUserLink(link1).isSuccess());
+ Assert.assertTrue(topoManagerImpl.addUserLink(link2).getCode() == StatusCode.CONFLICT);
+ Assert.assertTrue(topoManagerImpl.addUserLink(link3).getCode() == StatusCode.CONFLICT);
+ Assert.assertTrue(topoManagerImpl.addUserLink(link4).isSuccess());
+
+ Assert.assertTrue(topoManagerImpl.deleteUserLink(null).getCode() == StatusCode.BADREQUEST);
+ Assert.assertTrue(topoManagerImpl.deleteUserLink(link1.getName())
+ .isSuccess());
+ Assert.assertTrue(topoManagerImpl.deleteUserLink(link4.getName())
+ .isSuccess());
+ Assert.assertTrue(topoManagerImpl.getUserLinks().isEmpty());
+
+ }
+
+ @Test
+ public void testGetUserLink() {
+ TopologyUserLinkConfig[] link = new TopologyUserLinkConfig[5];
+ TopologyUserLinkConfig[] reverseLink = new TopologyUserLinkConfig[5];
+ TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+ topoManagerImpl.nonClusterObjectCreate();
+
+ String name = null;
+ String srcNodeIDType = null;
+ String srcSwitchId = null;
+ String srcNodeConnectorIDType = null;
+ String srcPort = null;
+ String dstNodeIDType = null;
+ String dstSwitchId = null;
+ String dstNodeConnectorIDType = null;
+ String dstPort = null;
+
+ /* Creating userlinks and checking for their validity */
+ link[0] = new TopologyUserLinkConfig(name, srcNodeIDType, srcSwitchId,
+ srcNodeConnectorIDType, srcPort, dstNodeIDType, dstSwitchId,
+ dstNodeConnectorIDType, dstPort);
+ Assert.assertTrue(link[0].isValid() == false);
+
+ srcSwitchId = "1";
+ link[0] = new TopologyUserLinkConfig(name, srcNodeIDType, srcSwitchId,
+ srcNodeConnectorIDType, srcPort, dstNodeIDType, dstSwitchId,
+ dstNodeConnectorIDType, dstPort);
+ Assert.assertTrue(link[0].isValid() == false);
+ dstSwitchId = "2";
+ link[0] = new TopologyUserLinkConfig(name, srcNodeIDType, srcSwitchId,
+ srcNodeConnectorIDType, srcPort, dstNodeIDType, dstSwitchId,
+ dstNodeConnectorIDType, dstPort);
+ Assert.assertTrue(link[0].isValid() == false);
+
+ Integer i;
+
+ for (i = 0; i < 5; i++) {
+ link[i] = new TopologyUserLinkConfig(name, srcNodeIDType,
+ srcSwitchId, srcNodeConnectorIDType, srcPort,
+ dstNodeIDType, dstSwitchId, dstNodeConnectorIDType, dstPort);
+
+ name = Integer.toString(i + 1);
+ srcSwitchId = Integer.toString(i + 1);
+ srcPort = Integer.toString(i + 1);
+ dstSwitchId = Integer.toString((i + 1) * 10);
+ dstPort = Integer.toString((i + 1) * 10);
+
+ link[i].setName(name);
+ link[i].setSrcSwitchId(srcSwitchId);
+ link[i].setSrcPort(srcPort);
+ link[i].setDstSwitchId(dstSwitchId);
+ link[i].setDstPort(dstPort);
+
+ Assert.assertTrue(link[i].isValid() == false);
+
+ link[i].setSrcNodeIDType("OF");
+ link[i].setSrcNodeConnectorIDType("OF");
+
+ Assert.assertTrue(link[i].isValid() == false);
+
+ link[i].setDstNodeIDType("OF");
+ link[i].setDstNodeConnectorIDType("OF");
+
+ Assert.assertTrue(link[i].isValid() == true);
+
+ reverseLink[i] = new TopologyUserLinkConfig(name, dstNodeIDType,
+ dstSwitchId, dstNodeConnectorIDType, dstPort,
+ srcNodeIDType, srcSwitchId, srcNodeConnectorIDType, srcPort);
+
+ topoManagerImpl.addUserLink(link[i]);
+ }
+ ConcurrentMap<String, TopologyUserLinkConfig> userLinks = topoManagerImpl
+ .getUserLinks();
+ TopologyUserLinkConfig resultLink;
+
+ for (i = 0; i < 5; i++) {
+ resultLink = userLinks.get(((Integer) (i + 1)).toString());
+
+ Assert.assertTrue(resultLink.getName().equals(
+ reverseLink[i].getName()));
+ Assert.assertTrue(resultLink.getDstSwitchId().equals(
+ reverseLink[i].getSrcSwitchId()));
+ Assert.assertTrue(resultLink.getDstPort().equals(
+ reverseLink[i].getSrcPort()));
+ Assert.assertTrue(resultLink.getSrcSwitchId().equals(
+ reverseLink[i].getDstSwitchId()));
+ Assert.assertTrue(resultLink.getSrcPort().equals(
+ reverseLink[i].getDstPort()));
+ }
+ }
+
+ @Test
+ public void testHostLinkMethods() throws ConstructionException,
+ UnknownHostException {
+ TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+ topoManagerImpl.nonClusterObjectCreate();
+ int hostCounter = 0;
+
+ State state;
+ Bandwidth bw;
+ Latency l;
+ Set<Property> props = new HashSet<Property>();
+ state = new State(State.EDGE_UP);
+ bw = new Bandwidth(Bandwidth.BW100Gbps);
+ l = new Latency(Latency.LATENCY100ns);
+ props.add(state);
+ props.add(bw);
+ props.add(l);
+
+ EthernetAddress ea;
+ InetAddress ip;
+ Host[] h = new Host[5];
+ NodeConnector[] nc = new NodeConnector[5];
+
+ /*
+ * Adding host, nodeConnector to hostsDB for the i = 0,1,2,3. No host
+ * added for i = 4
+ */
+ for (int i = 0; i < 5; i++) {
+ if (hostCounter < 4) {
+ ea = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+ (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) i });
+ String stringIP = new StringBuilder().append(i + 1).append(".")
+ .append(i + 10).append(".").append(i + 20).append(".")
+ .append(i + 30).toString();
+ ip = InetAddress.getByName(stringIP);
+ h[hostCounter] = new Host(ea, ip);
+ } else {
+ h[hostCounter] = null;
+ }
+ hostCounter++;
+ nc[i] = NodeConnectorCreator.createOFNodeConnector((short) (i + 1),
+ NodeCreator.createOFNode((long) (i + 1)));
+ topoManagerImpl
+ .updateHostLink(nc[i], h[i], UpdateType.ADDED, props);
+ }
+
+ for (int i = 0; i < 5; i++) {
+ Host host = topoManagerImpl.getHostAttachedToNodeConnector(nc[i]);
+ if (i == 4)
+ Assert.assertTrue(host == null);
+ else
+ Assert.assertTrue(host.equals(h[i]));
+ }
+
+ Set<NodeConnector> ncSet = topoManagerImpl.getNodeConnectorWithHost();
+ for (int i = 0; i < 5; i++) {
+ Assert.assertTrue(ncSet.remove(nc[i]));
+ }
+ Assert.assertTrue(ncSet.isEmpty());
+ }
+
+ @Test
+ public void testGetNodesWithNodeConnectorHost()
+ throws ConstructionException, UnknownHostException {
+ TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+ topoManagerImpl.nonClusterObjectCreate();
+ int hostCounter = 0;
+
+ State state;
+ Bandwidth bw;
+ Latency l;
+ Set<Property> props = new HashSet<Property>();
+ state = new State(State.EDGE_UP);
+ bw = new Bandwidth(Bandwidth.BW100Gbps);
+ l = new Latency(Latency.LATENCY100ns);
+ props.add(state);
+ props.add(bw);
+ props.add(l);
+
+ EthernetAddress ea;
+ InetAddress ip;
+ Host[] h = new Host[5];
+ NodeConnector[] nc = new NodeConnector[5];
+
+ /*
+ * Adding host, nodeconnector, properties of edge to hostsDB for the
+ * first three nodes only
+ */
+ for (int i = 1; i < 6; i++) {
+ if (i < 4) {
+ ea = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+ (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) i });
+ String stringIP = new StringBuilder().append(i).append(".")
+ .append(i + 10).append(".").append(i + 20).append(".")
+ .append(i + 30).toString();
+ ip = InetAddress.getByName(stringIP);
+ h[hostCounter] = new Host(ea, ip);
+ } else {
+ h[hostCounter] = null;
+ }
+ hostCounter++;
+ nc[i - 1] = NodeConnectorCreator.createOFNodeConnector((short) i,
+ NodeCreator.createOFNode((long) i));
+ topoManagerImpl.updateHostLink(nc[i - 1], h[i - 1],
+ UpdateType.ADDED, props);
+ }
+
+ /* Get the nodes which have host connected to its nodeConnector */
+ Map<Node, Set<NodeConnector>> nodeNCmap = topoManagerImpl
+ .getNodesWithNodeConnectorHost();
+ for (int i = 1; i < 6; i++) {
+ Node node = nc[i - 1].getNode();
+ Set<NodeConnector> ncSet = nodeNCmap.get(nc[i - 1].getNode());
+
+ Assert.assertTrue(ncSet == nodeNCmap.remove(node));
+ }
+
+ Assert.assertTrue(nodeNCmap.isEmpty());
+ }
+}
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>