Merge "Package names for enclosed Types of TOs were changed to fully qualified. Fully...
author	Ed Warnicke <eaw@cisco.com>
Sun, 21 Jul 2013 05:51:17 +0000 (05:51 +0000)
committer	Gerrit Code Review <gerrit@opendaylight.org>
Sun, 21 Jul 2013 05:51:17 +0000 (05:51 +0000)
15 files changed:
opendaylight/arphandler/src/main/java/org/opendaylight/controller/arphandler/internal/ArpHandler.java
opendaylight/clustering/services_implementation/src/main/java/org/opendaylight/controller/clustering/services_implementation/internal/Activator.java
opendaylight/clustering/services_implementation/src/main/java/org/opendaylight/controller/clustering/services_implementation/internal/ClusterManager.java
opendaylight/distribution/opendaylight/src/main/resources/configuration/config.ini
opendaylight/forwarding/staticrouting/src/main/java/org/opendaylight/controller/forwarding/staticrouting/internal/StaticRoutingImplementation.java
opendaylight/hosttracker/implementation/src/main/java/org/opendaylight/controller/hosttracker/internal/HostTracker.java
opendaylight/northbound/subnets/src/main/java/org/opendaylight/controller/subnets/northbound/SubnetsNorthboundJAXRS.java
opendaylight/protocol_plugins/openflow/src/main/java/org/opendaylight/controller/protocol_plugin/openflow/internal/DiscoveryService.java
opendaylight/protocol_plugins/openflow/src/main/java/org/opendaylight/controller/protocol_plugin/openflow/internal/OFStatisticsManager.java
opendaylight/sal/api/src/main/java/org/opendaylight/controller/sal/utils/ObjectWriter.java
opendaylight/samples/simpleforwarding/pom.xml
opendaylight/switchmanager/implementation/src/main/java/org/opendaylight/controller/switchmanager/internal/SwitchManagerImpl.java
opendaylight/topologymanager/src/main/java/org/opendaylight/controller/topologymanager/TopologyUserLinkConfig.java
opendaylight/topologymanager/src/main/java/org/opendaylight/controller/topologymanager/internal/Activator.java
opendaylight/topologymanager/src/main/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImpl.java

index 6e625a0eb7ff963b02c13b7c89b36f0045f4807f..811c7aca8370525d13bcabd2ef46d898bcc4a88f 100644 (file)
@@ -21,9 +21,7 @@ import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.concurrent.ConcurrentHashMap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import java.util.concurrent.ConcurrentMap;
 
 import org.opendaylight.controller.hosttracker.IfHostListener;
 import org.opendaylight.controller.hosttracker.IfIptoHost;
@@ -47,6 +45,8 @@ import org.opendaylight.controller.sal.utils.NetUtils;
 import org.opendaylight.controller.switchmanager.ISwitchManager;
 import org.opendaylight.controller.switchmanager.Subnet;
 import org.opendaylight.controller.topologymanager.ITopologyManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ArpHandler implements IHostFinder, IListenDataPacket {
     private static final Logger logger = LoggerFactory
@@ -57,8 +57,8 @@ public class ArpHandler implements IHostFinder, IListenDataPacket {
     private IDataPacketService dataPacketService = null;
     private Set<IfHostListener> hostListener = Collections
             .synchronizedSet(new HashSet<IfHostListener>());
-    private ConcurrentHashMap<InetAddress, Set<HostNodeConnector>> arpRequestors;
-    private ConcurrentHashMap<InetAddress, Short> countDownTimers;
+    private ConcurrentMap<InetAddress, Set<HostNodeConnector>> arpRequestors;
+    private ConcurrentMap<InetAddress, Short> countDownTimers;
     private Timer periodicTimer;
 
     void setHostListener(IfHostListener s) {
index cb96ad5c8ece76fab65b518f9ddea8eb4b67c374..5a39e9ec4f6d3c1501d04a74869e41b7c2c8b36b 100644 (file)
@@ -10,6 +10,7 @@
 package org.opendaylight.controller.clustering.services_implementation.internal;
 
 import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
+import org.opendaylight.controller.sal.core.IContainerAware;
 
 import org.opendaylight.controller.clustering.services.ICacheUpdateAware;
 import org.opendaylight.controller.clustering.services.IClusterContainerServices;
@@ -30,6 +31,7 @@ public class Activator extends ComponentActivatorAbstractBase {
      * ComponentActivatorAbstractBase.
      *
      */
+    @Override
     public void init() {
     }
 
@@ -38,6 +40,7 @@ public class Activator extends ComponentActivatorAbstractBase {
      * cleanup done by ComponentActivatorAbstractBase
      *
      */
+    @Override
     public void destroy() {
     }
 
@@ -50,6 +53,7 @@ public class Activator extends ComponentActivatorAbstractBase {
      * instantiated in order to get an fully working implementation
      * Object
      */
+    @Override
     public Object[] getGlobalImplementations() {
         Object[] res = { ClusterManager.class, ClusterGlobalManager.class };
         return res;
@@ -64,6 +68,7 @@ public class Activator extends ComponentActivatorAbstractBase {
      * instantiated in order to get an fully working implementation
      * Object
      */
+    @Override
     public Object[] getImplementations() {
         Object[] res = { ClusterContainerManager.class };
         return res;
@@ -82,6 +87,7 @@ public class Activator extends ComponentActivatorAbstractBase {
      * also optional per-container different behavior if needed, usually
      * should not be the case though.
      */
+    @Override
     public void configureInstance(Component c, Object imp, String containerName) {
         if (imp.equals(ClusterContainerManager.class)) {
             c.setInterface(new String[] { IClusterContainerServices.class.getName() },
@@ -118,10 +124,11 @@ public class Activator extends ComponentActivatorAbstractBase {
      * needed as long as the same routine can configure multiple
      * implementations
      */
+    @Override
     public void configureGlobalInstance(Component c, Object imp) {
         if (imp.equals(ClusterManager.class)) {
             // export the service for Apps and Plugins
-            c.setInterface(new String[] { IClusterServices.class.getName() }, null);
+            c.setInterface(new String[] { IClusterServices.class.getName(), IContainerAware.class.getName() }, null);
         }
 
         if (imp.equals(ClusterGlobalManager.class)) {
index 0b2610797fe507f210bf09af989fa4ee643c21ab..c3fd30ae9b65629c8e83bb7f789229b0b6bc5870 100644 (file)
@@ -9,8 +9,6 @@
 
 package org.opendaylight.controller.clustering.services_implementation.internal;
 
-import java.io.PrintWriter;
-import java.io.StringWriter;
 import java.net.InetAddress;
 import java.net.NetworkInterface;
 import java.net.SocketException;
@@ -54,10 +52,11 @@ import org.opendaylight.controller.clustering.services.IClusterServices;
 import org.opendaylight.controller.clustering.services.IGetUpdates;
 import org.opendaylight.controller.clustering.services.IListenRoleChange;
 import org.opendaylight.controller.clustering.services.ListenRoleChangeAddException;
+import org.opendaylight.controller.sal.core.IContainerAware;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class ClusterManager implements IClusterServices {
+public class ClusterManager implements IClusterServices, IContainerAware {
     protected static final Logger logger = LoggerFactory
             .getLogger(ClusterManager.class);
     private DefaultCacheManager cm;
@@ -99,10 +98,10 @@ public class ClusterManager implements IClusterServices {
             try {
                 Enumeration<NetworkInterface> e = NetworkInterface.getNetworkInterfaces();
                 while (e.hasMoreElements()) {
-                    NetworkInterface n = (NetworkInterface) e.nextElement();
+                    NetworkInterface n = e.nextElement();
                     Enumeration<InetAddress> ee = n.getInetAddresses();
                     while (ee.hasMoreElements()) {
-                        InetAddress i = (InetAddress) ee.nextElement();
+                        InetAddress i = ee.nextElement();
                         myAddresses.add(i);
                     }
                 }
@@ -562,25 +561,29 @@ public class ClusterManager implements IClusterServices {
         return null;
     }
 
+    @Override
     public List<InetAddress> getClusteredControllers() {
         EmbeddedCacheManager manager = this.cm;
         if (manager == null) {
             return null;
         }
         List<Address> controllers = manager.getMembers();
-        if ((controllers == null) || controllers.size() == 0)
+        if ((controllers == null) || controllers.size() == 0) {
             return null;
+        }
 
         List<InetAddress> clusteredControllers = new ArrayList<InetAddress>();
         for (Address a : controllers) {
             InetAddress inetAddress = addressToInetAddress(a);
             if (inetAddress != null
-                    && !inetAddress.getHostAddress().equals(loopbackAddress))
+                    && !inetAddress.getHostAddress().equals(loopbackAddress)) {
                 clusteredControllers.add(inetAddress);
+            }
         }
         return clusteredControllers;
     }
 
+    @Override
     public InetAddress getMyAddress() {
         EmbeddedCacheManager manager = this.cm;
         if (manager == null) {
@@ -660,4 +663,21 @@ public class ClusterManager implements IClusterServices {
             }
         }
     }
+
+    private void removeContainerCaches(String containerName) {
+        logger.info("Destroying caches for container {}", containerName);
+        for (String cacheName : this.getCacheList(containerName)) {
+            this.destroyCache(containerName, cacheName);
+        }
+    }
+
+    @Override
+    public void containerCreate(String arg0) {
+        // no op
+    }
+
+    @Override
+    public void containerDestroy(String container) {
+        removeContainerCaches(container);
+    }
 }
index 6e8a3555234ef8cd668a048fd299eddac823656e..b99b4bfdcaf40aefc2aae1daa0c340f97e96d218 100644 (file)
@@ -50,6 +50,8 @@ org.eclipse.gemini.web.tomcat.config.path=configuration/tomcat-server.xml
 # of.messageResponseTimer=2000
 # The switch liveness timeout value (default 60500 msec)
 # of.switchLivenessTimeout=60500
+# The size of the queue holding pending statistics requests (default 64). For large networks of n switches, it is recommended to set the queue size to n
+# of.statsQueueSize = 64
 # The flow statistics polling interval in second (default 10 sec)
 # of.flowStatsPollInterval=10
 # The port statistics polling interval in second (default 5 sec)
@@ -64,8 +66,8 @@ org.eclipse.gemini.web.tomcat.config.path=configuration/tomcat-server.xml
 # of.discoveryInterval=300
 # The timeout multiple of discovery interval
 # of.discoveryTimeoutMultiple=2
-# For newly added ports, allow one more retry if the elapsed time exceeds this threshold (default 10 sec)
-# of.discoveryThreshold=10
+# For newly added ports, allow one more retry if the elapsed time exceeds this threshold (default 30 sec)
+# of.discoveryThreshold=30
 # The maximum number of ports handled in one discovery batch (default 1024)
 # of.discoveryBatchMaxPorts=1024
 
index 8819be39885d950e9f6aafc7ed4ad737247a2037..4afd4fb8e444136562b2d59fff79728c74bc763f 100644 (file)
@@ -44,12 +44,12 @@ import org.opendaylight.controller.forwarding.staticrouting.StaticRouteConfig;
 import org.opendaylight.controller.hosttracker.IfIptoHost;
 import org.opendaylight.controller.hosttracker.IfNewHostNotify;
 import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
-import org.opendaylight.controller.sal.utils.StatusCode;
 import org.opendaylight.controller.sal.utils.GlobalConstants;
 import org.opendaylight.controller.sal.utils.IObjectReader;
 import org.opendaylight.controller.sal.utils.ObjectReader;
 import org.opendaylight.controller.sal.utils.ObjectWriter;
 import org.opendaylight.controller.sal.utils.Status;
+import org.opendaylight.controller.sal.utils.StatusCode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -206,22 +206,6 @@ public class StaticRoutingImplementation implements IfNewHostNotify,
         }
     }
 
-    @SuppressWarnings("deprecation")
-        private void destroyCaches() {
-        if (this.clusterContainerService == null) {
-            log
-                    .info("un-initialized clusterContainerService, can't destroy cache");
-            return;
-        }
-
-        clusterContainerService.destroyCache("forwarding.staticrouting.routes");
-        clusterContainerService
-                .destroyCache("forwarding.staticrouting.configs");
-        clusterContainerService
-                .destroyCache("forwarding.staticrouting.configSaveEvent");
-
-    }
-
     @Override
     public void entryCreated(Long key, String cacheName, boolean local) {
     }
@@ -488,7 +472,6 @@ public class StaticRoutingImplementation implements IfNewHostNotify,
         log.debug("Destroy all the Static Routing Rules given we are "
                 + "shutting down");
 
-        destroyCaches();
         gatewayProbeTimer.cancel();
 
         // Clear the listener so to be ready in next life
index 8468c5b284aee7230d2264974f33e3d428f07127..6b7bac03e9bdcfc472ad3dce7cc2c10d1f8dd096 100644 (file)
@@ -212,17 +212,6 @@ public class HostTracker implements IfIptoHost, IfHostListener, ISwitchManagerAw
         inactiveStaticHosts = new ConcurrentHashMap<NodeConnector, HostNodeConnector>();
     }
 
-    @SuppressWarnings("deprecation")
-    private void destroyCache() {
-        if (this.clusterContainerService == null) {
-            logger.error("un-initialized clusterMger, can't destroy cache");
-            return;
-        }
-        logger.debug("Destroying Cache for HostTracker");
-        this.clusterContainerService.destroyCache("hostTrackerAH");
-        this.clusterContainerService.destroyCache("hostTrackerIH");
-        nonClusterObjectCreate();
-    }
 
     public void shutDown() {
     }
@@ -1343,7 +1332,6 @@ public class HostTracker implements IfIptoHost, IfHostListener, ISwitchManagerAw
      *
      */
     void destroy() {
-        destroyCache();
     }
 
     /**
index 6869fd5fbea37595c33eec0d2f04c950db5e754a..e3c2189bf91550bd7c8d35526ec0d75413a88a6a 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.subnets.northbound;
 
 import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 import javax.ws.rs.Consumes;
@@ -31,7 +32,6 @@ import org.codehaus.enunciate.jaxrs.TypeHint;
 import org.opendaylight.controller.northbound.commons.RestMessages;
 import org.opendaylight.controller.northbound.commons.exception.InternalServerErrorException;
 import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
-import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
 import org.opendaylight.controller.northbound.commons.exception.UnauthorizedException;
 import org.opendaylight.controller.northbound.commons.utils.NorthboundUtils;
 import org.opendaylight.controller.sal.authorization.Privilege;
@@ -44,8 +44,7 @@ import org.slf4j.LoggerFactory;
 
 @Path("/")
 public class SubnetsNorthboundJAXRS {
-    protected static final Logger logger = LoggerFactory
-            .getLogger(SubnetsNorthboundJAXRS.class);
+    protected static final Logger logger = LoggerFactory.getLogger(SubnetsNorthboundJAXRS.class);
 
     private String username;
 
@@ -71,20 +70,15 @@ public class SubnetsNorthboundJAXRS {
     @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
     @StatusCodes({ @ResponseCode(code = 404, condition = "The containerName passed was not found") })
     @TypeHint(SubnetConfigs.class)
-    public SubnetConfigs listSubnets(
-            @PathParam("containerName") String containerName) {
-        if (!NorthboundUtils.isAuthorized(
-                getUserName(), containerName, Privilege.READ, this)) {
-            throw new UnauthorizedException(
-                    "User is not authorized to perform this operation on container "
-                            + containerName);
+    public SubnetConfigs listSubnets(@PathParam("containerName") String containerName) {
+        if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.READ, this)) {
+            throw new UnauthorizedException("User is not authorized to perform this operation on container "
+                    + containerName);
         }
         ISwitchManager switchManager = null;
-        switchManager = (ISwitchManager) ServiceHelper.getInstance(
-                ISwitchManager.class, containerName, this);
+        switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName, this);
         if (switchManager == null) {
-            throw new ResourceNotFoundException(
-                    RestMessages.NOCONTAINER.toString());
+            throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
         }
         return new SubnetConfigs(switchManager.getSubnetsConfigList());
     }
@@ -103,30 +97,25 @@ public class SubnetsNorthboundJAXRS {
     @GET
     @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
     @StatusCodes({
-            @ResponseCode(code = 404, condition = "The containerName passed was not found"),
-            @ResponseCode(code = 404, condition = "Subnet does not exist") })
-    @TypeHint(SubnetConfig.class)
+        @ResponseCode(code = 404, condition = "The containerName passed was not found"),
+        @ResponseCode(code = 404, condition = "Subnet does not exist") })
+        @TypeHint(SubnetConfig.class)
     public SubnetConfig listSubnet(
-            @PathParam("containerName") String containerName,
-            @PathParam("subnetName") String subnetName) {
+        @PathParam("containerName") String containerName,
+        @PathParam("subnetName") String subnetName) {
 
-        if (!NorthboundUtils.isAuthorized(
-                getUserName(), containerName, Privilege.READ, this)) {
-            throw new UnauthorizedException(
-                    "User is not authorized to perform this operation on container "
-                            + containerName);
+        if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.READ, this)) {
+            throw new UnauthorizedException("User is not authorized to perform this operation on container "
+                    + containerName);
         }
         ISwitchManager switchManager = null;
-        switchManager = (ISwitchManager) ServiceHelper.getInstance(
-                ISwitchManager.class, containerName, this);
+        switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName, this);
         if (switchManager == null) {
-            throw new ResourceNotFoundException(
-                    RestMessages.NOCONTAINER.toString());
+            throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
         }
         SubnetConfig res = switchManager.getSubnetConfig(subnetName);
         if (res == null) {
-            throw new ResourceNotFoundException(
-                    RestMessages.NOSUBNET.toString());
+            throw new ResourceNotFoundException(RestMessages.NOSUBNET.toString());
         } else {
             return res;
         }
@@ -147,37 +136,30 @@ public class SubnetsNorthboundJAXRS {
     @Path("/{containerName}/{subnetName}")
     @POST
     @StatusCodes({
-            @ResponseCode(code = 404, condition = "Invalid Data passed"),
-            @ResponseCode(code = 201, condition = "Subnet added"),
-            @ResponseCode(code = 500, condition = "Addition of subnet failed") })
-    public Response addSubnet(@PathParam("containerName") String containerName,
-            @PathParam("subnetName") String subnetName,
-            @QueryParam("subnet") String subnet) {
-
-        if (!NorthboundUtils.isAuthorized(
-                getUserName(), containerName, Privilege.WRITE, this)) {
-            throw new UnauthorizedException(
-                    "User is not authorized to perform this operation on container "
-                            + containerName);
+        @ResponseCode(code = 404, condition = "Invalid Data passed"),
+        @ResponseCode(code = 201, condition = "Subnet added"),
+        @ResponseCode(code = 500, condition = "Addition of subnet failed") })
+    public Response addSubnet(
+        @PathParam("containerName") String containerName,
+        @PathParam("subnetName") String subnetName, @QueryParam("subnet") String subnet) {
+
+        if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.WRITE, this)) {
+            throw new UnauthorizedException("User is not authorized to perform this operation on container "
+                    + containerName);
         }
         if (subnetName == null) {
-            throw new ResourceNotFoundException(
-                    RestMessages.INVALIDDATA.toString());
+            throw new ResourceNotFoundException(RestMessages.INVALIDDATA.toString());
         }
         if (subnet == null) {
-            throw new ResourceNotFoundException(
-                    RestMessages.INVALIDDATA.toString());
+            throw new ResourceNotFoundException(RestMessages.INVALIDDATA.toString());
         }
         ISwitchManager switchManager = null;
-        switchManager = (ISwitchManager) ServiceHelper.getInstance(
-                ISwitchManager.class, containerName, this);
+        switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName, this);
         if (switchManager == null) {
-            throw new ResourceNotFoundException(
-                    RestMessages.NOCONTAINER.toString());
+            throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
         }
 
-        SubnetConfig cfgObject = new SubnetConfig(subnetName, subnet,
-                new ArrayList<String>(0));
+        SubnetConfig cfgObject = new SubnetConfig(subnetName, subnet, new ArrayList<String>(0));
         Status status = switchManager.addSubnet(cfgObject);
         if (status.isSuccess()) {
             return Response.status(Response.Status.CREATED).build();
@@ -197,29 +179,24 @@ public class SubnetsNorthboundJAXRS {
     @Path("/{containerName}/{subnetName}")
     @DELETE
     @StatusCodes({
-            @ResponseCode(code = 404, condition = "The containerName passed was not found"),
-            @ResponseCode(code = 500, condition = "Removal of subnet failed") })
+        @ResponseCode(code = 404, condition = "The containerName passed was not found"),
+        @ResponseCode(code = 500, condition = "Removal of subnet failed") })
     public Response removeSubnet(
-            @PathParam("containerName") String containerName,
-            @PathParam("subnetName") String subnetName) {
+        @PathParam("containerName") String containerName,
+        @PathParam("subnetName") String subnetName) {
         if (subnetName == null) {
-            throw new ResourceNotFoundException(
-                    RestMessages.INVALIDDATA.toString());
+            throw new ResourceNotFoundException(RestMessages.INVALIDDATA.toString());
         }
 
-        if (!NorthboundUtils.isAuthorized(
-                getUserName(), containerName, Privilege.WRITE, this)) {
-            throw new UnauthorizedException(
-                    "User is not authorized to perform this operation on container "
-                            + containerName);
+        if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.WRITE, this)) {
+            throw new UnauthorizedException("User is not authorized to perform this operation on container "
+                    + containerName);
         }
 
         ISwitchManager switchManager = null;
-        switchManager = (ISwitchManager) ServiceHelper.getInstance(
-                ISwitchManager.class, containerName, this);
+        switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName, this);
         if (switchManager == null) {
-            throw new ResourceNotFoundException(
-                    RestMessages.NOCONTAINER.toString());
+            throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
         }
         Status status = switchManager.removeSubnet(subnetName);
         if (status.isSuccess()) {
@@ -244,22 +221,21 @@ public class SubnetsNorthboundJAXRS {
     @POST
     @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
     @StatusCodes({
-            @ResponseCode(code = 202, condition = "Operation successful"),
-            @ResponseCode(code = 400, condition = "Invalid request, i.e., requested changing the subnet name"),
-            @ResponseCode(code = 404, condition = "The containerName or subnetName is not found"),
-            @ResponseCode(code = 500, condition = "Internal server error")})
+        @ResponseCode(code = 202, condition = "Operation successful"),
+        @ResponseCode(code = 400, condition = "Invalid request, i.e., requested changing the subnet name"),
+        @ResponseCode(code = 404, condition = "The containerName or subnetName is not found"),
+        @ResponseCode(code = 500, condition = "Internal server error") })
     public Response modifySubnet(@PathParam("containerName") String containerName,
-                                 @PathParam("subnetName") String name,
-                                 @TypeHint(SubnetConfig.class) JAXBElement<SubnetConfig> subnetConfigData) {
+        @PathParam("subnetName") String name,
+        @TypeHint(SubnetConfig.class) JAXBElement<SubnetConfig> subnetConfigData) {
 
-        if (!NorthboundUtils.isAuthorized(getUserName(), containerName,
-                                          Privilege.WRITE, this)) {
-            throw new UnauthorizedException(
-                "User is not authorized to perform this operation on container " + containerName);
+        if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.WRITE, this)) {
+            throw new UnauthorizedException("User is not authorized to perform this operation on container "
+                    + containerName);
         }
 
-        ISwitchManager switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class,
-                                                                                  containerName, this);
+        ISwitchManager switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName,
+                this);
         if (switchManager == null) {
             throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
         }
@@ -271,96 +247,171 @@ public class SubnetsNorthboundJAXRS {
 
         // make sure that the name matches an existing subnet and we're not
         // changing the name or subnet IP/mask
-        if (existingConf == null){
+        if (existingConf == null) {
             // don't have a subnet by that name
             return Response.status(Response.Status.NOT_FOUND).build();
 
-        }else if( !existingConf.getName().equals(subnetConf.getName())
-            || !existingConf.getSubnet().equals(subnetConf.getSubnet())) {
+        } else if (!existingConf.getName().equals(subnetConf.getName())
+                || !existingConf.getSubnet().equals(subnetConf.getSubnet())) {
             // can't change the name of a subnet
             return Response.status(Response.Status.BAD_REQUEST).build();
 
-        }else{
+        } else {
             // create a set for fast lookups
             Set<String> newPorts = new HashSet<String>(subnetConf.getNodePorts());
 
             // go through the current ports and (1) remove ports that aren't
             // there anymore and (2) remove ports that are still there from the
             // set of ports to add
-            for(String s : existingConf.getNodePorts()){
-                if(newPorts.contains(s)){
+            for (String s : existingConf.getNodePorts()) {
+                if (newPorts.contains(s)) {
                     newPorts.remove(s);
-                }else{
+                } else {
                     Status st = switchManager.removePortsFromSubnet(name, s);
                     successful = successful && st.isSuccess();
                 }
             }
 
             // add any remaining ports
-            for(String s : newPorts){
+            for (String s : newPorts) {
                 Status st = switchManager.addPortsToSubnet(name, s);
                 successful = successful && st.isSuccess();
             }
         }
 
-        if(successful){
+        if (successful) {
             return Response.status(Response.Status.ACCEPTED).build();
-        }else{
+        } else {
             return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
         }
     }
 
-    /*
-     *
-     * Add or remove switch ports to a subnet POST subnets/green/sw
+    /**
      *
-     * @param model
+     * Add ports to a subnet
      *
      * @param containerName
-     *
+     *            Name of the Container
      * @param name
+     *            Name of the SubnetConfig to be modified
+     * @param subnetConfigData
+     *            the {@link SubnetConfig} structure in JSON passed as a POST
+     *            parameter
+     * @return If the operation is successful or not
+     */
+    @Path("/{containerName}/{subnetName}/add")
+    @POST
+    @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+    @StatusCodes({
+        @ResponseCode(code = 202, condition = "Operation successful"),
+        @ResponseCode(code = 400, condition = "Invalid request"),
+        @ResponseCode(code = 404, condition = "The containerName or subnetName is not found"),
+        @ResponseCode(code = 500, condition = "Internal server error") })
+    public Response addNodePorts(
+            @PathParam("containerName") String containerName,
+            @PathParam("subnetName") String name,
+            @TypeHint(SubnetConfig.class) JAXBElement<SubnetConfig> subnetConfigData) {
+
+        SubnetConfig subnetConf = subnetConfigData.getValue();
+        return addOrDeletePorts(containerName, name, subnetConf, "add");
+    }
+
+    /**
      *
-     * @param subnet: the subnet name name
-     *
-     * @param switchports: datapath ID/port list =>
-     * xx:xx:xx:xx:xx:xx:xx:xx/a,b,c-m,r-t,y
-     *
-     * @return
-     *
-     * @RequestMapping(value = "/{containerName}/{name}", method =
-     * RequestMethod.POST)
-     *
-     * public View addSwitchports(Map<String, Object> model,
-     *
-     * @PathVariable(value = "containerName") String containerName,
-     *
-     * @PathVariable(value = "name") String name,
-     *
-     * @RequestParam(value = "nodeports") String nodePorts,
-     *
-     * @RequestParam(value = "action") String action) {
-     *
-     * checkDefaultDisabled(containerName); ISwitchManager switchManager = null;
-     * try { BundleContext bCtx = FrameworkUtil.getBundle(this.getClass())
-     * .getBundleContext();
-     *
-     * ServiceReference[] services = bCtx.getServiceReferences(
-     * ISwitchManager.class.getName(), "(containerName=" + containerName + ")");
-     *
-     * if (services != null) { switchManager = (ISwitchManager)
-     * bCtx.getService(services[0]); logger.debug("Switch manager reference is:"
-     * + switchManager); } } catch (Exception e) {
-     * logger.error("Switch Manager reference is NULL"); }
-     *
-     * checkContainerExists(switchManager);
-     *
-     * String ret; if (action.equals("add")) { ret =
-     * switchManager.addPortsToSubnet(name, nodePorts); } else if
-     * (action.equals("remove")) { ret =
-     * switchManager.removePortsFromSubnet(name, nodePorts); } else { throw new
-     * UnsupportedMediaTypeException(RestMessages.UNKNOWNACTION .toString() +
-     * ": " + action); }
+     * Delete ports from a subnet
      *
-     * return returnViewOrThrowConflicEx(model, ret); }
+     * @param containerName
+     *            Name of the Container
+     * @param name
+     *            Name of the SubnetConfig to be modified
+     * @param subnetConfigData
+     *            the {@link SubnetConfig} structure in JSON passed as a POST
+     *            parameter
+     * @return If the operation is successful or not
      */
+    @Path("/{containerName}/{subnetName}/delete")
+    @POST
+    @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+    @StatusCodes({
+        @ResponseCode(code = 202, condition = "Operation successful"),
+        @ResponseCode(code = 400, condition = "Invalid request"),
+        @ResponseCode(code = 404, condition = "The containerName or subnetName is not found"),
+        @ResponseCode(code = 500, condition = "Internal server error") })
+    public Response deleteNodePorts(
+            @PathParam("containerName") String containerName,
+            @PathParam("subnetName") String name,
+            @TypeHint(SubnetConfig.class) JAXBElement<SubnetConfig> subnetConfigData) {
+
+        SubnetConfig subnetConf = subnetConfigData.getValue();
+        return addOrDeletePorts(containerName, name, subnetConf, "delete");
+    }
+
+    /**
+    *
+    * Add/Delete ports to/from a subnet
+    *
+    * @param containerName
+    *            Name of the Container
+    * @param name
+    *            Name of the SubnetConfig to be modified
+    * @param subnetConfig
+    *            the {@link SubnetConfig} structure
+    * @param action
+    *            add or delete
+    * @return If the operation is successful or not
+    */
+    private Response addOrDeletePorts(String containerName, String name, SubnetConfig subnetConf, String action) {
+
+        if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.WRITE, this)) {
+            throw new UnauthorizedException("User is not authorized to perform this operation on container "
+                    + containerName);
+        }
+
+        ISwitchManager switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName,
+                this);
+        if (switchManager == null) {
+            throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
+        }
+
+        SubnetConfig existingConf = switchManager.getSubnetConfig(name);
+
+        // make sure that the name matches an existing subnet and we're not
+        // changing the name or subnet IP/mask
+        if (existingConf == null) {
+            // don't have a subnet by that name
+            return Response.status(Response.Status.NOT_FOUND).build();
+        } else if (!existingConf.getName().equals(subnetConf.getName())
+                || !existingConf.getSubnet().equals(subnetConf.getSubnet())) {
+            // can't change the name of a subnet
+            return Response.status(Response.Status.BAD_REQUEST).build();
+        } else {
+            Status st;
+            boolean successful = true;
+            List<String> ports = subnetConf.getNodePorts();
+
+            if (action.equals("add")) {
+                // add new ports
+                ports.removeAll(existingConf.getNodePorts());
+                for (String port : ports) {
+                    st = switchManager.addPortsToSubnet(name, port);
+                    successful = successful && st.isSuccess();
+                }
+            } else if (action.equals("delete")) {
+                // delete existing ports
+                ports.retainAll(existingConf.getNodePorts());
+                for (String port : ports) {
+                    st = switchManager.removePortsFromSubnet(name, port);
+                    successful = successful && st.isSuccess();
+                }
+            } else {
+                return Response.status(Response.Status.BAD_REQUEST).build();
+            }
+
+            if (successful) {
+                return Response.status(Response.Status.ACCEPTED).build();
+            } else {
+                return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
+            }
+        }
+    }
 }
index ebd5a68341b9caa5cc3166f415c3d0ed212d8bf4..b3cfde9c146983b4ac44f115178d5af16da606a6 100644 (file)
@@ -167,7 +167,7 @@ public class DiscoveryService implements IInventoryShimExternalListener, IDataPa
     public enum DiscoveryPeriod {
         INTERVAL        (300),
         AGEOUT          (120),
-        THRESHOLD       (10);
+        THRESHOLD       (30);
 
         private int time;   // sec
         private int tick;   // tick
index 3c02c1762875eceb1ba226f8b333584af9b852bf..bae49d511a2f853f61d428f0751ea7b231539831 100644 (file)
@@ -155,6 +155,20 @@ IInventoryShimExternalListener, CommandProvider {
         }
     }
 
+    private short getStatsQueueSize() {
+        String statsQueueSizeStr = System.getProperty("of.statsQueueSize");
+        short statsQueueSize = INITIAL_SIZE;
+        if (statsQueueSizeStr != null) {
+            try {
+                statsQueueSize = Short.parseShort(statsQueueSizeStr);
+                if (statsQueueSize <= 0) {
+                    statsQueueSize = INITIAL_SIZE;
+                }
+            } catch (Exception e) {
+            }
+        }
+        return statsQueueSize;
+    }
     /**
      * Function called by the dependency manager when all the required
      * dependencies are satisfied
@@ -166,8 +180,8 @@ IInventoryShimExternalListener, CommandProvider {
         portStatistics = new ConcurrentHashMap<Long, List<OFStatistics>>();
         tableStatistics = new ConcurrentHashMap<Long, List<OFStatistics>>();
         dummyList = new ArrayList<OFStatistics>(1);
+        pendingStatsRequests = new LinkedBlockingQueue<StatsRequest>(getStatsQueueSize());
         statisticsTimerTicks = new ConcurrentHashMap<Long, StatisticsTicks>(INITIAL_SIZE);
-        pendingStatsRequests = new LinkedBlockingQueue<StatsRequest>(INITIAL_SIZE);
         switchPortStatsUpdated = new LinkedBlockingQueue<Long>(INITIAL_SIZE);
         switchSupportsVendorExtStats = new ConcurrentHashMap<Long, Boolean>(INITIAL_SIZE);
         txRates = new HashMap<Long, Map<Short, TxRates>>(INITIAL_SIZE);
index 730a9b0368f6b853b250d758f7ca95a7be940a82..8e1dcd69f999fec5f2619c1f8e65d0fc511212f4 100644 (file)
@@ -61,6 +61,6 @@ public class ObjectWriter {
                 }
             }
         }
-        return new Status(StatusCode.SUCCESS, null);
+        return new Status(StatusCode.SUCCESS);
     }
 }
index 14e440df029f1bb71b901a647d12c2cf548b617e..f12331cbb296565602f0a7902ae00e86b2bc4f50 100644 (file)
@@ -41,7 +41,7 @@
               org.apache.felix.dm
             </Import-Package>
             <Export-Package>
-              org.opendaylight.controller.sample.simpleforwarding
+              org.opendaylight.controller.samples.simpleforwarding
             </Export-Package>
             <Bundle-Activator>
               org.opendaylight.controller.samples.simpleforwarding.internal.Activator
index 9ff3f9185053076398a6bd601b5dc8f56bae2b3f..c24c93c65ddaa738136c065713461fe62e659b4c 100644 (file)
@@ -93,13 +93,15 @@ CommandProvider {
     private String subnetFileName = null, spanFileName = null,
             switchConfigFileName = null;
     private final List<NodeConnector> spanNodeConnectors = new CopyOnWriteArrayList<NodeConnector>();
-    private ConcurrentMap<InetAddress, Subnet> subnets; // set of Subnets keyed by the InetAddress
+    // set of Subnets keyed by the InetAddress
+    private ConcurrentMap<InetAddress, Subnet> subnets;
     private ConcurrentMap<String, SubnetConfig> subnetsConfigList;
     private ConcurrentMap<SpanConfig, SpanConfig> spanConfigList;
-    private ConcurrentMap<String, SwitchConfig> nodeConfigList; // manually configured parameters for the node like name and tier
+    // manually configured parameters for the node such as name, tier, mode
+    private ConcurrentMap<String, SwitchConfig> nodeConfigList;
     private ConcurrentMap<Long, String> configSaveEvent;
-    private ConcurrentMap<Node, Map<String, Property>> nodeProps; // properties are maintained in global container only
-    private ConcurrentMap<NodeConnector, Map<String, Property>> nodeConnectorProps; // properties are maintained in global container only
+    private ConcurrentMap<Node, Map<String, Property>> nodeProps;
+    private ConcurrentMap<NodeConnector, Map<String, Property>> nodeConnectorProps;
     private ConcurrentMap<Node, Map<String, NodeConnector>> nodeConnectorNames;
     private IInventoryService inventoryService;
     private final Set<ISwitchManagerAware> switchManagerAware = Collections
@@ -662,40 +664,30 @@ CommandProvider {
             modeChange = true;
         }
 
-        try {
-            String nodeId = cfgObject.getNodeId();
-            Node node = Node.fromString(nodeId);
-            Map<String, Property> propMapCurr = nodeProps.get(node);
-            Map<String, Property> propMap = new HashMap<String, Property>();
-            if (propMapCurr != null) {
-                for (String s : propMapCurr.keySet()) {
-                    propMap.put(s, propMapCurr.get(s).clone());
-                }
-            }
-            Property desc = new Description(cfgObject.getNodeDescription());
-            propMap.put(desc.getName(), desc);
-            Property tier = new Tier(Integer.parseInt(cfgObject.getTier()));
-            propMap.put(tier.getName(), tier);
+        String nodeId = cfgObject.getNodeId();
+        Node node = Node.fromString(nodeId);
+        Map<String, Property> propMapCurr = nodeProps.get(node);
+        if (propMapCurr == null) {
+            return;
+        }
+        Map<String, Property> propMap = new HashMap<String, Property>();
+        for (String s : propMapCurr.keySet()) {
+            propMap.put(s, propMapCurr.get(s).clone());
+        }
+        Property desc = new Description(cfgObject.getNodeDescription());
+        propMap.put(desc.getName(), desc);
+        Property tier = new Tier(Integer.parseInt(cfgObject.getTier()));
+        propMap.put(tier.getName(), tier);
 
-            if (propMapCurr == null) {
-                if (nodeProps.putIfAbsent(node, propMap) != null) {
-                    // TODO rollback using Transactionality
-                    return;
-                }
-            } else {
-                if (!nodeProps.replace(node, propMapCurr, propMap)) {
-                    // TODO rollback using Transactionality
-                    return;
-                }
-            }
+        if (!nodeProps.replace(node, propMapCurr, propMap)) {
+            // TODO rollback using Transactionality
+            return;
+        }
 
-            log.info("Set Node {}'s Mode to {}", nodeId, cfgObject.getMode());
+        log.info("Set Node {}'s Mode to {}", nodeId, cfgObject.getMode());
 
-            if (modeChange) {
-                notifyModeChange(node, cfgObject.isProactive());
-            }
-        } catch (Exception e) {
-            log.debug("updateSwitchConfig: {}", e.getMessage());
+        if (modeChange) {
+            notifyModeChange(node, cfgObject.isProactive());
         }
     }
 
index f485fdd0dbf140e3c4ac51beb7fec0cc5ddce48a..1b1e6f49d153d3b57ccfeb8e120bf2c3d6c7581d 100644 (file)
 package org.opendaylight.controller.topologymanager;
 
 import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
+
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.controller.sal.core.Node.NodeIDType;
-import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
 import org.opendaylight.controller.sal.core.NodeConnector;
-import org.opendaylight.controller.sal.utils.GUIField;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -129,7 +123,7 @@ public class TopologyUserLinkConfig implements Serializable {
 
         if (!isValidNodeConnector(srcNodeConnector) ||
                 !isValidNodeConnector(dstNodeConnector)) {
-            logger.warn("Invalid NodeConnector");
+            logger.debug("Invalid NodeConnector in user link: {}", this);
             return false;
         }
 
index d0d9bfaa5ea144256dceeaae179e9ebef43e6221..0da1a2ee39ab73255aca9149095f15ceb6ac8c45 100644 (file)
@@ -9,8 +9,12 @@
 
 package org.opendaylight.controller.topologymanager.internal;
 
-import org.apache.felix.dm.Component;
+import java.util.Dictionary;
+import java.util.HashSet;
+import java.util.Hashtable;
+import java.util.Set;
 
+import org.apache.felix.dm.Component;
 import org.opendaylight.controller.clustering.services.IClusterContainerServices;
 import org.opendaylight.controller.configuration.IConfigurationContainerAware;
 import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
@@ -74,9 +78,14 @@ public class Activator extends ComponentActivatorAbstractBase {
     public void configureInstance(Component c, Object imp, String containerName) {
         if (imp.equals(TopologyManagerImpl.class)) {
             // export the service needed to listen to topology updates
+            Dictionary<String, Set<String>> props = new Hashtable<String, Set<String>>();
+            Set<String> propSet = new HashSet<String>();
+            propSet.add("topologymanager.configSaveEvent");
+            props.put("cachenames", propSet);
+
             c.setInterface(new String[] { IListenTopoUpdates.class.getName(),
                     ITopologyManager.class.getName(),
-                    IConfigurationContainerAware.class.getName() }, null);
+                    IConfigurationContainerAware.class.getName() }, props);
 
             c.add(createContainerServiceDependency(containerName).setService(
                     ITopologyService.class).setCallbacks("setTopoService",
index ce39fed5fcc4960efff1d8959335ced6be066893..a043cbe92595ff011483296bc1fc27be6bb7a9ee 100644 (file)
@@ -12,7 +12,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.util.ArrayList;
-import java.util.Collections;
+import java.util.Date;
 import java.util.Dictionary;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -23,6 +23,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArraySet;
 
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.felix.dm.Component;
@@ -65,31 +66,34 @@ import org.slf4j.LoggerFactory;
 public class TopologyManagerImpl implements ITopologyManager,
 IConfigurationContainerAware, IListenTopoUpdates, IObjectReader,
 CommandProvider {
-    private static final Logger log = LoggerFactory
-            .getLogger(TopologyManagerImpl.class);
-    private ITopologyService topoService = null;
-    private IClusterContainerServices clusterContainerService = null;
+    private static final Logger log = LoggerFactory.getLogger(TopologyManagerImpl.class);
+    private static final String SAVE = "Save";
+    private ITopologyService topoService;
+    private IClusterContainerServices clusterContainerService;
     // DB of all the Edges with properties which constitute our topology
-    private ConcurrentMap<Edge, Set<Property>> edgesDB = null;
+    private ConcurrentMap<Edge, Set<Property>> edgesDB;
     // DB of all NodeConnector which are part of ISL Edges, meaning they
     // are connected to another NodeConnector on the other side of an ISL link.
     // NodeConnector of a Production Edge is not part of this DB.
-    private ConcurrentMap<NodeConnector, Set<Property>> nodeConnectorsDB = null;
+    private ConcurrentMap<NodeConnector, Set<Property>> nodeConnectorsDB;
     // DB of all the NodeConnectors with an Host attached to it
-    private ConcurrentMap<NodeConnector, ImmutablePair<Host, Set<Property>>> hostsDB = null;
+    private ConcurrentMap<NodeConnector, ImmutablePair<Host, Set<Property>>> hostsDB;
     // Topology Manager Aware listeners
-    private Set<ITopologyManagerAware> topologyManagerAware = Collections
-            .synchronizedSet(new HashSet<ITopologyManagerAware>());
+    private Set<ITopologyManagerAware> topologyManagerAware =
+            new CopyOnWriteArraySet<ITopologyManagerAware>();;
 
     private static String ROOT = GlobalConstants.STARTUPHOME.toString();
-    private String userLinksFileName = null;
-    private ConcurrentMap<String, TopologyUserLinkConfig> userLinks;
+    private String userLinksFileName;
+    private ConcurrentMap<String, TopologyUserLinkConfig> userLinksDB;
+    private ConcurrentMap<Long, String> configSaveEvent;
+
 
     void nonClusterObjectCreate() {
         edgesDB = new ConcurrentHashMap<Edge, Set<Property>>();
         hostsDB = new ConcurrentHashMap<NodeConnector, ImmutablePair<Host, Set<Property>>>();
-        userLinks = new ConcurrentHashMap<String, TopologyUserLinkConfig>();
         nodeConnectorsDB = new ConcurrentHashMap<NodeConnector, Set<Property>>();
+        userLinksDB = new ConcurrentHashMap<String, TopologyUserLinkConfig>();
+        configSaveEvent = new ConcurrentHashMap<Long, String>();
     }
 
     void setTopologyManagerAware(ITopologyManagerAware s) {
@@ -136,8 +140,12 @@ CommandProvider {
      *
      */
     void init(Component c) {
+
+        allocateCaches();
+        retrieveCaches();
+
         String containerName = null;
-        Dictionary props = c.getServiceProperties();
+        Dictionary<?, ?> props = c.getServiceProperties();
         if (props != null) {
             containerName = (String) props.get("containerName");
         } else {
@@ -145,57 +153,103 @@ CommandProvider {
             containerName = "UNKNOWN";
         }
 
-        if (this.clusterContainerService == null) {
-            log.error("Cluster Services is null, not expected!");
-            return;
-        }
+        userLinksFileName = ROOT + "userTopology_" + containerName + ".conf";
+        registerWithOSGIConsole();
+        loadConfiguration();
+    }
 
-        if (this.topoService == null) {
-            log.error("Topology Services is null, not expected!");
+    @SuppressWarnings({ "unchecked", "deprecation" })
+    private void allocateCaches(){
+        if (this.clusterContainerService == null) {
+            nonClusterObjectCreate();
+            log.error("Cluster Services unavailable, allocated non-cluster caches!");
             return;
         }
 
         try {
-            this.edgesDB = (ConcurrentMap<Edge, Set<Property>>) this.clusterContainerService
-                    .createCache("topologymanager.edgesDB", EnumSet
-                            .of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+            this.edgesDB = (ConcurrentMap<Edge, Set<Property>>) this.clusterContainerService.createCache(
+                    "topologymanager.edgesDB", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
         } catch (CacheExistException cee) {
-            log.error("topologymanager.edgesDB Cache already exists - "
-                    + "destroy and recreate if needed");
+            log.debug("topologymanager.edgesDB Cache already exists - destroy and recreate if needed");
         } catch (CacheConfigException cce) {
-            log.error("topologymanager.edgesDB Cache configuration invalid - "
-                    + "check cache mode");
+            log.error("topologymanager.edgesDB Cache configuration invalid - check cache mode");
         }
 
         try {
             this.hostsDB = (ConcurrentMap<NodeConnector, ImmutablePair<Host, Set<Property>>>) this.clusterContainerService
-                    .createCache("topologymanager.hostsDB", EnumSet
-                            .of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+                    .createCache("topologymanager.hostsDB", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
         } catch (CacheExistException cee) {
-            log.error("topologymanager.hostsDB Cache already exists - "
-                    + "destroy and recreate if needed");
+            log.debug("topologymanager.hostsDB Cache already exists - destroy and recreate if needed");
         } catch (CacheConfigException cce) {
-            log.error("topologymanager.hostsDB Cache configuration invalid - "
-                    + "check cache mode");
+            log.error("topologymanager.hostsDB Cache configuration invalid - check cache mode");
         }
 
         try {
             this.nodeConnectorsDB = (ConcurrentMap<NodeConnector, Set<Property>>) this.clusterContainerService
-                    .createCache("topologymanager.nodeConnectorDB", EnumSet
-                            .of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+                    .createCache("topologymanager.nodeConnectorDB", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
         } catch (CacheExistException cee) {
-            log.error("topologymanager.nodeConnectorDB Cache already exists"
-                    + " - destroy and recreate if needed");
+            log.debug("topologymanager.nodeConnectorDB Cache already exists - destroy and recreate if needed");
         } catch (CacheConfigException cce) {
-            log.error("topologymanager.nodeConnectorDB Cache configuration "
-                    + "invalid - check cache mode");
+            log.error("topologymanager.nodeConnectorDB Cache configuration invalid - check cache mode");
         }
 
-        userLinks = new ConcurrentHashMap<String, TopologyUserLinkConfig>();
+        try {
+            this.userLinksDB = (ConcurrentMap<String, TopologyUserLinkConfig>) this.clusterContainerService
+                    .createCache("topologymanager.userLinksDB", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+        } catch (CacheExistException cee) {
+            log.debug("topologymanager.userLinksDB Cache already exists - destroy and recreate if needed");
+        } catch (CacheConfigException cce) {
+            log.error("topologymanager.userLinksDB Cache configuration invalid - check cache mode");
+        }
+
+        try {
+            this.configSaveEvent = (ConcurrentMap<Long, String>) this.clusterContainerService
+                    .createCache("topologymanager.configSaveEvent", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+        } catch (CacheExistException cee) {
+            log.debug("topologymanager.configSaveEvent Cache already exists - destroy and recreate if needed");
+        } catch (CacheConfigException cce) {
+            log.error("topologymanager.configSaveEvent Cache configuration invalid - check cache mode");
+        }
+
+    }
+
+    @SuppressWarnings({ "unchecked", "deprecation" })
+    private void retrieveCaches() {
+        if (this.clusterContainerService == null) {
+            log.error("Cluster Services is null, can't retrieve caches.");
+            return;
+        }
+
+        this.edgesDB = (ConcurrentMap<Edge, Set<Property>>) this.clusterContainerService
+                .getCache("topologymanager.edgesDB");
+        if (edgesDB == null) {
+            log.error("Failed to get cache for topologymanager.edgesDB");
+        }
+
+        this.hostsDB = (ConcurrentMap<NodeConnector, ImmutablePair<Host, Set<Property>>>) this.clusterContainerService
+                .getCache("topologymanager.hostsDB");
+        if (hostsDB == null) {
+            log.error("Failed to get cache for topologymanager.hostsDB");
+        }
+
+        this.nodeConnectorsDB = (ConcurrentMap<NodeConnector, Set<Property>>) this.clusterContainerService
+                .getCache("topologymanager.nodeConnectorDB");
+        if (nodeConnectorsDB == null) {
+            log.error("Failed to get cache for topologymanager.nodeConnectorDB");
+        }
+
+        this.userLinksDB = (ConcurrentMap<String, TopologyUserLinkConfig>) this.clusterContainerService
+                .getCache("topologymanager.userLinksDB");
+        if (userLinksDB == null) {
+            log.error("Failed to get cache for topologymanager.userLinksDB");
+        }
+
+        this.configSaveEvent = (ConcurrentMap<Long, String>) this.clusterContainerService
+                .getCache("topologymanager.configSaveEvent");
+        if (configSaveEvent == null) {
+            log.error("Failed to get cache for topologymanager.configSaveEvent");
+        }
 
-        userLinksFileName = ROOT + "userTopology_" + containerName + ".conf";
-        registerWithOSGIConsole();
-        loadConfiguration();
     }
 
     /**
@@ -217,62 +271,38 @@ CommandProvider {
      *
      */
     void destroy() {
-        if (this.clusterContainerService == null) {
-            log.error("Cluster Services is null, not expected!");
-            this.edgesDB = null;
-            this.hostsDB = null;
-            this.nodeConnectorsDB = null;
-            return;
-        }
-        this.clusterContainerService.destroyCache("topologymanager.edgesDB");
-        this.edgesDB = null;
-        this.clusterContainerService.destroyCache("topologymanager.hostsDB");
-        this.hostsDB = null;
-        this.clusterContainerService
-        .destroyCache("topologymanager.nodeConnectorDB");
-        this.nodeConnectorsDB = null;
-        log.debug("Topology Manager DB Deallocated");
     }
 
     @SuppressWarnings("unchecked")
     private void loadConfiguration() {
         ObjectReader objReader = new ObjectReader();
-        ConcurrentMap<String, TopologyUserLinkConfig> confList = (ConcurrentMap<String, TopologyUserLinkConfig>) objReader
-                .read(this, userLinksFileName);
-
-        if (confList == null) {
-            return;
-        }
+        ConcurrentMap<String, TopologyUserLinkConfig> confList =
+                (ConcurrentMap<String, TopologyUserLinkConfig>) objReader.read(this, userLinksFileName);
 
-        for (TopologyUserLinkConfig conf : confList.values()) {
-            addUserLink(conf);
+        if (confList != null) {
+            for (TopologyUserLinkConfig conf : confList.values()) {
+                addUserLink(conf);
+            }
         }
     }
 
     @Override
     public Status saveConfig() {
-        // Publish the save config event to the cluster nodes
-        /**
-         * Get the CLUSTERING SERVICES WORKING BEFORE TRYING THIS
-         *
-         * configSaveEvent.put(new Date().getTime(), SAVE);
-         */
+        // Publish the save config event to the cluster
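+        // so that other controllers in the cluster can pick up the save request
+        // from the shared configSaveEvent cache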
+        configSaveEvent.put(new Date().getTime(), SAVE);
         return saveConfigInternal();
     }
 
     public Status saveConfigInternal() {
-        Status retS;
         ObjectWriter objWriter = new ObjectWriter();
 
-        retS = objWriter
-                .write(new ConcurrentHashMap<String, TopologyUserLinkConfig>(
-                        userLinks), userLinksFileName);
+        Status saveStatus = objWriter.write(
+                new ConcurrentHashMap<String, TopologyUserLinkConfig>(userLinksDB), userLinksFileName);
 
-        if (retS.isSuccess()) {
-            return retS;
-        } else {
-            return new Status(StatusCode.INTERNALERROR, "Save failed");
+        if (!saveStatus.isSuccess()) {
+            return new Status(StatusCode.INTERNALERROR, "Topology save failed: " + saveStatus.getDescription());
         }
+        return saveStatus;
     }
 
     @Override
@@ -281,31 +311,25 @@ CommandProvider {
             return null;
         }
 
-        HashMap<Node, Set<Edge>> res = new HashMap<Node, Set<Edge>>();
-        for (Edge key : this.edgesDB.keySet()) {
+        Map<Node, Set<Edge>> res = new HashMap<Node, Set<Edge>>();
+        for (Edge edge : this.edgesDB.keySet()) {
             // Lets analyze the tail
-            Node node = key.getTailNodeConnector().getNode();
+            Node node = edge.getTailNodeConnector().getNode();
             Set<Edge> nodeEdges = res.get(node);
             if (nodeEdges == null) {
                 nodeEdges = new HashSet<Edge>();
+                res.put(node, nodeEdges);
             }
-            nodeEdges.add(key);
-            // We need to re-add to the MAP even if the element was
-            // already there so in case of clustered services the map
-            // gets updated in the cluster
-            res.put(node, nodeEdges);
+            nodeEdges.add(edge);
 
             // Lets analyze the head
-            node = key.getHeadNodeConnector().getNode();
+            node = edge.getHeadNodeConnector().getNode();
             nodeEdges = res.get(node);
             if (nodeEdges == null) {
                 nodeEdges = new HashSet<Edge>();
+                res.put(node, nodeEdges);
             }
-            nodeEdges.add(key);
-            // We need to re-add to the MAP even if the element was
-            // already there so in case of clustered services the map
-            // gets updated in the cluster
-            res.put(node, nodeEdges);
+            nodeEdges.add(edge);
         }
 
         return res;
@@ -341,10 +365,8 @@ CommandProvider {
      * @return true if it is a production link
      */
     public boolean isProductionLink(Edge e) {
-        return (e.getHeadNodeConnector().getType()
-                .equals(NodeConnector.NodeConnectorIDType.PRODUCTION) || e
-                .getTailNodeConnector().getType()
-                .equals(NodeConnector.NodeConnectorIDType.PRODUCTION));
+        return (e.getHeadNodeConnector().getType().equals(NodeConnector.NodeConnectorIDType.PRODUCTION)
+                || e.getTailNodeConnector().getType().equals(NodeConnector.NodeConnectorIDType.PRODUCTION));
     }
 
     /**
@@ -360,31 +382,20 @@ CommandProvider {
             return null;
         }
 
-        HashMap<Edge, Set<Property>> res = new HashMap<Edge, Set<Property>>();
-        for (Edge key : this.edgesDB.keySet()) {
+        Map<Edge, Set<Property>> edgeMap = new HashMap<Edge, Set<Property>>();
+        Set<Property> props;
+        for (Map.Entry<Edge, Set<Property>> edgeEntry : edgesDB.entrySet()) {
             // Sets of props are copied because the composition of
             // those properties could change with time
-            HashSet<Property> prop = new HashSet<Property>(
-                    this.edgesDB.get(key));
+            props = new HashSet<Property>(edgeEntry.getValue());
             // We can simply reuse the key because the object is
             // immutable so doesn't really matter that we are
             // referencing the only owned by a different table, the
             // meaning is the same because doesn't change with time.
-            res.put(key, prop);
+            edgeMap.put(edgeEntry.getKey(), props);
         }
 
-        return res;
-    }
-
-    // TODO remove with spring-dm removal
-    /**
-     * @param set
-     *            the topologyAware to set
-     */
-    public void setTopologyAware(Set<Object> set) {
-        for (Object s : set) {
-            setTopologyManagerAware((ITopologyManagerAware) s);
-        }
+        return edgeMap;
     }
 
     @Override
@@ -393,7 +404,7 @@ CommandProvider {
             return null;
         }
 
-        return (this.hostsDB.keySet());
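+        // return a copy of the key set so callers do not hold a live view of the clustered cache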
+        return (new HashSet<NodeConnector>(this.hostsDB.keySet()));
     }
 
     @Override
@@ -402,90 +413,92 @@ CommandProvider {
             return null;
         }
         HashMap<Node, Set<NodeConnector>> res = new HashMap<Node, Set<NodeConnector>>();
-
-        for (NodeConnector p : this.hostsDB.keySet()) {
-            Node n = p.getNode();
-            Set<NodeConnector> pSet = res.get(n);
-            if (pSet == null) {
+        Node node;
+        Set<NodeConnector> portSet;
+        for (NodeConnector nc : this.hostsDB.keySet()) {
+            node = nc.getNode();
+            portSet = res.get(node);
+            if (portSet == null) {
                 // Create the HashSet if null
-                pSet = new HashSet<NodeConnector>();
-                res.put(n, pSet);
+                portSet = new HashSet<NodeConnector>();
+                res.put(node, portSet);
             }
 
             // Keep updating the HashSet, given this is not a
             // clustered map we can just update the set without
             // worrying to update the hashmap.
-            pSet.add(p);
+            portSet.add(nc);
         }
 
         return (res);
     }
 
     @Override
-    public Host getHostAttachedToNodeConnector(NodeConnector p) {
-        if (this.hostsDB == null) {
+    public Host getHostAttachedToNodeConnector(NodeConnector port) {
+        ImmutablePair<Host, Set<Property>> host;
+        if (this.hostsDB == null || (host = this.hostsDB.get(port)) == null) {
             return null;
         }
-        if (this.hostsDB.get(p) == null)
-            return null;
-
-        return (this.hostsDB.get(p).getLeft());
+        return host.getLeft();
     }
 
     @Override
-    public void updateHostLink(NodeConnector p, Host h, UpdateType t,
-            Set<Property> props) {
-        if (this.hostsDB == null) {
-            return;
+    public void updateHostLink(NodeConnector port, Host h, UpdateType t, Set<Property> props) {
+
+        // Clone the property set if non-null, otherwise create an
+        // empty one: caches allocated via infinispan don't allow
+        // null values
+        if (props == null) {
+            props = new HashSet<Property>();
+        } else {
+            props = new HashSet<Property>(props);
         }
+        ImmutablePair<Host, Set<Property>> thisHost = new ImmutablePair<Host, Set<Property>>(h, props);
 
         switch (t) {
         case ADDED:
         case CHANGED:
-            // Clone the property set in case non null else just
-            // create an empty one. Caches allocated via infinispan
-            // don't allow null values
-            if (props == null) {
-                props = new HashSet<Property>();
-            } else {
-                props = new HashSet<Property>(props);
-            }
-
-            this.hostsDB.put(p, new ImmutablePair(h, props));
+            this.hostsDB.put(port, thisHost);
             break;
         case REMOVED:
-            this.hostsDB.remove(p);
+            // remove only if the entry hasn't been concurrently modified
+            this.hostsDB.remove(port, thisHost);
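+            // ConcurrentMap.remove(key, value) only removes the entry if it still maps to an equal pair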
             break;
         }
     }
 
-    private TopoEdgeUpdate edgeUpdate(Edge e, UpdateType type,
-            Set<Property> props) {
+    private TopoEdgeUpdate edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
         switch (type) {
         case ADDED:
             // Make sure the props are non-null
             if (props == null) {
-                props = (Set<Property>) new HashSet();
+                props = new HashSet<Property>();
             } else {
-                // Copy the set so noone is going to change the content
-                props = (Set<Property>) new HashSet(props);
+                props = new HashSet<Property>(props);
+            }
+
+            // in case of node switch-over to a different cluster controller,
+            // retain the existing edge properties
+            Set<Property> currentProps = this.edgesDB.get(e);
+            if (currentProps != null){
+                props.addAll(currentProps);
             }
 
             // Now make sure there is the creation timestamp for the
-            // edge, if not there timestamp with the first update
+            // edge; if not there, stamp it with the time of the first update
             boolean found_create = false;
             for (Property prop : props) {
                 if (prop instanceof TimeStamp) {
                     TimeStamp t = (TimeStamp) prop;
                     if (t.getTimeStampName().equals("creation")) {
                         found_create = true;
+                        break;
                     }
                 }
             }
 
             if (!found_create) {
-                TimeStamp t = new TimeStamp(System.currentTimeMillis(),
-                        "creation");
+                TimeStamp t = new TimeStamp(System.currentTimeMillis(), "creation");
                 props.add(t);
             }
 
@@ -498,10 +511,8 @@ CommandProvider {
             // for now.
             // The DB only contains ISL ports
             if (isISLink(e)) {
-                this.nodeConnectorsDB.put(e.getHeadNodeConnector(),
-                        new HashSet<Property>());
-                this.nodeConnectorsDB.put(e.getTailNodeConnector(),
-                        new HashSet<Property>());
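+                // sized 1: no properties are tracked for these ISL connectors yet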
+                this.nodeConnectorsDB.put(e.getHeadNodeConnector(), new HashSet<Property>(1));
+                this.nodeConnectorsDB.put(e.getTailNodeConnector(), new HashSet<Property>(1));
             }
             log.trace("Edge {}  {}", e.toString(), type.name());
             break;
@@ -521,29 +532,29 @@ CommandProvider {
             log.trace("Edge {}  {}", e.toString(), type.name());
             break;
         case CHANGED:
-            Set<Property> old_props = this.edgesDB.get(e);
+            Set<Property> oldProps = this.edgesDB.get(e);
 
             // When property changes lets make sure we can change it
             // all except the creation time stamp because that should
             // be changed only when the edge is destroyed and created
             // again
-            TimeStamp tc = null;
-            for (Property prop : old_props) {
+            TimeStamp timeStamp = null;
+            for (Property prop : oldProps) {
                 if (prop instanceof TimeStamp) {
-                    TimeStamp t = (TimeStamp) prop;
-                    if (t.getTimeStampName().equals("creation")) {
-                        tc = t;
+                    TimeStamp tsProp = (TimeStamp) prop;
+                    if (tsProp.getTimeStampName().equals("creation")) {
+                        timeStamp = tsProp;
+                        break;
                     }
                 }
             }
 
             // Now lets make sure new properties are non-null
-            // Make sure the props are non-null
             if (props == null) {
-                props = (Set<Property>) new HashSet();
+                props = new HashSet<Property>();
             } else {
                 // Copy the set so noone is going to change the content
-                props = (Set<Property>) new HashSet(props);
+                props = new HashSet<Property>(props);
             }
 
             // Now lets remove the creation property if exist in the
@@ -554,13 +565,14 @@ CommandProvider {
                     TimeStamp t = (TimeStamp) prop;
                     if (t.getTimeStampName().equals("creation")) {
                         i.remove();
+                        break;
                     }
                 }
             }
 
             // Now lets add the creation timestamp in it
-            if (tc != null) {
-                props.add(tc);
+            if (timeStamp != null) {
+                props.add(timeStamp);
             }
 
             // Finally update
@@ -587,7 +599,7 @@ CommandProvider {
             try {
                 s.edgeUpdate(teuList);
             } catch (Exception exc) {
-                log.error("Exception on callback", exc);
+                log.error("Exception on edge update:", exc);
             }
         }
 
@@ -601,79 +613,78 @@ CommandProvider {
 
 
     private Edge getLinkTuple(TopologyUserLinkConfig link) {
-        Edge linkTuple = null;
         NodeConnector srcNodeConnector = NodeConnector.fromString(link.getSrcNodeConnector());
         NodeConnector dstNodeConnector = NodeConnector.fromString(link.getDstNodeConnector());
-        if (srcNodeConnector == null || dstNodeConnector == null) return null;
         try {
-            linkTuple = new Edge(srcNodeConnector, dstNodeConnector);
+            return new Edge(srcNodeConnector, dstNodeConnector);
         } catch (Exception e) {
+            return null;
         }
-        return linkTuple;
     }
 
     @Override
     public ConcurrentMap<String, TopologyUserLinkConfig> getUserLinks() {
-        return userLinks;
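+        // hand out a snapshot copy so callers cannot modify the clustered cache directly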
+        return new ConcurrentHashMap<String, TopologyUserLinkConfig>(userLinksDB);
     }
 
     @Override
-    public Status addUserLink(TopologyUserLinkConfig link) {
-        if (!link.isValid()) {
+    public Status addUserLink(TopologyUserLinkConfig userLink) {
+        if (!userLink.isValid()) {
             return new Status(StatusCode.BADREQUEST,
-                    "Configuration Invalid. Please check the parameters");
+                    "User link configuration invalid.");
         }
-        if (userLinks.get(link.getName()) != null) {
-            return new Status(StatusCode.CONFLICT, "Link with name : "
-                    + link.getName()
-                    + " already exists. Please use another name");
+        userLink.setStatus(TopologyUserLinkConfig.STATUS.LINKDOWN);
+
+        // Check if this link is already configured
+        //NOTE: infinispan cache doesn't support Map.containsValue()
+        // (which is linear time in most ConcurrentMap impl anyway)
+        for (TopologyUserLinkConfig existingLink : userLinksDB.values()) {
+            if (existingLink.equals(userLink)) {
+                return new Status(StatusCode.CONFLICT, "Link configuration exists");
+            }
         }
-        if (userLinks.containsValue(link)) {
-            return new Status(StatusCode.CONFLICT, "Link configuration exists");
+        // attempt the put; if a mapping for this key already exists, return a conflict
+        if (userLinksDB.putIfAbsent(userLink.getName(), userLink) != null) {
+            return new Status(StatusCode.CONFLICT, "Link with name : " + userLink.getName()
+                    + " already exists. Please use another name");
         }
 
-        link.setStatus(TopologyUserLinkConfig.STATUS.LINKDOWN);
-        userLinks.put(link.getName(), link);
-
-        Edge linkTuple = getLinkTuple(link);
+        Edge linkTuple = getLinkTuple(userLink);
         if (linkTuple != null) {
             if (!isProductionLink(linkTuple)) {
                 edgeUpdate(linkTuple, UpdateType.ADDED, new HashSet<Property>());
             }
 
-            linkTuple = getReverseLinkTuple(link);
+            linkTuple = getReverseLinkTuple(userLink);
             if (linkTuple != null) {
-                link.setStatus(TopologyUserLinkConfig.STATUS.SUCCESS);
+                userLink.setStatus(TopologyUserLinkConfig.STATUS.SUCCESS);
                 if (!isProductionLink(linkTuple)) {
                     edgeUpdate(linkTuple, UpdateType.ADDED, new HashSet<Property>());
                 }
             }
         }
-        return new Status(StatusCode.SUCCESS, null);
+        return new Status(StatusCode.SUCCESS);
     }
 
     @Override
     public Status deleteUserLink(String linkName) {
         if (linkName == null) {
-            return new Status(StatusCode.BADREQUEST,
-                    "A valid linkName is required to Delete a link");
+            return new Status(StatusCode.BADREQUEST, "User link name cannot be null.");
         }
 
-        TopologyUserLinkConfig link = userLinks.get(linkName);
-
-        Edge linkTuple = getLinkTuple(link);
-        userLinks.remove(linkName);
-        if (linkTuple != null) {
-            if (!isProductionLink(linkTuple)) {
+        TopologyUserLinkConfig link = userLinksDB.remove(linkName);
+        Edge linkTuple;
+        if (link != null && (linkTuple = getLinkTuple(link)) != null) {
+            if (!isProductionLink(linkTuple)) {
                 edgeUpdate(linkTuple, UpdateType.REMOVED, null);
             }
 
             linkTuple = getReverseLinkTuple(link);
-            if ((linkTuple != null) && !isProductionLink(linkTuple)) {
+            if (linkTuple != null && !isProductionLink(linkTuple)) {
                 edgeUpdate(linkTuple, UpdateType.REMOVED, null);
             }
         }
-        return new Status(StatusCode.SUCCESS, null);
+        return new Status(StatusCode.SUCCESS);
     }
 
     private void registerWithOSGIConsole() {
@@ -695,8 +706,8 @@ CommandProvider {
     }
 
     public void _printUserLink(CommandInterpreter ci) {
-        for (String name : this.userLinks.keySet()) {
-            TopologyUserLinkConfig linkConfig = userLinks.get(name);
+        for (String name : this.userLinksDB.keySet()) {
+            TopologyUserLinkConfig linkConfig = userLinksDB.get(name);
             ci.println("Name : " + name);
             ci.println(linkConfig);
             ci.println("Edge " + getLinkTuple(linkConfig));
@@ -770,7 +781,6 @@ CommandProvider {
     @Override
     public Object readObject(ObjectInputStream ois)
             throws FileNotFoundException, IOException, ClassNotFoundException {
-        // TODO Auto-generated method stub
         return ois.readObject();
     }