import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import java.util.concurrent.ConcurrentMap;
import org.opendaylight.controller.hosttracker.IfHostListener;
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.switchmanager.ISwitchManager;
import org.opendaylight.controller.switchmanager.Subnet;
import org.opendaylight.controller.topologymanager.ITopologyManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ArpHandler implements IHostFinder, IListenDataPacket {
private static final Logger logger = LoggerFactory
private IDataPacketService dataPacketService = null;
private Set<IfHostListener> hostListener = Collections
.synchronizedSet(new HashSet<IfHostListener>());
- private ConcurrentHashMap<InetAddress, Set<HostNodeConnector>> arpRequestors;
- private ConcurrentHashMap<InetAddress, Short> countDownTimers;
+ private ConcurrentMap<InetAddress, Set<HostNodeConnector>> arpRequestors;
+ private ConcurrentMap<InetAddress, Short> countDownTimers;
private Timer periodicTimer;
void setHostListener(IfHostListener s) {
package org.opendaylight.controller.clustering.services_implementation.internal;
import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
+import org.opendaylight.controller.sal.core.IContainerAware;
import org.opendaylight.controller.clustering.services.ICacheUpdateAware;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
* ComponentActivatorAbstractBase.
*
*/
+ @Override
public void init() {
}
* cleanup done by ComponentActivatorAbstractBase
*
*/
+ @Override
public void destroy() {
}
* instantiated in order to get an fully working implementation
* Object
*/
+ @Override
public Object[] getGlobalImplementations() {
Object[] res = { ClusterManager.class, ClusterGlobalManager.class };
return res;
* instantiated in order to get an fully working implementation
* Object
*/
+ @Override
public Object[] getImplementations() {
Object[] res = { ClusterContainerManager.class };
return res;
* also optional per-container different behavior if needed, usually
* should not be the case though.
*/
+ @Override
public void configureInstance(Component c, Object imp, String containerName) {
if (imp.equals(ClusterContainerManager.class)) {
c.setInterface(new String[] { IClusterContainerServices.class.getName() },
* needed as long as the same routine can configure multiple
* implementations
*/
+ @Override
public void configureGlobalInstance(Component c, Object imp) {
if (imp.equals(ClusterManager.class)) {
// export the service for Apps and Plugins
- c.setInterface(new String[] { IClusterServices.class.getName() }, null);
+ c.setInterface(new String[] { IClusterServices.class.getName(), IContainerAware.class.getName() }, null);
}
if (imp.equals(ClusterGlobalManager.class)) {
package org.opendaylight.controller.clustering.services_implementation.internal;
-import java.io.PrintWriter;
-import java.io.StringWriter;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import org.opendaylight.controller.clustering.services.IGetUpdates;
import org.opendaylight.controller.clustering.services.IListenRoleChange;
import org.opendaylight.controller.clustering.services.ListenRoleChangeAddException;
+import org.opendaylight.controller.sal.core.IContainerAware;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class ClusterManager implements IClusterServices {
+public class ClusterManager implements IClusterServices, IContainerAware {
protected static final Logger logger = LoggerFactory
.getLogger(ClusterManager.class);
private DefaultCacheManager cm;
try {
Enumeration<NetworkInterface> e = NetworkInterface.getNetworkInterfaces();
while (e.hasMoreElements()) {
- NetworkInterface n = (NetworkInterface) e.nextElement();
+ NetworkInterface n = e.nextElement();
Enumeration<InetAddress> ee = n.getInetAddresses();
while (ee.hasMoreElements()) {
- InetAddress i = (InetAddress) ee.nextElement();
+ InetAddress i = ee.nextElement();
myAddresses.add(i);
}
}
return null;
}
+ @Override
public List<InetAddress> getClusteredControllers() {
EmbeddedCacheManager manager = this.cm;
if (manager == null) {
return null;
}
List<Address> controllers = manager.getMembers();
- if ((controllers == null) || controllers.size() == 0)
+ if ((controllers == null) || controllers.isEmpty()) {
return null;
+ }
List<InetAddress> clusteredControllers = new ArrayList<InetAddress>();
for (Address a : controllers) {
InetAddress inetAddress = addressToInetAddress(a);
if (inetAddress != null
- && !inetAddress.getHostAddress().equals(loopbackAddress))
+ && !inetAddress.getHostAddress().equals(loopbackAddress)) {
clusteredControllers.add(inetAddress);
+ }
}
return clusteredControllers;
}
+ @Override
public InetAddress getMyAddress() {
EmbeddedCacheManager manager = this.cm;
if (manager == null) {
}
}
}
+
+ private void removeContainerCaches(String containerName) {
+ logger.info("Destroying caches for container {}", containerName);
+ for (String cacheName : this.getCacheList(containerName)) {
+ this.destroyCache(containerName, cacheName);
+ }
+ }
+
+ @Override
+ public void containerCreate(String arg0) {
+ // no op
+ }
+
+ @Override
+ public void containerDestroy(String container) {
+ removeContainerCaches(container);
+ }
}
# of.messageResponseTimer=2000
# The switch liveness timeout value (default 60500 msec)
# of.switchLivenessTimeout=60500
+# The size of the queue holding pending statistics requests (default 64). For a large network of n switches, it is recommended to set the queue size to at least n
+# of.statsQueueSize = 64
# The flow statistics polling interval in second (default 10 sec)
# of.flowStatsPollInterval=10
# The port statistics polling interval in second (default 5 sec)
# of.discoveryInterval=300
# The timeout multiple of discovery interval
# of.discoveryTimeoutMultiple=2
-# For newly added ports, allow one more retry if the elapsed time exceeds this threshold (default 10 sec)
-# of.discoveryThreshold=10
+# For newly added ports, allow one more retry if the elapsed time exceeds this threshold (default 30 sec)
+# of.discoveryThreshold=30
# The maximum number of ports handled in one discovery batch (default 1024)
# of.discoveryBatchMaxPorts=1024
import org.opendaylight.controller.hosttracker.IfIptoHost;
import org.opendaylight.controller.hosttracker.IfNewHostNotify;
import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;
-import org.opendaylight.controller.sal.utils.StatusCode;
import org.opendaylight.controller.sal.utils.GlobalConstants;
import org.opendaylight.controller.sal.utils.IObjectReader;
import org.opendaylight.controller.sal.utils.ObjectReader;
import org.opendaylight.controller.sal.utils.ObjectWriter;
import org.opendaylight.controller.sal.utils.Status;
+import org.opendaylight.controller.sal.utils.StatusCode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
}
- @SuppressWarnings("deprecation")
- private void destroyCaches() {
- if (this.clusterContainerService == null) {
- log
- .info("un-initialized clusterContainerService, can't destroy cache");
- return;
- }
-
- clusterContainerService.destroyCache("forwarding.staticrouting.routes");
- clusterContainerService
- .destroyCache("forwarding.staticrouting.configs");
- clusterContainerService
- .destroyCache("forwarding.staticrouting.configSaveEvent");
-
- }
-
@Override
public void entryCreated(Long key, String cacheName, boolean local) {
}
log.debug("Destroy all the Static Routing Rules given we are "
+ "shutting down");
- destroyCaches();
gatewayProbeTimer.cancel();
// Clear the listener so to be ready in next life
inactiveStaticHosts = new ConcurrentHashMap<NodeConnector, HostNodeConnector>();
}
- @SuppressWarnings("deprecation")
- private void destroyCache() {
- if (this.clusterContainerService == null) {
- logger.error("un-initialized clusterMger, can't destroy cache");
- return;
- }
- logger.debug("Destroying Cache for HostTracker");
- this.clusterContainerService.destroyCache("hostTrackerAH");
- this.clusterContainerService.destroyCache("hostTrackerIH");
- nonClusterObjectCreate();
- }
public void shutDown() {
}
*
*/
void destroy() {
- destroyCache();
}
/**
import java.util.ArrayList;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import javax.ws.rs.Consumes;
import org.opendaylight.controller.northbound.commons.RestMessages;
import org.opendaylight.controller.northbound.commons.exception.InternalServerErrorException;
import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
-import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
import org.opendaylight.controller.northbound.commons.exception.UnauthorizedException;
import org.opendaylight.controller.northbound.commons.utils.NorthboundUtils;
import org.opendaylight.controller.sal.authorization.Privilege;
@Path("/")
public class SubnetsNorthboundJAXRS {
- protected static final Logger logger = LoggerFactory
- .getLogger(SubnetsNorthboundJAXRS.class);
+ protected static final Logger logger = LoggerFactory.getLogger(SubnetsNorthboundJAXRS.class);
private String username;
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@StatusCodes({ @ResponseCode(code = 404, condition = "The containerName passed was not found") })
@TypeHint(SubnetConfigs.class)
- public SubnetConfigs listSubnets(
- @PathParam("containerName") String containerName) {
- if (!NorthboundUtils.isAuthorized(
- getUserName(), containerName, Privilege.READ, this)) {
- throw new UnauthorizedException(
- "User is not authorized to perform this operation on container "
- + containerName);
+ public SubnetConfigs listSubnets(@PathParam("containerName") String containerName) {
+ if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.READ, this)) {
+ throw new UnauthorizedException("User is not authorized to perform this operation on container "
+ + containerName);
}
ISwitchManager switchManager = null;
- switchManager = (ISwitchManager) ServiceHelper.getInstance(
- ISwitchManager.class, containerName, this);
+ switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName, this);
if (switchManager == null) {
- throw new ResourceNotFoundException(
- RestMessages.NOCONTAINER.toString());
+ throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
}
return new SubnetConfigs(switchManager.getSubnetsConfigList());
}
@GET
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@StatusCodes({
- @ResponseCode(code = 404, condition = "The containerName passed was not found"),
- @ResponseCode(code = 404, condition = "Subnet does not exist") })
- @TypeHint(SubnetConfig.class)
+ @ResponseCode(code = 404, condition = "The containerName passed was not found"),
+ @ResponseCode(code = 404, condition = "Subnet does not exist") })
+ @TypeHint(SubnetConfig.class)
public SubnetConfig listSubnet(
- @PathParam("containerName") String containerName,
- @PathParam("subnetName") String subnetName) {
+ @PathParam("containerName") String containerName,
+ @PathParam("subnetName") String subnetName) {
- if (!NorthboundUtils.isAuthorized(
- getUserName(), containerName, Privilege.READ, this)) {
- throw new UnauthorizedException(
- "User is not authorized to perform this operation on container "
- + containerName);
+ if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.READ, this)) {
+ throw new UnauthorizedException("User is not authorized to perform this operation on container "
+ + containerName);
}
ISwitchManager switchManager = null;
- switchManager = (ISwitchManager) ServiceHelper.getInstance(
- ISwitchManager.class, containerName, this);
+ switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName, this);
if (switchManager == null) {
- throw new ResourceNotFoundException(
- RestMessages.NOCONTAINER.toString());
+ throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
}
SubnetConfig res = switchManager.getSubnetConfig(subnetName);
if (res == null) {
- throw new ResourceNotFoundException(
- RestMessages.NOSUBNET.toString());
+ throw new ResourceNotFoundException(RestMessages.NOSUBNET.toString());
} else {
return res;
}
@Path("/{containerName}/{subnetName}")
@POST
@StatusCodes({
- @ResponseCode(code = 404, condition = "Invalid Data passed"),
- @ResponseCode(code = 201, condition = "Subnet added"),
- @ResponseCode(code = 500, condition = "Addition of subnet failed") })
- public Response addSubnet(@PathParam("containerName") String containerName,
- @PathParam("subnetName") String subnetName,
- @QueryParam("subnet") String subnet) {
-
- if (!NorthboundUtils.isAuthorized(
- getUserName(), containerName, Privilege.WRITE, this)) {
- throw new UnauthorizedException(
- "User is not authorized to perform this operation on container "
- + containerName);
+ @ResponseCode(code = 404, condition = "Invalid Data passed"),
+ @ResponseCode(code = 201, condition = "Subnet added"),
+ @ResponseCode(code = 500, condition = "Addition of subnet failed") })
+ public Response addSubnet(
+ @PathParam("containerName") String containerName,
+ @PathParam("subnetName") String subnetName, @QueryParam("subnet") String subnet) {
+
+ if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.WRITE, this)) {
+ throw new UnauthorizedException("User is not authorized to perform this operation on container "
+ + containerName);
}
if (subnetName == null) {
- throw new ResourceNotFoundException(
- RestMessages.INVALIDDATA.toString());
+ throw new ResourceNotFoundException(RestMessages.INVALIDDATA.toString());
}
if (subnet == null) {
- throw new ResourceNotFoundException(
- RestMessages.INVALIDDATA.toString());
+ throw new ResourceNotFoundException(RestMessages.INVALIDDATA.toString());
}
ISwitchManager switchManager = null;
- switchManager = (ISwitchManager) ServiceHelper.getInstance(
- ISwitchManager.class, containerName, this);
+ switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName, this);
if (switchManager == null) {
- throw new ResourceNotFoundException(
- RestMessages.NOCONTAINER.toString());
+ throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
}
- SubnetConfig cfgObject = new SubnetConfig(subnetName, subnet,
- new ArrayList<String>(0));
+ SubnetConfig cfgObject = new SubnetConfig(subnetName, subnet, new ArrayList<String>(0));
Status status = switchManager.addSubnet(cfgObject);
if (status.isSuccess()) {
return Response.status(Response.Status.CREATED).build();
@Path("/{containerName}/{subnetName}")
@DELETE
@StatusCodes({
- @ResponseCode(code = 404, condition = "The containerName passed was not found"),
- @ResponseCode(code = 500, condition = "Removal of subnet failed") })
+ @ResponseCode(code = 404, condition = "The containerName passed was not found"),
+ @ResponseCode(code = 500, condition = "Removal of subnet failed") })
public Response removeSubnet(
- @PathParam("containerName") String containerName,
- @PathParam("subnetName") String subnetName) {
+ @PathParam("containerName") String containerName,
+ @PathParam("subnetName") String subnetName) {
if (subnetName == null) {
- throw new ResourceNotFoundException(
- RestMessages.INVALIDDATA.toString());
+ throw new ResourceNotFoundException(RestMessages.INVALIDDATA.toString());
}
- if (!NorthboundUtils.isAuthorized(
- getUserName(), containerName, Privilege.WRITE, this)) {
- throw new UnauthorizedException(
- "User is not authorized to perform this operation on container "
- + containerName);
+ if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.WRITE, this)) {
+ throw new UnauthorizedException("User is not authorized to perform this operation on container "
+ + containerName);
}
ISwitchManager switchManager = null;
- switchManager = (ISwitchManager) ServiceHelper.getInstance(
- ISwitchManager.class, containerName, this);
+ switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName, this);
if (switchManager == null) {
- throw new ResourceNotFoundException(
- RestMessages.NOCONTAINER.toString());
+ throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
}
Status status = switchManager.removeSubnet(subnetName);
if (status.isSuccess()) {
@POST
@Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@StatusCodes({
- @ResponseCode(code = 202, condition = "Operation successful"),
- @ResponseCode(code = 400, condition = "Invalid request, i.e., requested changing the subnet name"),
- @ResponseCode(code = 404, condition = "The containerName or subnetName is not found"),
- @ResponseCode(code = 500, condition = "Internal server error")})
+ @ResponseCode(code = 202, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Invalid request, i.e., requested changing the subnet name"),
+ @ResponseCode(code = 404, condition = "The containerName or subnetName is not found"),
+ @ResponseCode(code = 500, condition = "Internal server error") })
public Response modifySubnet(@PathParam("containerName") String containerName,
- @PathParam("subnetName") String name,
- @TypeHint(SubnetConfig.class) JAXBElement<SubnetConfig> subnetConfigData) {
+ @PathParam("subnetName") String name,
+ @TypeHint(SubnetConfig.class) JAXBElement<SubnetConfig> subnetConfigData) {
- if (!NorthboundUtils.isAuthorized(getUserName(), containerName,
- Privilege.WRITE, this)) {
- throw new UnauthorizedException(
- "User is not authorized to perform this operation on container " + containerName);
+ if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.WRITE, this)) {
+ throw new UnauthorizedException("User is not authorized to perform this operation on container "
+ + containerName);
}
- ISwitchManager switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class,
- containerName, this);
+ ISwitchManager switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName,
+ this);
if (switchManager == null) {
throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
}
// make sure that the name matches an existing subnet and we're not
// changing the name or subnet IP/mask
- if (existingConf == null){
+ if (existingConf == null) {
// don't have a subnet by that name
return Response.status(Response.Status.NOT_FOUND).build();
- }else if( !existingConf.getName().equals(subnetConf.getName())
- || !existingConf.getSubnet().equals(subnetConf.getSubnet())) {
+ } else if (!existingConf.getName().equals(subnetConf.getName())
+ || !existingConf.getSubnet().equals(subnetConf.getSubnet())) {
// can't change the name of a subnet
return Response.status(Response.Status.BAD_REQUEST).build();
- }else{
+ } else {
// create a set for fast lookups
Set<String> newPorts = new HashSet<String>(subnetConf.getNodePorts());
// go through the current ports and (1) remove ports that aren't
// there anymore and (2) remove ports that are still there from the
// set of ports to add
- for(String s : existingConf.getNodePorts()){
- if(newPorts.contains(s)){
+ for (String s : existingConf.getNodePorts()) {
+ if (newPorts.contains(s)) {
newPorts.remove(s);
- }else{
+ } else {
Status st = switchManager.removePortsFromSubnet(name, s);
successful = successful && st.isSuccess();
}
}
// add any remaining ports
- for(String s : newPorts){
+ for (String s : newPorts) {
Status st = switchManager.addPortsToSubnet(name, s);
successful = successful && st.isSuccess();
}
}
- if(successful){
+ if (successful) {
return Response.status(Response.Status.ACCEPTED).build();
- }else{
+ } else {
return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
}
}
- /*
- *
- * Add or remove switch ports to a subnet POST subnets/green/sw
+ /**
*
- * @param model
+ * Add ports to a subnet
*
* @param containerName
- *
+ * Name of the Container
* @param name
+ * Name of the SubnetConfig to be modified
+ * @param subnetConfigData
+ * the {@link SubnetConfig} structure in JSON passed as a POST
+ * parameter
+ * @return If the operation is successful or not
+ */
+ @Path("/{containerName}/{subnetName}/add")
+ @POST
+ @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+ @StatusCodes({
+ @ResponseCode(code = 202, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Invalid request"),
+ @ResponseCode(code = 404, condition = "The containerName or subnetName is not found"),
+ @ResponseCode(code = 500, condition = "Internal server error") })
+ public Response addNodePorts(
+ @PathParam("containerName") String containerName,
+ @PathParam("subnetName") String name,
+ @TypeHint(SubnetConfig.class) JAXBElement<SubnetConfig> subnetConfigData) {
+
+ SubnetConfig subnetConf = subnetConfigData.getValue();
+ return addOrDeletePorts(containerName, name, subnetConf, "add");
+ }
+
+ /**
*
- * @param subnet: the subnet name name
- *
- * @param switchports: datapath ID/port list =>
- * xx:xx:xx:xx:xx:xx:xx:xx/a,b,c-m,r-t,y
- *
- * @return
- *
- * @RequestMapping(value = "/{containerName}/{name}", method =
- * RequestMethod.POST)
- *
- * public View addSwitchports(Map<String, Object> model,
- *
- * @PathVariable(value = "containerName") String containerName,
- *
- * @PathVariable(value = "name") String name,
- *
- * @RequestParam(value = "nodeports") String nodePorts,
- *
- * @RequestParam(value = "action") String action) {
- *
- * checkDefaultDisabled(containerName); ISwitchManager switchManager = null;
- * try { BundleContext bCtx = FrameworkUtil.getBundle(this.getClass())
- * .getBundleContext();
- *
- * ServiceReference[] services = bCtx.getServiceReferences(
- * ISwitchManager.class.getName(), "(containerName=" + containerName + ")");
- *
- * if (services != null) { switchManager = (ISwitchManager)
- * bCtx.getService(services[0]); logger.debug("Switch manager reference is:"
- * + switchManager); } } catch (Exception e) {
- * logger.error("Switch Manager reference is NULL"); }
- *
- * checkContainerExists(switchManager);
- *
- * String ret; if (action.equals("add")) { ret =
- * switchManager.addPortsToSubnet(name, nodePorts); } else if
- * (action.equals("remove")) { ret =
- * switchManager.removePortsFromSubnet(name, nodePorts); } else { throw new
- * UnsupportedMediaTypeException(RestMessages.UNKNOWNACTION .toString() +
- * ": " + action); }
+ * Delete ports from a subnet
*
- * return returnViewOrThrowConflicEx(model, ret); }
+ * @param containerName
+ * Name of the Container
+ * @param name
+ * Name of the SubnetConfig to be modified
+ * @param subnetConfigData
+ * the {@link SubnetConfig} structure in JSON passed as a POST
+ * parameter
+ * @return If the operation is successful or not
*/
+ @Path("/{containerName}/{subnetName}/delete")
+ @POST
+ @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+ @StatusCodes({
+ @ResponseCode(code = 202, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Invalid request"),
+ @ResponseCode(code = 404, condition = "The containerName or subnetName is not found"),
+ @ResponseCode(code = 500, condition = "Internal server error") })
+ public Response deleteNodePorts(
+ @PathParam("containerName") String containerName,
+ @PathParam("subnetName") String name,
+ @TypeHint(SubnetConfig.class) JAXBElement<SubnetConfig> subnetConfigData) {
+
+ SubnetConfig subnetConf = subnetConfigData.getValue();
+ return addOrDeletePorts(containerName, name, subnetConf, "delete");
+ }
+
+ /**
+ *
+ * Add/Delete ports to/from a subnet
+ *
+ * @param containerName
+ * Name of the Container
+ * @param name
+ * Name of the SubnetConfig to be modified
+ * @param subnetConf
+ * the {@link SubnetConfig} structure
+ * @param action
+ * add or delete
+ * @return If the operation is successful or not
+ */
+ private Response addOrDeletePorts(String containerName, String name, SubnetConfig subnetConf, String action) {
+
+ if (!NorthboundUtils.isAuthorized(getUserName(), containerName, Privilege.WRITE, this)) {
+ throw new UnauthorizedException("User is not authorized to perform this operation on container "
+ + containerName);
+ }
+
+ ISwitchManager switchManager = (ISwitchManager) ServiceHelper.getInstance(ISwitchManager.class, containerName,
+ this);
+ if (switchManager == null) {
+ throw new ResourceNotFoundException(RestMessages.NOCONTAINER.toString());
+ }
+
+ SubnetConfig existingConf = switchManager.getSubnetConfig(name);
+
+ // make sure that the name matches an existing subnet and we're not
+ // changing the name or subnet IP/mask
+ if (existingConf == null) {
+ // don't have a subnet by that name
+ return Response.status(Response.Status.NOT_FOUND).build();
+ } else if (!existingConf.getName().equals(subnetConf.getName())
+ || !existingConf.getSubnet().equals(subnetConf.getSubnet())) {
+ // can't change the name of a subnet
+ return Response.status(Response.Status.BAD_REQUEST).build();
+ } else {
+ Status st;
+ boolean successful = true;
+ // Work on a copy: removeAll/retainAll below would otherwise mutate
+ // the port list owned by the caller's SubnetConfig.
+ List<String> ports = new ArrayList<String>(subnetConf.getNodePorts());
+
+ if (action.equals("add")) {
+ // add only the ports not already part of the subnet
+ ports.removeAll(existingConf.getNodePorts());
+ for (String port : ports) {
+ st = switchManager.addPortsToSubnet(name, port);
+ successful = successful && st.isSuccess();
+ }
+ } else if (action.equals("delete")) {
+ // delete only the ports currently part of the subnet
+ ports.retainAll(existingConf.getNodePorts());
+ for (String port : ports) {
+ st = switchManager.removePortsFromSubnet(name, port);
+ successful = successful && st.isSuccess();
+ }
+ } else {
+ return Response.status(Response.Status.BAD_REQUEST).build();
+ }
+
+ if (successful) {
+ return Response.status(Response.Status.ACCEPTED).build();
+ } else {
+ return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
+ }
+ }
+ }
}
public enum DiscoveryPeriod {
INTERVAL (300),
AGEOUT (120),
- THRESHOLD (10);
+ THRESHOLD (30);
private int time; // sec
private int tick; // tick
}
}
+ /**
+ * Reads the pending-statistics-request queue size from the
+ * "of.statsQueueSize" system property.
+ *
+ * @return the configured queue size, or INITIAL_SIZE when the property
+ * is unset, non-numeric, or not a positive value.
+ * NOTE(review): the short return type caps the configurable size
+ * at 32767 — confirm this is large enough for the deployments the
+ * config comment targets (queue size >= number of switches).
+ */
+ private short getStatsQueueSize() {
+ String statsQueueSizeStr = System.getProperty("of.statsQueueSize");
+ short statsQueueSize = INITIAL_SIZE;
+ if (statsQueueSizeStr != null) {
+ try {
+ statsQueueSize = Short.parseShort(statsQueueSizeStr.trim());
+ if (statsQueueSize <= 0) {
+ statsQueueSize = INITIAL_SIZE;
+ }
+ } catch (NumberFormatException e) {
+ // Malformed property value: fall back to the default
+ // explicitly instead of silently swallowing the error.
+ statsQueueSize = INITIAL_SIZE;
+ }
+ }
+ return statsQueueSize;
+ }
/**
* Function called by the dependency manager when all the required
* dependencies are satisfied
portStatistics = new ConcurrentHashMap<Long, List<OFStatistics>>();
tableStatistics = new ConcurrentHashMap<Long, List<OFStatistics>>();
dummyList = new ArrayList<OFStatistics>(1);
+ pendingStatsRequests = new LinkedBlockingQueue<StatsRequest>(getStatsQueueSize());
statisticsTimerTicks = new ConcurrentHashMap<Long, StatisticsTicks>(INITIAL_SIZE);
- pendingStatsRequests = new LinkedBlockingQueue<StatsRequest>(INITIAL_SIZE);
switchPortStatsUpdated = new LinkedBlockingQueue<Long>(INITIAL_SIZE);
switchSupportsVendorExtStats = new ConcurrentHashMap<Long, Boolean>(INITIAL_SIZE);
txRates = new HashMap<Long, Map<Short, TxRates>>(INITIAL_SIZE);
}
}
}
- return new Status(StatusCode.SUCCESS, null);
+ return new Status(StatusCode.SUCCESS);
}
}
org.apache.felix.dm
</Import-Package>
<Export-Package>
- org.opendaylight.controller.sample.simpleforwarding
+ org.opendaylight.controller.samples.simpleforwarding
</Export-Package>
<Bundle-Activator>
org.opendaylight.controller.samples.simpleforwarding.internal.Activator
private String subnetFileName = null, spanFileName = null,
switchConfigFileName = null;
private final List<NodeConnector> spanNodeConnectors = new CopyOnWriteArrayList<NodeConnector>();
- private ConcurrentMap<InetAddress, Subnet> subnets; // set of Subnets keyed by the InetAddress
+ // set of Subnets keyed by the InetAddress
+ private ConcurrentMap<InetAddress, Subnet> subnets;
private ConcurrentMap<String, SubnetConfig> subnetsConfigList;
private ConcurrentMap<SpanConfig, SpanConfig> spanConfigList;
- private ConcurrentMap<String, SwitchConfig> nodeConfigList; // manually configured parameters for the node like name and tier
+ // manually configured parameters for the node such as name, tier, mode
+ private ConcurrentMap<String, SwitchConfig> nodeConfigList;
private ConcurrentMap<Long, String> configSaveEvent;
- private ConcurrentMap<Node, Map<String, Property>> nodeProps; // properties are maintained in global container only
- private ConcurrentMap<NodeConnector, Map<String, Property>> nodeConnectorProps; // properties are maintained in global container only
+ private ConcurrentMap<Node, Map<String, Property>> nodeProps;
+ private ConcurrentMap<NodeConnector, Map<String, Property>> nodeConnectorProps;
private ConcurrentMap<Node, Map<String, NodeConnector>> nodeConnectorNames;
private IInventoryService inventoryService;
private final Set<ISwitchManagerAware> switchManagerAware = Collections
modeChange = true;
}
- try {
- String nodeId = cfgObject.getNodeId();
- Node node = Node.fromString(nodeId);
- Map<String, Property> propMapCurr = nodeProps.get(node);
- Map<String, Property> propMap = new HashMap<String, Property>();
- if (propMapCurr != null) {
- for (String s : propMapCurr.keySet()) {
- propMap.put(s, propMapCurr.get(s).clone());
- }
- }
- Property desc = new Description(cfgObject.getNodeDescription());
- propMap.put(desc.getName(), desc);
- Property tier = new Tier(Integer.parseInt(cfgObject.getTier()));
- propMap.put(tier.getName(), tier);
+ String nodeId = cfgObject.getNodeId();
+ Node node = Node.fromString(nodeId);
+ Map<String, Property> propMapCurr = nodeProps.get(node);
+ if (propMapCurr == null) {
+ return;
+ }
+ Map<String, Property> propMap = new HashMap<String, Property>();
+ for (String s : propMapCurr.keySet()) {
+ propMap.put(s, propMapCurr.get(s).clone());
+ }
+ Property desc = new Description(cfgObject.getNodeDescription());
+ propMap.put(desc.getName(), desc);
+ Property tier = new Tier(Integer.parseInt(cfgObject.getTier()));
+ propMap.put(tier.getName(), tier);
- if (propMapCurr == null) {
- if (nodeProps.putIfAbsent(node, propMap) != null) {
- // TODO rollback using Transactionality
- return;
- }
- } else {
- if (!nodeProps.replace(node, propMapCurr, propMap)) {
- // TODO rollback using Transactionality
- return;
- }
- }
+ if (!nodeProps.replace(node, propMapCurr, propMap)) {
+ // TODO rollback using Transactionality
+ return;
+ }
- log.info("Set Node {}'s Mode to {}", nodeId, cfgObject.getMode());
+ log.info("Set Node {}'s Mode to {}", nodeId, cfgObject.getMode());
- if (modeChange) {
- notifyModeChange(node, cfgObject.isProactive());
- }
- } catch (Exception e) {
- log.debug("updateSwitchConfig: {}", e.getMessage());
+ if (modeChange) {
+ notifyModeChange(node, cfgObject.isProactive());
}
}
package org.opendaylight.controller.topologymanager;
import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
+
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.controller.sal.core.Node.NodeIDType;
-import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
import org.opendaylight.controller.sal.core.NodeConnector;
-import org.opendaylight.controller.sal.utils.GUIField;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
if (!isValidNodeConnector(srcNodeConnector) ||
!isValidNodeConnector(dstNodeConnector)) {
- logger.warn("Invalid NodeConnector");
+ logger.debug("Invalid NodeConnector in user link: {}", this);
return false;
}
package org.opendaylight.controller.topologymanager.internal;
-import org.apache.felix.dm.Component;
+import java.util.Dictionary;
+import java.util.HashSet;
+import java.util.Hashtable;
+import java.util.Set;
+import org.apache.felix.dm.Component;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.configuration.IConfigurationContainerAware;
import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
public void configureInstance(Component c, Object imp, String containerName) {
if (imp.equals(TopologyManagerImpl.class)) {
// export the service needed to listen to topology updates
+ Dictionary<String, Set<String>> props = new Hashtable<String, Set<String>>();
+ Set<String> propSet = new HashSet<String>();
+ propSet.add("topologymanager.configSaveEvent");
+ props.put("cachenames", propSet);
+
c.setInterface(new String[] { IListenTopoUpdates.class.getName(),
ITopologyManager.class.getName(),
- IConfigurationContainerAware.class.getName() }, null);
+ IConfigurationContainerAware.class.getName() }, props);
c.add(createContainerServiceDependency(containerName).setService(
ITopologyService.class).setCallbacks("setTopoService",
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.ArrayList;
-import java.util.Collections;
+import java.util.Date;
import java.util.Dictionary;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArraySet;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.felix.dm.Component;
public class TopologyManagerImpl implements ITopologyManager,
IConfigurationContainerAware, IListenTopoUpdates, IObjectReader,
CommandProvider {
- private static final Logger log = LoggerFactory
- .getLogger(TopologyManagerImpl.class);
- private ITopologyService topoService = null;
- private IClusterContainerServices clusterContainerService = null;
+ private static final Logger log = LoggerFactory.getLogger(TopologyManagerImpl.class);
+ private static final String SAVE = "Save";
+ private ITopologyService topoService;
+ private IClusterContainerServices clusterContainerService;
// DB of all the Edges with properties which constitute our topology
- private ConcurrentMap<Edge, Set<Property>> edgesDB = null;
+ private ConcurrentMap<Edge, Set<Property>> edgesDB;
// DB of all NodeConnector which are part of ISL Edges, meaning they
// are connected to another NodeConnector on the other side of an ISL link.
// NodeConnector of a Production Edge is not part of this DB.
- private ConcurrentMap<NodeConnector, Set<Property>> nodeConnectorsDB = null;
+ private ConcurrentMap<NodeConnector, Set<Property>> nodeConnectorsDB;
// DB of all the NodeConnectors with an Host attached to it
- private ConcurrentMap<NodeConnector, ImmutablePair<Host, Set<Property>>> hostsDB = null;
+ private ConcurrentMap<NodeConnector, ImmutablePair<Host, Set<Property>>> hostsDB;
// Topology Manager Aware listeners
- private Set<ITopologyManagerAware> topologyManagerAware = Collections
- .synchronizedSet(new HashSet<ITopologyManagerAware>());
+ private Set<ITopologyManagerAware> topologyManagerAware =
+ new CopyOnWriteArraySet<ITopologyManagerAware>();;
private static String ROOT = GlobalConstants.STARTUPHOME.toString();
- private String userLinksFileName = null;
- private ConcurrentMap<String, TopologyUserLinkConfig> userLinks;
+ private String userLinksFileName;
+ private ConcurrentMap<String, TopologyUserLinkConfig> userLinksDB;
+ private ConcurrentMap<Long, String> configSaveEvent;
+
    // Fall-back allocation used when clustering services are unavailable:
    // plain local ConcurrentHashMaps take the place of the clustered caches
    // so the rest of the component can run unchanged (single-node only).
    void nonClusterObjectCreate() {
        edgesDB = new ConcurrentHashMap<Edge, Set<Property>>();
        hostsDB = new ConcurrentHashMap<NodeConnector, ImmutablePair<Host, Set<Property>>>();
        nodeConnectorsDB = new ConcurrentHashMap<NodeConnector, Set<Property>>();
        userLinksDB = new ConcurrentHashMap<String, TopologyUserLinkConfig>();
        configSaveEvent = new ConcurrentHashMap<Long, String>();
    }
void setTopologyManagerAware(ITopologyManagerAware s) {
*
*/
void init(Component c) {
+
+ allocateCaches();
+ retrieveCaches();
+
String containerName = null;
- Dictionary props = c.getServiceProperties();
+ Dictionary<?, ?> props = c.getServiceProperties();
if (props != null) {
containerName = (String) props.get("containerName");
} else {
containerName = "UNKNOWN";
}
- if (this.clusterContainerService == null) {
- log.error("Cluster Services is null, not expected!");
- return;
- }
+ userLinksFileName = ROOT + "userTopology_" + containerName + ".conf";
+ registerWithOSGIConsole();
+ loadConfiguration();
+ }
- if (this.topoService == null) {
- log.error("Topology Services is null, not expected!");
    // Allocates the clustered caches backing the topology DBs. Each cache is
    // created independently: CacheExistException is expected on non-coordinator
    // nodes (cache already created elsewhere) and is only logged at debug,
    // while a CacheConfigException indicates a real misconfiguration. On any
    // exception the corresponding field is left unassigned here; retrieveCaches()
    // is expected to fetch the existing instance.
    @SuppressWarnings({ "unchecked", "deprecation" })
    private void allocateCaches(){
        if (this.clusterContainerService == null) {
            // No clustering available: fall back to purely local maps.
            nonClusterObjectCreate();
            log.error("Cluster Services unavailable, allocated non-cluster caches!");
            return;
        }

        try {
            this.edgesDB = (ConcurrentMap<Edge, Set<Property>>) this.clusterContainerService.createCache(
                    "topologymanager.edgesDB", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
        } catch (CacheExistException cee) {
            log.debug("topologymanager.edgesDB Cache already exists - destroy and recreate if needed");
        } catch (CacheConfigException cce) {
            log.error("topologymanager.edgesDB Cache configuration invalid - check cache mode");
        }

        try {
            this.hostsDB = (ConcurrentMap<NodeConnector, ImmutablePair<Host, Set<Property>>>) this.clusterContainerService
                    .createCache("topologymanager.hostsDB", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
        } catch (CacheExistException cee) {
            log.debug("topologymanager.hostsDB Cache already exists - destroy and recreate if needed");
        } catch (CacheConfigException cce) {
            log.error("topologymanager.hostsDB Cache configuration invalid - check cache mode");
        }

        try {
            this.nodeConnectorsDB = (ConcurrentMap<NodeConnector, Set<Property>>) this.clusterContainerService
                    .createCache("topologymanager.nodeConnectorDB", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
        } catch (CacheExistException cee) {
            log.debug("topologymanager.nodeConnectorDB Cache already exists - destroy and recreate if needed");
        } catch (CacheConfigException cce) {
            log.error("topologymanager.nodeConnectorDB Cache configuration invalid - check cache mode");
        }

        try {
            this.userLinksDB = (ConcurrentMap<String, TopologyUserLinkConfig>) this.clusterContainerService
                    .createCache("topologymanager.userLinksDB", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
        } catch (CacheExistException cee) {
            log.debug("topologymanager.userLinksDB Cache already exists - destroy and recreate if needed");
        } catch (CacheConfigException cce) {
            log.error("topologymanager.userLinksDB Cache configuration invalid - check cache mode");
        }

        try {
            this.configSaveEvent = (ConcurrentMap<Long, String>) this.clusterContainerService
                    .createCache("topologymanager.configSaveEvent", EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
        } catch (CacheExistException cee) {
            log.debug("topologymanager.configSaveEvent Cache already exists - destroy and recreate if needed");
        } catch (CacheConfigException cce) {
            log.error("topologymanager.configSaveEvent Cache configuration invalid - check cache mode");
        }

    }
+
    // Retrieves the (already created) clustered cache instances and binds them
    // to the DB fields. Called after allocateCaches(): on the node that hit
    // CacheExistException during allocation, this is where the shared instance
    // is actually obtained. A null return is logged but not fatal here.
    @SuppressWarnings({ "unchecked", "deprecation" })
    private void retrieveCaches() {
        if (this.clusterContainerService == null) {
            // allocateCaches() already fell back to local maps in this case
            log.error("Cluster Services is null, can't retrieve caches.");
            return;
        }

        this.edgesDB = (ConcurrentMap<Edge, Set<Property>>) this.clusterContainerService
                .getCache("topologymanager.edgesDB");
        if (edgesDB == null) {
            log.error("Failed to get cache for topologymanager.edgesDB");
        }

        this.hostsDB = (ConcurrentMap<NodeConnector, ImmutablePair<Host, Set<Property>>>) this.clusterContainerService
                .getCache("topologymanager.hostsDB");
        if (hostsDB == null) {
            log.error("Failed to get cache for topologymanager.hostsDB");
        }

        this.nodeConnectorsDB = (ConcurrentMap<NodeConnector, Set<Property>>) this.clusterContainerService
                .getCache("topologymanager.nodeConnectorDB");
        if (nodeConnectorsDB == null) {
            log.error("Failed to get cache for topologymanager.nodeConnectorDB");
        }

        this.userLinksDB = (ConcurrentMap<String, TopologyUserLinkConfig>) this.clusterContainerService
                .getCache("topologymanager.userLinksDB");
        if (userLinksDB == null) {
            log.error("Failed to get cache for topologymanager.userLinksDB");
        }

        this.configSaveEvent = (ConcurrentMap<Long, String>) this.clusterContainerService
                .getCache("topologymanager.configSaveEvent");
        if (configSaveEvent == null) {
            log.error("Failed to get cache for topologymanager.configSaveEvent");
        }
    }
/**
*
*/
    void destroy() {
        // Intentionally empty: the topologymanager.* clustered caches are no
        // longer destroyed on component teardown, so topology state can remain
        // available across a local restart / cluster fail-over.
        // NOTE(review): the pre-clustering code destroyed the caches here —
        // confirm leaving them allocated is the intended lifecycle.
    }
@SuppressWarnings("unchecked")
private void loadConfiguration() {
ObjectReader objReader = new ObjectReader();
- ConcurrentMap<String, TopologyUserLinkConfig> confList = (ConcurrentMap<String, TopologyUserLinkConfig>) objReader
- .read(this, userLinksFileName);
-
- if (confList == null) {
- return;
- }
+ ConcurrentMap<String, TopologyUserLinkConfig> confList =
+ (ConcurrentMap<String, TopologyUserLinkConfig>) objReader.read(this, userLinksFileName);
- for (TopologyUserLinkConfig conf : confList.values()) {
- addUserLink(conf);
+ if (confList != null) {
+ for (TopologyUserLinkConfig conf : confList.values()) {
+ addUserLink(conf);
+ }
}
}
@Override
public Status saveConfig() {
- // Publish the save config event to the cluster nodes
- /**
- * Get the CLUSTERING SERVICES WORKING BEFORE TRYING THIS
- *
- * configSaveEvent.put(new Date().getTime(), SAVE);
- */
+ // Publish the save config event to the cluster
+ configSaveEvent.put(new Date().getTime(), SAVE );
return saveConfigInternal();
}
public Status saveConfigInternal() {
- Status retS;
ObjectWriter objWriter = new ObjectWriter();
- retS = objWriter
- .write(new ConcurrentHashMap<String, TopologyUserLinkConfig>(
- userLinks), userLinksFileName);
+ Status saveStatus = objWriter.write(
+ new ConcurrentHashMap<String, TopologyUserLinkConfig>(userLinksDB), userLinksFileName);
- if (retS.isSuccess()) {
- return retS;
- } else {
- return new Status(StatusCode.INTERNALERROR, "Save failed");
+ if (! saveStatus.isSuccess()) {
+ return new Status(StatusCode.INTERNALERROR, "Topology save failed: " + saveStatus.getDescription());
}
+ return saveStatus;
}
@Override
return null;
}
- HashMap<Node, Set<Edge>> res = new HashMap<Node, Set<Edge>>();
- for (Edge key : this.edgesDB.keySet()) {
+ Map<Node, Set<Edge>> res = new HashMap<Node, Set<Edge>>();
+ for (Edge edge : this.edgesDB.keySet()) {
// Lets analyze the tail
- Node node = key.getTailNodeConnector().getNode();
+ Node node = edge.getTailNodeConnector().getNode();
Set<Edge> nodeEdges = res.get(node);
if (nodeEdges == null) {
nodeEdges = new HashSet<Edge>();
+ res.put(node, nodeEdges);
}
- nodeEdges.add(key);
- // We need to re-add to the MAP even if the element was
- // already there so in case of clustered services the map
- // gets updated in the cluster
- res.put(node, nodeEdges);
+ nodeEdges.add(edge);
// Lets analyze the head
- node = key.getHeadNodeConnector().getNode();
+ node = edge.getHeadNodeConnector().getNode();
nodeEdges = res.get(node);
if (nodeEdges == null) {
nodeEdges = new HashSet<Edge>();
+ res.put(node, nodeEdges);
}
- nodeEdges.add(key);
- // We need to re-add to the MAP even if the element was
- // already there so in case of clustered services the map
- // gets updated in the cluster
- res.put(node, nodeEdges);
+ nodeEdges.add(edge);
}
return res;
* @return true if it is a production link
*/
public boolean isProductionLink(Edge e) {
- return (e.getHeadNodeConnector().getType()
- .equals(NodeConnector.NodeConnectorIDType.PRODUCTION) || e
- .getTailNodeConnector().getType()
- .equals(NodeConnector.NodeConnectorIDType.PRODUCTION));
+ return (e.getHeadNodeConnector().getType().equals(NodeConnector.NodeConnectorIDType.PRODUCTION)
+ || e.getTailNodeConnector().getType().equals(NodeConnector.NodeConnectorIDType.PRODUCTION));
}
/**
return null;
}
- HashMap<Edge, Set<Property>> res = new HashMap<Edge, Set<Property>>();
- for (Edge key : this.edgesDB.keySet()) {
+ Map<Edge, Set<Property>> edgeMap = new HashMap<Edge, Set<Property>>();
+ Set<Property> props;
+ for (Map.Entry<Edge, Set<Property>> edgeEntry : edgesDB.entrySet()) {
// Sets of props are copied because the composition of
// those properties could change with time
- HashSet<Property> prop = new HashSet<Property>(
- this.edgesDB.get(key));
+ props = new HashSet<Property>(edgeEntry.getValue());
// We can simply reuse the key because the object is
// immutable so doesn't really matter that we are
// referencing the only owned by a different table, the
// meaning is the same because doesn't change with time.
- res.put(key, prop);
+ edgeMap.put(edgeEntry.getKey(), props);
}
- return res;
- }
-
- // TODO remove with spring-dm removal
- /**
- * @param set
- * the topologyAware to set
- */
- public void setTopologyAware(Set<Object> set) {
- for (Object s : set) {
- setTopologyManagerAware((ITopologyManagerAware) s);
- }
+ return edgeMap;
}
@Override
return null;
}
- return (this.hostsDB.keySet());
+ return (new HashSet<NodeConnector>(this.hostsDB.keySet()));
}
@Override
return null;
}
HashMap<Node, Set<NodeConnector>> res = new HashMap<Node, Set<NodeConnector>>();
-
- for (NodeConnector p : this.hostsDB.keySet()) {
- Node n = p.getNode();
- Set<NodeConnector> pSet = res.get(n);
- if (pSet == null) {
+ Node node;
+ Set<NodeConnector> portSet;
+ for (NodeConnector nc : this.hostsDB.keySet()) {
+ node = nc.getNode();
+ portSet = res.get(node);
+ if (portSet == null) {
// Create the HashSet if null
- pSet = new HashSet<NodeConnector>();
- res.put(n, pSet);
+ portSet = new HashSet<NodeConnector>();
+ res.put(node, portSet);
}
// Keep updating the HashSet, given this is not a
// clustered map we can just update the set without
// worrying to update the hashmap.
- pSet.add(p);
+ portSet.add(nc);
}
return (res);
}
@Override
- public Host getHostAttachedToNodeConnector(NodeConnector p) {
- if (this.hostsDB == null) {
+ public Host getHostAttachedToNodeConnector(NodeConnector port) {
+ ImmutablePair<Host, Set<Property>> host;
+ if (this.hostsDB == null || (host = this.hostsDB.get(port)) == null) {
return null;
}
- if (this.hostsDB.get(p) == null)
- return null;
-
- return (this.hostsDB.get(p).getLeft());
+ return host.getLeft();
}
@Override
- public void updateHostLink(NodeConnector p, Host h, UpdateType t,
- Set<Property> props) {
- if (this.hostsDB == null) {
- return;
+ public void updateHostLink(NodeConnector port, Host h, UpdateType t, Set<Property> props) {
+
+ // Clone the property set in case non null else just
+ // create an empty one. Caches allocated via infinispan
+ // don't allow null values
+ if (props == null) {
+ props = new HashSet<Property>();
+ } else {
+ props = new HashSet<Property>(props);
}
+ ImmutablePair<Host, Set<Property>> thisHost = new ImmutablePair<Host, Set<Property>>(h, props);
switch (t) {
case ADDED:
case CHANGED:
- // Clone the property set in case non null else just
- // create an empty one. Caches allocated via infinispan
- // don't allow null values
- if (props == null) {
- props = new HashSet<Property>();
- } else {
- props = new HashSet<Property>(props);
- }
-
- this.hostsDB.put(p, new ImmutablePair(h, props));
+ this.hostsDB.put(port, thisHost);
break;
case REMOVED:
- this.hostsDB.remove(p);
+ //remove only if hasn't been concurrently modified
+ this.hostsDB.remove(port, thisHost);
break;
}
}
- private TopoEdgeUpdate edgeUpdate(Edge e, UpdateType type,
- Set<Property> props) {
+ private TopoEdgeUpdate edgeUpdate(Edge e, UpdateType type, Set<Property> props) {
switch (type) {
case ADDED:
// Make sure the props are non-null
if (props == null) {
- props = (Set<Property>) new HashSet();
+ props = new HashSet<Property>();
} else {
- // Copy the set so noone is going to change the content
- props = (Set<Property>) new HashSet(props);
+ props = new HashSet<Property>(props);
+ }
+
+ //in case of node switch-over to a different cluster controller,
+ //let's retain edge props
+ Set<Property> currentProps = this.edgesDB.get(e);
+ if (currentProps != null){
+ props.addAll(currentProps);
}
// Now make sure there is the creation timestamp for the
- // edge, if not there timestamp with the first update
+ // edge, if not there, stamp with the first update
boolean found_create = false;
for (Property prop : props) {
if (prop instanceof TimeStamp) {
TimeStamp t = (TimeStamp) prop;
if (t.getTimeStampName().equals("creation")) {
found_create = true;
+ break;
}
}
}
if (!found_create) {
- TimeStamp t = new TimeStamp(System.currentTimeMillis(),
- "creation");
+ TimeStamp t = new TimeStamp(System.currentTimeMillis(), "creation");
props.add(t);
}
// for now.
// The DB only contains ISL ports
if (isISLink(e)) {
- this.nodeConnectorsDB.put(e.getHeadNodeConnector(),
- new HashSet<Property>());
- this.nodeConnectorsDB.put(e.getTailNodeConnector(),
- new HashSet<Property>());
+ this.nodeConnectorsDB.put(e.getHeadNodeConnector(), new HashSet<Property>(1));
+ this.nodeConnectorsDB.put(e.getTailNodeConnector(), new HashSet<Property>(1));
}
log.trace("Edge {} {}", e.toString(), type.name());
break;
log.trace("Edge {} {}", e.toString(), type.name());
break;
case CHANGED:
- Set<Property> old_props = this.edgesDB.get(e);
+ Set<Property> oldProps = this.edgesDB.get(e);
// When property changes lets make sure we can change it
// all except the creation time stamp because that should
// be changed only when the edge is destroyed and created
// again
- TimeStamp tc = null;
- for (Property prop : old_props) {
+ TimeStamp timeStamp = null;
+ for (Property prop : oldProps) {
if (prop instanceof TimeStamp) {
- TimeStamp t = (TimeStamp) prop;
- if (t.getTimeStampName().equals("creation")) {
- tc = t;
+ TimeStamp tsProp = (TimeStamp) prop;
+ if (tsProp.getTimeStampName().equals("creation")) {
+ timeStamp = tsProp;
+ break;
}
}
}
// Now lets make sure new properties are non-null
- // Make sure the props are non-null
if (props == null) {
- props = (Set<Property>) new HashSet();
+ props = new HashSet<Property>();
} else {
// Copy the set so noone is going to change the content
- props = (Set<Property>) new HashSet(props);
+ props = new HashSet<Property>(props);
}
// Now lets remove the creation property if exist in the
TimeStamp t = (TimeStamp) prop;
if (t.getTimeStampName().equals("creation")) {
i.remove();
+ break;
}
}
}
// Now lets add the creation timestamp in it
- if (tc != null) {
- props.add(tc);
+ if (timeStamp != null) {
+ props.add(timeStamp);
}
// Finally update
try {
s.edgeUpdate(teuList);
} catch (Exception exc) {
- log.error("Exception on callback", exc);
+ log.error("Exception on edge update:", exc);
}
}
private Edge getLinkTuple(TopologyUserLinkConfig link) {
- Edge linkTuple = null;
NodeConnector srcNodeConnector = NodeConnector.fromString(link.getSrcNodeConnector());
NodeConnector dstNodeConnector = NodeConnector.fromString(link.getDstNodeConnector());
- if (srcNodeConnector == null || dstNodeConnector == null) return null;
try {
- linkTuple = new Edge(srcNodeConnector, dstNodeConnector);
+ return new Edge(srcNodeConnector, dstNodeConnector);
} catch (Exception e) {
+ return null;
}
- return linkTuple;
}
    /**
     * Returns a defensive copy of the user-link DB so callers cannot mutate
     * the clustered cache through the returned map.
     */
    @Override
    public ConcurrentMap<String, TopologyUserLinkConfig> getUserLinks() {
        return new ConcurrentHashMap<String, TopologyUserLinkConfig>(userLinksDB);
    }
    /**
     * Adds a user-configured static link and, when the edge can be built,
     * announces it (and its reverse) to the topology.
     *
     * Order matters here: the status is set to LINKDOWN before the duplicate
     * scan and the putIfAbsent, and only flipped to SUCCESS once the reverse
     * edge is constructible.
     * NOTE(review): this mutates the caller's config object (setStatus) even
     * when a CONFLICT is returned — confirm callers tolerate that.
     *
     * @param userLink the link configuration to add
     * @return SUCCESS, BADREQUEST for invalid config, or CONFLICT on duplicate
     */
    @Override
    public Status addUserLink(TopologyUserLinkConfig userLink) {
        if (!userLink.isValid()) {
            return new Status(StatusCode.BADREQUEST,
                    "User link configuration invalid.");
        }
        userLink.setStatus(TopologyUserLinkConfig.STATUS.LINKDOWN);

        //Check if this link already configured
        //NOTE: infinispan cache doesn't support Map.containsValue()
        // (which is linear time in most ConcurrentMap impl anyway)
        for (TopologyUserLinkConfig existingLink : userLinksDB.values()) {
            if (existingLink.equals(userLink)) {
                return new Status(StatusCode.CONFLICT, "Link configuration exists");
            }
        }
        //attempt put, if mapping for this key already existed return conflict
        if (userLinksDB.putIfAbsent(userLink.getName(), userLink) != null) {
            return new Status(StatusCode.CONFLICT, "Link with name : " + userLink.getName()
                    + " already exists. Please use another name");
        }

        Edge linkTuple = getLinkTuple(userLink);
        if (linkTuple != null) {
            if (!isProductionLink(linkTuple)) {
                edgeUpdate(linkTuple, UpdateType.ADDED, new HashSet<Property>());
            }

            // Announce the reverse direction too; only then is the link
            // considered fully up.
            linkTuple = getReverseLinkTuple(userLink);
            if (linkTuple != null) {
                userLink.setStatus(TopologyUserLinkConfig.STATUS.SUCCESS);
                if (!isProductionLink(linkTuple)) {
                    edgeUpdate(linkTuple, UpdateType.ADDED, new HashSet<Property>());
                }
            }
        }
        return new Status(StatusCode.SUCCESS);
    }
@Override
public Status deleteUserLink(String linkName) {
if (linkName == null) {
- return new Status(StatusCode.BADREQUEST,
- "A valid linkName is required to Delete a link");
+ return new Status(StatusCode.BADREQUEST, "User link name cannot be null.");
}
- TopologyUserLinkConfig link = userLinks.get(linkName);
-
- Edge linkTuple = getLinkTuple(link);
- userLinks.remove(linkName);
- if (linkTuple != null) {
- if (!isProductionLink(linkTuple)) {
+ TopologyUserLinkConfig link = userLinksDB.remove(linkName);
+ Edge linkTuple;
+ if (link != null && (linkTuple = getLinkTuple(link)) != null) {
+ if (! isProductionLink(linkTuple)) {
edgeUpdate(linkTuple, UpdateType.REMOVED, null);
}
linkTuple = getReverseLinkTuple(link);
- if ((linkTuple != null) && !isProductionLink(linkTuple)) {
+ if (! isProductionLink(linkTuple)) {
edgeUpdate(linkTuple, UpdateType.REMOVED, null);
}
}
- return new Status(StatusCode.SUCCESS, null);
+ return new Status(StatusCode.SUCCESS);
}
private void registerWithOSGIConsole() {
}
public void _printUserLink(CommandInterpreter ci) {
- for (String name : this.userLinks.keySet()) {
- TopologyUserLinkConfig linkConfig = userLinks.get(name);
+ for (String name : this.userLinksDB.keySet()) {
+ TopologyUserLinkConfig linkConfig = userLinksDB.get(name);
ci.println("Name : " + name);
ci.println(linkConfig);
ci.println("Edge " + getLinkTuple(linkConfig));
    /**
     * Deserializes the user-link configuration from the given stream
     * (IObjectReader callback used by loadConfiguration()).
     *
     * NOTE(review): this is native Java deserialization of a file on disk;
     * confirm the config file can never be attacker-supplied, or add an
     * ObjectInputFilter.
     */
    @Override
    public Object readObject(ObjectInputStream ois)
            throws FileNotFoundException, IOException, ClassNotFoundException {
        return ois.readObject();
    }