<Include-Resource>
</Include-Resource>
<Export-Package>
+ org.opendaylight.controller.forwardingrulesmanager.implementation.data
</Export-Package>
<Import-Package>
org.opendaylight.controller.clustering.services,
org.opendaylight.controller.sal.utils,
org.opendaylight.controller.sal.packet,
org.opendaylight.controller.forwardingrulesmanager,
+ org.opendaylight.controller.connectionmanager,
javax.xml.bind.annotation,
javax.xml.bind,
org.apache.felix.dm,
<version>4.8.1</version>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>connectionmanager</artifactId>
+ <version>0.1.0-SNAPSHOT</version>
+ </dependency>
</dependencies>
</project>
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/**
+ * Class used by the FRM to distribute the forwarding rules programming in the
+ * cluster and to collect back the results of the programming
+ */
+package org.opendaylight.controller.forwardingrulesmanager.implementation.data;
+
+import java.io.Serializable;
+import java.net.InetAddress;
+
+import org.opendaylight.controller.forwardingrulesmanager.FlowEntryInstall;
+import org.opendaylight.controller.sal.core.UpdateType;
+
+/**
+ * Class used by the FRM to distribute the forwarding rules programming in the
+ * cluster and to collect back the results of the programming
+ */
+public final class FlowEntryDistributionOrder implements Serializable {
+ /**
+ * Serialization UID
+ */
+ private static final long serialVersionUID = 416280377113255147L;
+ // Immutable payload: the entry being programmed, the kind of update, and
+ // the address of the controller that originated the request.
+ // NOTE(review): instances travel through clustered caches, so
+ // FlowEntryInstall and UpdateType must themselves be Serializable —
+ // confirm against their definitions.
+ final private FlowEntryInstall entry;
+ final private UpdateType upType;
+ final private InetAddress requestorController;
+
+ /**
+ * @return the entry
+ */
+ public FlowEntryInstall getEntry() {
+ return entry;
+ }
+
+ /**
+ * @return the upType
+ */
+ public UpdateType getUpType() {
+ return upType;
+ }
+
+ /**
+ * @return the requestorController
+ */
+ public InetAddress getRequestorController() {
+ return requestorController;
+ }
+
+ /**
+ * @param entry
+ * FlowEntryInstall key value
+ * @param upType
+ * UpdateType key value
+ * @param requestorController
+ * identifier of the controller that initiated the request
+ */
+
+ public FlowEntryDistributionOrder(FlowEntryInstall entry, UpdateType upType, InetAddress requestorController) {
+ super();
+ this.entry = entry;
+ this.upType = upType;
+ this.requestorController = requestorController;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = (prime * result) + ((entry == null) ? 0 : entry.hashCode());
+ result = (prime * result) + ((requestorController == null) ? 0 : requestorController.hashCode());
+ result = (prime * result) + ((upType == null) ? 0 : upType.hashCode());
+ return result;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ // instanceof (rather than getClass) is safe here: the class is final,
+ // so there is no subclass to make equals asymmetric.
+ if (!(obj instanceof FlowEntryDistributionOrder)) {
+ return false;
+ }
+ FlowEntryDistributionOrder other = (FlowEntryDistributionOrder) obj;
+ if (entry == null) {
+ if (other.entry != null) {
+ return false;
+ }
+ } else if (!entry.equals(other.entry)) {
+ return false;
+ }
+ if (requestorController == null) {
+ if (other.requestorController != null) {
+ return false;
+ }
+ } else if (!requestorController.equals(other.requestorController)) {
+ return false;
+ }
+ if (upType != other.upType) {
+ return false;
+ }
+ return true;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("FlowEntryDistributionOrder [");
+ if (entry != null) {
+ builder.append("entry=")
+ .append(entry)
+ .append(", ");
+ }
+ if (upType != null) {
+ builder.append("upType=")
+ .append(upType)
+ .append(", ");
+ }
+ if (requestorController != null) {
+ builder.append("requestorController=")
+ .append(requestorController);
+ }
+ builder.append("]");
+ return builder.toString();
+ }
+}
package org.opendaylight.controller.forwardingrulesmanager.internal;
+import org.opendaylight.controller.clustering.services.ICacheUpdateAware;
import java.util.Dictionary;
import java.util.HashSet;
import java.util.Hashtable;
import org.slf4j.LoggerFactory;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.connectionmanager.IConnectionManager;
public class Activator extends ComponentActivatorAbstractBase {
protected static final Logger logger = LoggerFactory.getLogger(Activator.class);
public void configureInstance(Component c, Object imp, String containerName) {
if (imp.equals(ForwardingRulesManager.class)) {
String interfaces[] = null;
+ Dictionary<String, Object> props = new Hashtable<String, Object>();
+ Set<String> propSet = new HashSet<String>();
+ propSet.add(ForwardingRulesManager.WORKSTATUSCACHE);
+ propSet.add(ForwardingRulesManager.WORKORDERCACHE);
+ props.put("cachenames", propSet);
// export the service
interfaces = new String[] { IContainerListener.class.getName(), ISwitchManagerAware.class.getName(),
IForwardingRulesManager.class.getName(), IInventoryListener.class.getName(),
- IConfigurationContainerAware.class.getName(),
+ IConfigurationContainerAware.class.getName(), ICacheUpdateAware.class.getName(),
IFlowProgrammerListener.class.getName() };
- c.setInterface(interfaces, null);
+ c.setInterface(interfaces, props);
c.add(createContainerServiceDependency(containerName).setService(IFlowProgrammerService.class)
.setCallbacks("setFlowProgrammerService", "unsetFlowProgrammerService").setRequired(true));
.setCallbacks("setFrmAware", "unsetFrmAware").setRequired(false));
c.add(createContainerServiceDependency(containerName).setService(IContainer.class)
.setCallbacks("setIContainer", "unsetIContainer").setRequired(true));
+ c.add(createServiceDependency().setService(IConnectionManager.class)
+ .setCallbacks("setIConnectionManager", "unsetIConnectionManager")
+ .setRequired(true));
}
}
}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/**
+ * Class which will monitor the completion of a FlowEntryDistributionOrder it
+ * implements a Future interface so it can be inspected by who is waiting for
+ * it.
+ */
+package org.opendaylight.controller.forwardingrulesmanager.internal;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.opendaylight.controller.forwardingrulesmanager.implementation.data.FlowEntryDistributionOrder;
+import org.opendaylight.controller.sal.utils.Status;
+import org.opendaylight.controller.sal.utils.StatusCode;
+
+/**
+ * Class which will monitor the completion of a FlowEntryDistributionOrder it
+ * implements a Future interface so it can be inspected by who is waiting for
+ * it.
+ */
+final class FlowEntryDistributionOrderFutureTask implements Future<Status> {
+ private final FlowEntryDistributionOrder order;
+ private boolean amICancelled;
+ private CountDownLatch waitingLatch;
+ private Status retStatus;
+
+ /**
+ * @param order
+ * for which we are monitoring the execution
+ */
+ FlowEntryDistributionOrderFutureTask(FlowEntryDistributionOrder order) {
+ // Order being monitored
+ this.order = order;
+ this.amICancelled = false;
+ // We need to wait for one completion to happen
+ this.waitingLatch = new CountDownLatch(1);
+ // No return status yet!
+ this.retStatus = new Status(StatusCode.UNDEFINED);
+ }
+
+ @Override
+ public boolean cancel(boolean mayInterruptIfRunning) {
+ return false;
+ }
+
+ @Override
+ public Status get() throws InterruptedException, ExecutionException {
+ // If i'm done lets return the status as many times as caller wants
+ if (this.waitingLatch.getCount() == 0L) {
+ return retStatus;
+ }
+
+ // Wait till someone signal that we are done
+ this.waitingLatch.await();
+
+ // Return the known status
+ return retStatus;
+ }
+
+ @Override
+ public Status get(long timeout, TimeUnit unit) throws InterruptedException,
+ ExecutionException, TimeoutException {
+ // If i'm done lets return the status as many times as caller wants
+ if (this.waitingLatch.getCount() == 0L) {
+ return retStatus;
+ }
+
+ // Wait till someone signal that we are done
+ this.waitingLatch.await(timeout, unit);
+
+ // Return the known status, could also be null if didn't return
+ return retStatus;
+ }
+
+ @Override
+ public boolean isCancelled() {
+ return this.amICancelled;
+ }
+
+ @Override
+ public boolean isDone() {
+ return (this.waitingLatch.getCount() == 0L);
+ }
+
+ /**
+ * Used by the thread that gets back the status for the order so can unblock
+ * an eventual caller waiting on the result to comes back
+ *
+ * @param order
+ * @param retStatus
+ */
+ void gotStatus(FlowEntryDistributionOrder order, Status retStatus) {
+ if (order != this.order) {
+ // Weird we got a call for an order we didn't make
+ return;
+ }
+ this.retStatus = retStatus;
+ // Now we are not waiting any longer
+ this.waitingLatch.countDown();
+ }
+}
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import org.eclipse.osgi.framework.console.CommandInterpreter;
import org.eclipse.osgi.framework.console.CommandProvider;
import org.opendaylight.controller.clustering.services.CacheConfigException;
import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.ICacheUpdateAware;
import org.opendaylight.controller.clustering.services.IClusterContainerServices;
import org.opendaylight.controller.clustering.services.IClusterServices;
import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.connectionmanager.IConnectionManager;
import org.opendaylight.controller.forwardingrulesmanager.FlowConfig;
import org.opendaylight.controller.forwardingrulesmanager.FlowEntry;
import org.opendaylight.controller.forwardingrulesmanager.FlowEntryInstall;
import org.opendaylight.controller.forwardingrulesmanager.PortGroupChangeListener;
import org.opendaylight.controller.forwardingrulesmanager.PortGroupConfig;
import org.opendaylight.controller.forwardingrulesmanager.PortGroupProvider;
+import org.opendaylight.controller.forwardingrulesmanager.implementation.data.FlowEntryDistributionOrder;
import org.opendaylight.controller.sal.action.Action;
import org.opendaylight.controller.sal.action.ActionType;
import org.opendaylight.controller.sal.action.Controller;
* the network. It also maintains the central repository of all the forwarding
* rules installed on the network nodes.
*/
-public class ForwardingRulesManager implements IForwardingRulesManager, PortGroupChangeListener,
- IContainerListener, ISwitchManagerAware, IConfigurationContainerAware, IInventoryListener, IObjectReader,
- CommandProvider, IFlowProgrammerListener {
+public class ForwardingRulesManager implements
+ IForwardingRulesManager,
+ PortGroupChangeListener,
+ IContainerListener,
+ ISwitchManagerAware,
+ IConfigurationContainerAware,
+ IInventoryListener,
+ IObjectReader,
+ ICacheUpdateAware,
+ CommandProvider,
+ IFlowProgrammerListener {
private static final String NODEDOWN = "Node is Down";
private static final String SUCCESS = StatusCode.SUCCESS.toString();
private static final Logger log = LoggerFactory.getLogger(ForwardingRulesManager.class);
private static final String PORTREMOVED = "Port removed";
+ private static final Logger logsync = LoggerFactory.getLogger("FRMsync");
private String frmFileName;
private String portGroupFileName;
private ConcurrentMap<Integer, FlowConfig> staticFlows;
private Thread frmEventHandler;
protected BlockingQueue<FRMEvent> pendingEvents;
+ // Distributes FRM programming in the cluster
+ private IConnectionManager connectionManager;
+
+ /*
+ * Name clustered caches used to support FRM entry distribution these are by
+ * necessity non-transactional as long as need to be able to synchronize
+ * states also while a transaction is in progress
+ */
+ static final String WORKORDERCACHE = "frm.workOrder";
+ static final String WORKSTATUSCACHE = "frm.workStatus";
+
+ /*
+ * Data structure responsible for distributing the FlowEntryInstall requests
+ * in the cluster. The key value is entry that is being either Installed or
+ * Updated or Delete. The value field is the same of the key value in case
+ * of Installation or Deletion, it's the new entry in case of Modification,
+ * this because the clustering caches don't allow null values.
+ *
+ * The logic behind this data structure is that the controller that initiates
+ * the request will place the order here; someone will pick it up and then
+ * remove it from this data structure because it is being served.
+ *
+ * TODO: We need to have a way to clean up this data structure if entries are
+ * not picked by anyone, which is a case that can always happen, especially
+ * on Node disconnect.
+ */
+ private ConcurrentMap<FlowEntryDistributionOrder, FlowEntryInstall> workOrder;
+
+ /*
+ * Data structure responsible for retrieving the results of the workOrder
+ * submitted to the cluster.
+ *
+ * The logic behind this data structure is that the controller that has
+ * executed the order will then place the result in workStatus signaling
+ * that there was a success or a failure.
+ *
+ * TODO: The workStatus entries need to have a lifetime associated in case
+ * of requestor controller leaving the cluster.
+ */
+ private ConcurrentMap<FlowEntryDistributionOrder, Status> workStatus;
+
+ /*
+ * Local Map used to hold the Future which a caller can use to monitor for
+ * completion
+ */
+ private ConcurrentMap<FlowEntryDistributionOrder, FlowEntryDistributionOrderFutureTask> workMonitor =
+ new ConcurrentHashMap<FlowEntryDistributionOrder, FlowEntryDistributionOrderFutureTask>();
+
+ /**
+ * @param e
+ * Entry being installed/updated/removed
+ * @param u
+ * New entry will be placed after the update operation. Valid
+ * only for UpdateType.CHANGED, null for all the other cases
+ * @param t
+ * Type of update
+ * @return a Future object for monitoring the progress of the result, or
+ * null in case the processing should take place locally
+ */
+ private Future<Status> distributeWorkOrder(FlowEntryInstall e, FlowEntryInstall u, UpdateType t) {
+ // A null entry is an unexpected condition; it is safe anyway to keep
+ // the handling local
+ if (e == null) {
+ return null;
+ }
+
+ Node n = e.getNode();
+ if (!connectionManager.isLocal(n)) {
+ // Create the work order and distribute it
+ FlowEntryDistributionOrder fe =
+ new FlowEntryDistributionOrder(e, t, clusterContainerService.getMyAddress());
+ // First create the monitor job
+ FlowEntryDistributionOrderFutureTask ret = new FlowEntryDistributionOrderFutureTask(fe);
+ logsync.trace("Node {} not local so sending fe {}", n, fe);
+ workMonitor.put(fe, ret);
+ if (t.equals(UpdateType.CHANGED)) {
+ // Then distribute the work
+ workOrder.put(fe, u);
+ } else {
+ // Then distribute the work
+ workOrder.put(fe, e);
+ }
+ logsync.trace("WorkOrder requested");
+ // Now return a handle so the caller can monitor the execution of the operation
+ return ret;
+ }
+
+ logsync.trace("LOCAL Node {} so processing Entry:{} UpdateType:{}", n, e, t);
+
+ return null;
+ }
+
/**
* Adds a flow entry onto the network node It runs various validity checks
* and derive the final container flows merged entries that will be
* contain the unique id assigned to this request
*/
private Status modifyEntryInternal(FlowEntryInstall currentEntries, FlowEntryInstall newEntries, boolean async) {
- // Modify the flow on the network node
- Status status = (async) ? programmer.modifyFlowAsync(currentEntries.getNode(), currentEntries.getInstall()
- .getFlow(), newEntries.getInstall().getFlow()) : programmer.modifyFlow(currentEntries.getNode(),
- currentEntries.getInstall().getFlow(), newEntries.getInstall().getFlow());
+ Future<Status> futureStatus = distributeWorkOrder(currentEntries, newEntries, UpdateType.CHANGED);
+ if (futureStatus != null) {
+ Status retStatus = new Status(StatusCode.UNDEFINED);
+ try {
+ retStatus = futureStatus.get();
+ } catch (InterruptedException e) {
+ log.error("", e);
+ } catch (ExecutionException e) {
+ log.error("", e);
+ }
+ return retStatus;
+ } else {
+ // Modify the flow on the network node
+ Status status = async ? programmer.modifyFlowAsync(currentEntries.getNode(), currentEntries.getInstall()
+ .getFlow(), newEntries.getInstall()
+ .getFlow()) : programmer.modifyFlow(currentEntries.getNode(), currentEntries.getInstall()
+ .getFlow(), newEntries.getInstall()
+ .getFlow());
- if (!status.isSuccess()) {
- log.warn("SDN Plugin failed to program the flow: {}. The failure is: {}", newEntries.getInstall(),
- status.getDescription());
- return status;
- }
+ if (!status.isSuccess()) {
+ log.warn("SDN Plugin failed to program the flow: {}. The failure is: {}", newEntries.getInstall(),
+ status.getDescription());
+ return status;
+ }
- log.trace("Modified {} => {}", currentEntries.getInstall(), newEntries.getInstall());
+ log.trace("Modified {} => {}", currentEntries.getInstall(), newEntries.getInstall());
- // Update DB
- newEntries.setRequestId(status.getRequestId());
- updateLocalDatabase(currentEntries, false);
- updateLocalDatabase(newEntries, true);
+ // Update DB
+ newEntries.setRequestId(status.getRequestId());
+ updateLocalDatabase(currentEntries, false);
+ updateLocalDatabase(newEntries, true);
- return status;
+ return status;
+ }
}
/**
* contain the unique id assigned to this request
*/
private Status removeEntryInternal(FlowEntryInstall entry, boolean async) {
- // Mark the entry to be deleted (for CC just in case we fail)
- entry.toBeDeleted();
+ Future<Status> futureStatus = distributeWorkOrder(entry, null, UpdateType.REMOVED);
+ if (futureStatus != null) {
+ Status retStatus = new Status(StatusCode.UNDEFINED);
+ try {
+ retStatus = futureStatus.get();
+ } catch (InterruptedException e) {
+ log.error("", e);
+ } catch (ExecutionException e) {
+ log.error("", e);
+ }
+ return retStatus;
+ } else {
+ // Mark the entry to be deleted (for CC just in case we fail)
+ entry.toBeDeleted();
- // Remove from node
- Status status = (async) ? programmer.removeFlowAsync(entry.getNode(), entry.getInstall().getFlow())
- : programmer.removeFlow(entry.getNode(), entry.getInstall().getFlow());
+ // Remove from node
+ Status status = async ? programmer.removeFlowAsync(entry.getNode(), entry.getInstall()
+ .getFlow()) : programmer.removeFlow(entry.getNode(), entry.getInstall()
+ .getFlow());
- if (!status.isSuccess()) {
- log.warn("SDN Plugin failed to program the flow: {}. The failure is: {}", entry.getInstall(),
- status.getDescription());
- return status;
- }
- log.trace("Removed {}", entry.getInstall());
+ if (!status.isSuccess()) {
+ log.warn("SDN Plugin failed to program the flow: {}. The failure is: {}", entry.getInstall(),
+ status.getDescription());
+ return status;
+ }
+ log.trace("Removed {}", entry.getInstall());
- // Update DB
- updateLocalDatabase(entry, false);
+ // Update DB
+ updateLocalDatabase(entry, false);
- return status;
+ return status;
+ }
}
/**
* contain the unique id assigned to this request
*/
private Status addEntriesInternal(FlowEntryInstall entry, boolean async) {
- // Install the flow on the network node
- Status status = (async) ? programmer.addFlowAsync(entry.getNode(), entry.getInstall().getFlow()) : programmer
- .addFlow(entry.getNode(), entry.getInstall().getFlow());
+ Future<Status> futureStatus = distributeWorkOrder(entry, null, UpdateType.ADDED);
+ if (futureStatus != null) {
+ Status retStatus = new Status(StatusCode.UNDEFINED);
+ try {
+ retStatus = futureStatus.get();
+ } catch (InterruptedException e) {
+ log.error("", e);
+ } catch (ExecutionException e) {
+ log.error("", e);
+ }
+ return retStatus;
+ } else {
+ // Install the flow on the network node
+ Status status = async ? programmer.addFlowAsync(entry.getNode(), entry.getInstall()
+ .getFlow()) : programmer.addFlow(entry.getNode(), entry.getInstall()
+ .getFlow());
- if (!status.isSuccess()) {
- log.warn("SDN Plugin failed to program the flow: {}. The failure is: {}", entry.getInstall(),
- status.getDescription());
- return status;
- }
+ if (!status.isSuccess()) {
+ log.warn("SDN Plugin failed to program the flow: {}. The failure is: {}", entry.getInstall(),
+ status.getDescription());
+ return status;
+ }
- log.trace("Added {}", entry.getInstall());
+ log.trace("Added {}", entry.getInstall());
- // Update DB
- entry.setRequestId(status.getRequestId());
- updateLocalDatabase(entry, true);
+ // Update DB
+ entry.setRequestId(status.getRequestId());
+ updateLocalDatabase(entry, true);
- return status;
+ return status;
+ }
}
/**
clusterContainerService.createCache("frm.TSPolicies",
EnumSet.of(IClusterServices.cacheMode.TRANSACTIONAL));
+ clusterContainerService.createCache(WORKSTATUSCACHE,
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+
+ clusterContainerService.createCache(WORKORDERCACHE,
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+
} catch (CacheConfigException cce) {
log.error("CacheConfigException");
} catch (CacheExistException cce) {
log.error("Retrieval of frm.TSPolicies cache failed for Container {}", container.getName());
}
+ map = clusterContainerService.getCache(WORKORDERCACHE);
+ if (map != null) {
+ workOrder = (ConcurrentMap<FlowEntryDistributionOrder, FlowEntryInstall>) map;
+ } else {
+ log.error("Retrieval of " + WORKORDERCACHE + " cache failed for Container {}", container.getName());
+ }
+
+ map = clusterContainerService.getCache(WORKSTATUSCACHE);
+ if (map != null) {
+ workStatus = (ConcurrentMap<FlowEntryDistributionOrder, Status>) map;
+ } else {
+ log.error("Retrieval of " + WORKSTATUSCACHE + " cache failed for Container {}", container.getName());
+ }
}
private boolean flowConfigExists(FlowConfig config) {
} else if (event instanceof ErrorReportedEvent) {
ErrorReportedEvent errEvent = (ErrorReportedEvent) event;
processErrorEvent(errEvent);
+ } else if (event instanceof WorkOrderEvent) {
+ /*
+ * Take care of handling the remote Work request
+ */
+ WorkOrderEvent work = (WorkOrderEvent) event;
+ FlowEntryDistributionOrder fe = work.getFe();
+ if (fe != null) {
+ logsync.trace("Executing the workOrder {}", fe);
+ Status gotStatus = null;
+ FlowEntryInstall feiCurrent = fe.getEntry();
+ FlowEntryInstall feiNew = workOrder.get(fe.getEntry());
+ switch (fe.getUpType()) {
+ case ADDED:
+ /*
+ * TODO: Not still sure how to handle the
+ * sync entries
+ */
+ gotStatus = addEntriesInternal(feiCurrent, true);
+ break;
+ case CHANGED:
+ gotStatus = modifyEntryInternal(feiCurrent, feiNew, true);
+ break;
+ case REMOVED:
+ gotStatus = removeEntryInternal(feiCurrent, true);
+ break;
+ }
+ // Remove the Order
+ workOrder.remove(fe);
+ logsync.trace(
+ "The workOrder has been executed and now the status is being returned {}", fe);
+ // Place the status
+ workStatus.put(fe, gotStatus);
+ } else {
+ log.warn("Not expected null WorkOrder", work);
+ }
+ } else if (event instanceof WorkStatusCleanup) {
+ /*
+ * Take care of handling the remote Work request
+ */
+ WorkStatusCleanup work = (WorkStatusCleanup) event;
+ FlowEntryDistributionOrder fe = work.getFe();
+ if (fe != null) {
+ logsync.trace("The workStatus {} is being removed", fe);
+ workStatus.remove(fe);
+ } else {
+ log.warn("Not expected null WorkStatus", work);
+ }
} else {
- log.warn("Dequeued unknown event {}", event.getClass().getSimpleName());
+ log.warn("Dequeued unknown event {}", event.getClass()
+ .getSimpleName());
}
} catch (InterruptedException e) {
- log.warn("FRM EventHandler thread interrupted", e);
+ // clear pending events
+ pendingEvents.clear();
}
}
}
*
*/
void destroy() {
+ // Interrupt the thread
+ frmEventHandler.interrupt();
+ // Clear the pendingEvents queue
+ pendingEvents.clear();
frmAware.clear();
+ workMonitor.clear();
}
/**
}
}
+ private class WorkOrderEvent extends FRMEvent {
+ // Internal event used to queue a remote work order onto the FRM
+ // event-handler thread for local execution.
+ private FlowEntryDistributionOrder fe;
+ // NOTE(review): presumably the replacement entry for CHANGED orders
+ // and the same entry otherwise, mirroring the workOrder cache value —
+ // confirm against distributeWorkOrder().
+ private FlowEntryInstall newEntry;
+
+ /**
+ * @param fe
+ * the distributed order to execute locally
+ * @param newEntry
+ * value associated to the order in the workOrder cache
+ */
+ WorkOrderEvent(FlowEntryDistributionOrder fe, FlowEntryInstall newEntry) {
+ this.fe = fe;
+ this.newEntry = newEntry;
+ }
+
+ /**
+ * @return the fe
+ */
+ public FlowEntryDistributionOrder getFe() {
+ return fe;
+ }
+
+ /**
+ * @return the newEntry
+ */
+ public FlowEntryInstall getNewEntry() {
+ return newEntry;
+ }
+ }
+
+ private class WorkStatusCleanup extends FRMEvent {
+ // Internal event used to ask the FRM event-handler thread to remove
+ // a consumed entry from the workStatus clustered cache.
+ private FlowEntryDistributionOrder fe;
+
+ /**
+ * @param fe
+ * the order whose status entry should be removed
+ */
+ WorkStatusCleanup(FlowEntryDistributionOrder fe) {
+ this.fe = fe;
+ }
+
+ /**
+ * @return the fe
+ */
+ public FlowEntryDistributionOrder getFe() {
+ return fe;
+ }
+ }
+
/*
* OSGI COMMANDS
*/
return rv;
}
+
+ public void unsetIConnectionManager(IConnectionManager s) {
+ if (s == this.connectionManager) {
+ this.connectionManager = null;
+ }
+ }
+
+ public void setIConnectionManager(IConnectionManager s) {
+ this.connectionManager = s;
+ }
+
+ @Override
+ public void entryCreated(Object key, String cacheName, boolean originLocal) {
+ /*
+ * Do nothing
+ */
+ }
+
+ @Override
+ public void entryUpdated(Object key, Object new_value, String cacheName, boolean originLocal) {
+ if (originLocal) {
+ /*
+ * Local updates are of no interest
+ */
+ return;
+ }
+ if (cacheName.equals(WORKORDERCACHE)) {
+ logsync.trace("Got a WorkOrderCacheUpdate for {}", key);
+ /*
+ * This is the case of one workOrder becoming available, so we need
+ * to dispatch the work to the appropriate handler
+ */
+ FlowEntryDistributionOrder fe = (FlowEntryDistributionOrder) key;
+ FlowEntryInstall fei = fe.getEntry();
+ if (fei == null) {
+ return;
+ }
+ Node n = fei.getNode();
+ if (connectionManager.isLocal(n)) {
+ logsync.trace("workOrder for fe {} processed locally", fe);
+ // I'm the controller in charge for the request, queue it for
+ // processing
+ pendingEvents.offer(new WorkOrderEvent(fe, (FlowEntryInstall) new_value));
+ }
+ } else if (cacheName.equals(WORKSTATUSCACHE)) {
+ logsync.trace("Got a WorkStatusCacheUpdate for {}", key);
+ /*
+ * This is the case of one workOrder being completed and a status
+ * returned
+ */
+ FlowEntryDistributionOrder fe = (FlowEntryDistributionOrder) key;
+ /*
+ * Check if the order was initiated by this controller in that case
+ * we need to actually look at the status returned
+ */
+ if (fe.getRequestorController()
+ .equals(clusterContainerService.getMyAddress())) {
+ FlowEntryDistributionOrderFutureTask fet = workMonitor.get(fe);
+ if (fet != null) {
+ logsync.trace("workStatus response is for us {}", fe);
+ // Signal we got the status
+ fet.gotStatus(fe, workStatus.get(fe));
+ pendingEvents.offer(new WorkStatusCleanup(fe));
+ }
+ }
+ }
+ }
+
+ @Override
+ public void entryDeleted(Object key, String cacheName, boolean originLocal) {
+ /*
+ * Do nothing
+ */
+ }
}
<artifactId>hosttracker.implementation</artifactId>
<version>0.4.0-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>connectionmanager</artifactId>
+ <version>0.1.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>connectionmanager.implementation</artifactId>
+ <version>0.1.0-SNAPSHOT</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>configuration</artifactId>
<version>0.4.0-SNAPSHOT</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal.connection</artifactId>
+ <version>0.1.0-SNAPSHOT</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal.connection.implementation</artifactId>
+ <version>0.1.0-SNAPSHOT</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>configuration.implementation</artifactId>
import org.opendaylight.controller.forwardingrulesmanager.IForwardingRulesManager;
import org.opendaylight.controller.sal.action.Action;
import org.opendaylight.controller.sal.action.Drop;
+import org.opendaylight.controller.sal.core.ConstructionException;
import org.opendaylight.controller.sal.core.Node;
import org.opendaylight.controller.sal.flowprogrammer.Flow;
import org.opendaylight.controller.sal.match.Match;
.versionAsInProject(),
mavenBundle("org.opendaylight.controller",
"hosttracker.implementation").versionAsInProject(),
+ mavenBundle("org.opendaylight.controller",
+ "connectionmanager.implementation").versionAsInProject(),
+ mavenBundle("org.opendaylight.controller",
+ "connectionmanager").versionAsInProject(),
+ mavenBundle("org.opendaylight.controller",
+ "sal.connection").versionAsInProject(),
+ mavenBundle("org.opendaylight.controller",
+ "sal.connection.implementation").versionAsInProject(),
// needed by hosttracker
mavenBundle("org.opendaylight.controller", "topologymanager")
List<Action> actions = new ArrayList<Action>();
actions.add(action);
flow.setActions(actions);
-
- Node node = NodeCreator.createOFNode(1L);
- FlowEntry fe = new FlowEntry("g1", "f1", flow, node);
-
- Status stat = manager.installFlowEntry(fe);
-
- // OF plugin is not there in integration testing mode
- Assert.assertTrue(stat.getCode() == StatusCode.NOSERVICE);
+ Node node;
+ try {
+ // Must use a node published by the stub protocol plugin else
+ // connection manager will not report it as a local node
+ node = new Node("STUB", 51966);
+ FlowEntry fe = new FlowEntry("g1", "f1", flow, node);
+ Status stat = manager.installFlowEntry(fe);
+
+ Assert.assertTrue(stat.getCode() == StatusCode.SUCCESS);
+ } catch (ConstructionException e) {
+ // Got a failure while allocating the node
+ Assert.assertTrue(false);
+ }
}
-
}
package org.opendaylight.controller.hosttracker.hostAware;
-import java.io.Serializable;
import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
@XmlRootElement(name = "host")
@XmlAccessorType(XmlAccessType.NONE)
-public class HostNodeConnector extends Host implements Serializable {
+public class HostNodeConnector extends Host {
private static final long serialVersionUID = 1L;
@XmlElement
private NodeConnector nodeConnector;
@Override
public boolean equals(Object obj) {
- if (this == obj)
+ if (this == obj) {
return true;
- if (!super.equals(obj))
+ }
+ if (!super.equals(obj)) {
return false;
- if (getClass() != obj.getClass())
+ }
+ if (getClass() != obj.getClass()) {
return false;
+ }
HostNodeConnector other = (HostNodeConnector) obj;
if (nodeConnector == null) {
- if (other.nodeConnector != null)
+ if (other.nodeConnector != null) {
return false;
- } else if (!nodeConnector.equals(other.nodeConnector))
+ }
+ } else if (!nodeConnector.equals(other.nodeConnector)) {
return false;
- if (staticHost != other.staticHost)
+ }
+ if (staticHost != other.staticHost) {
return false;
- if (vlan != other.vlan)
+ }
+ if (vlan != other.vlan) {
return false;
+ }
return true;
}
EthernetAddress e = (EthernetAddress) getDataLayerAddress();
macaddr = e.getValue();
}
- if (macaddr == null)
+ if (macaddr == null) {
return false;
+ }
return !Arrays.equals(emptyArray, macaddr);
}
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Object#toString()
+ */
@Override
public String toString() {
- return "HostNodeConnector [nodeConnector=" + nodeConnector + ", vlan="
- + vlan + ", staticHost=" + staticHost + "]";
+ StringBuilder builder = new StringBuilder();
+ builder.append("HostNodeConnector [");
+ if (nodeConnector != null) {
+ builder.append("nodeConnector=")
+ .append(nodeConnector)
+ .append(", ");
+ }
+ builder.append("vlan=")
+ .append(vlan)
+ .append(", staticHost=")
+ .append(staticHost)
+ .append(", arpSendCountDown=")
+ .append(arpSendCountDown)
+ .append("]");
+ return builder.toString();
}
public boolean isV4Host() {
* are done by the ComponentActivatorAbstractBase.
*
*/
+ @Override
public void init() {
}
* ComponentActivatorAbstractBase
*
*/
+ @Override
public void destroy() {
}
* instantiated in order to get an fully working implementation
* Object
*/
+ @Override
public Object[] getImplementations() {
Object[] res = { HostTracker.class };
return res;
* per-container different behavior if needed, usually should not
* be the case though.
*/
+ @Override
public void configureInstance(Component c, Object imp, String containerName) {
if (imp.equals(HostTracker.class)) {
Dictionary<String, Object> props = new Hashtable<String, Object>();
.setRequired(false));
}
}
-
- /**
- * Method which tells how many Global implementations are supported by the
- * bundle. This way we can tune the number of components created. This
- * components will be created ONLY at the time of bundle startup and will be
- * destroyed only at time of bundle destruction, this is the major
- * difference with the implementation retrieved via getImplementations where
- * all of them are assumed to be in a container !
- *
- *
- * @return The list of implementations the bundle will support, in Global
- * version
- */
- protected Object[] getGlobalImplementations() {
- return null;
- }
-
- /**
- * Configure the dependency for a given instance Global
- *
- * @param c
- * Component assigned for this instance, this will be what will
- * be used for configuration
- * @param imp
- * implementation to be configured
- * @param containerName
- * container on which the configuration happens
- */
- protected void configureGlobalInstance(Component c, Object imp) {
- if (imp.equals(HostTracker.class)) {
- }
- }
}
static final String ACTIVE_HOST_CACHE = "hosttracker.ActiveHosts";
static final String INACTIVE_HOST_CACHE = "hosttracker.InactiveHosts";
private static final Logger logger = LoggerFactory.getLogger(HostTracker.class);
- private IHostFinder hostFinder;
- private ConcurrentMap<InetAddress, HostNodeConnector> hostsDB;
+ protected IHostFinder hostFinder;
+ protected ConcurrentMap<InetAddress, HostNodeConnector> hostsDB;
/*
* Following is a list of hosts which have been requested by NB APIs to be
* added, but either the switch or the port is not sup, so they will be
private final Set<IfNewHostNotify> newHostNotify = Collections.synchronizedSet(new HashSet<IfNewHostNotify>());
private ITopologyManager topologyManager;
- private IClusterContainerServices clusterContainerService = null;
- private ISwitchManager switchManager = null;
+ protected IClusterContainerServices clusterContainerService = null;
+ protected ISwitchManager switchManager = null;
private Timer timer;
private Timer arpRefreshTimer;
private String containerName = null;
private ExecutorService executor;
+ protected boolean stopping;
private static class ARPPending {
protected InetAddress hostIP;
protected short sent_count;
hostTrackerCallable = callable;
}
}
+
// This list contains the hosts for which ARP requests are being sent
// periodically
ConcurrentMap<InetAddress, ARPPending> ARPPendingList;
nonClusterObjectCreate();
allocateCache();
retrieveCache();
+ stopping = false;
timer = new Timer();
timer.schedule(new OutStandingARPHandler(), 4000, 4000);
logger.debug("startUp: Caches created, timers started");
}
- @SuppressWarnings("deprecation")
private void allocateCache() {
if (this.clusterContainerService == null) {
logger.error("un-initialized clusterContainerService, can't create cache");
logger.debug("Cache successfully created for HostTracker");
}
- @SuppressWarnings({ "unchecked", "deprecation" })
+ @SuppressWarnings({ "unchecked" })
private void retrieveCache() {
if (this.clusterContainerService == null) {
logger.error("un-initialized clusterContainerService, can't retrieve cache");
failedARPReqList = new ConcurrentHashMap<InetAddress, ARPPending>();
}
-
public void shutDown() {
}
/* Add this host to ARPPending List for any potential retries */
- AddtoARPPendingList(networkAddress);
+ addToARPPendingList(networkAddress);
logger.debug("hostFind(): Host Not Found for IP: {}, Inititated Host Discovery ...",
networkAddress.getHostAddress());
return list;
}
- private void AddtoARPPendingList(InetAddress networkAddr) {
+ private void addToARPPendingList(InetAddress networkAddr) {
ARPPending arphost = new ARPPending();
arphost.setHostIP(networkAddr);
public void setCallableOnPendingARP(InetAddress networkAddr, HostTrackerCallable callable) {
ARPPending arphost;
- for (Entry <InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
if (arphost.getHostIP().equals(networkAddr)) {
arphost.setHostTrackerCallable(callable);
// Remove the arphost from ARPPendingList as it has been learned now
logger.debug("Host Removed from ARPPending List, IP: {}", networkAddr);
HostTrackerCallable htCallable = arphost.getHostTrackerCallable();
- if (htCallable != null)
+ if (htCallable != null) {
htCallable.wakeup();
+ }
return;
}
* It could have been a host from the FailedARPReqList
*/
- if (failedARPReqList.containsKey(networkAddr)) {
+ if (failedARPReqList.containsKey(networkAddr)) {
failedARPReqList.remove(networkAddr);
logger.debug("Host Removed from FailedARPReqList List, IP: {}", networkAddr);
}
replaceHost(networkAddr, removedHost, host);
return;
} else {
- logger.error("Host to be removed not found in hostsDB. Host {}", removedHost);
+ logger.error("Host to be removed not found in hostsDB");
}
}
- if (removedHost == null) {
- // It is a new host
- learnNewHost(host);
- }
+ // It is a new host
+ learnNewHost(host);
/* check if there is an outstanding request for this host */
processPendingARPReqs(networkAddr);
if (hostExists(host)) {
HostNodeConnector existinghost = hostsDB.get(host.getNetworkAddress());
existinghost.initArpSendCountDown();
+ // Update the host
+ hostsDB.put(host.getNetworkAddress(), existinghost);
return;
}
new NotifyHostThread(host).start();
* @param currentTier
* The Tier on which n belongs
*/
+ @SuppressWarnings("unused")
private void updateSwitchTiers(Node n, int currentTier) {
Map<Node, Set<Edge>> ndlinks = topologyManager.getNodeEdges();
if (ndlinks == null) {
}
// This is the case where Tier was never set for this node
Tier t = (Tier) switchManager.getNodeProp(n, Tier.TierPropName);
- if (t == null)
+ if (t == null) {
return true;
- if (t.getValue() == 0)
+ }
+ if (t.getValue() == 0) {
return true;
- else if (t.getValue() > tier)
+ } else if (t.getValue() > tier) {
return true;
+ }
return false;
}
* cleanup is performed during cases such as Topology Change where the
* existing Tier values might become incorrect
*/
+ @SuppressWarnings("unused")
private void clearTiers() {
Set<Node> nodes = null;
if (switchManager == null) {
@Override
public List<List<String>> getHostNetworkHierarchy(InetAddress hostAddress) {
HostNodeConnector host = hostQuery(hostAddress);
- if (host == null)
+ if (host == null) {
return null;
+ }
List<List<String>> hierarchies = new ArrayList<List<String>>();
ArrayList<String> currHierarchy = new ArrayList<String>();
int result = 0;
for (int i = 0; i < hex.length(); i++) {
result = (int) ((dpid >> (i * 8)) & 0xff);
- if (result == 0)
+ if (result == 0) {
continue;
- if (result < 0x30)
+ }
+ if (result < 0x30) {
result += 0x40;
+ }
sb.append(String.format("%c", result));
}
return sb.reverse().toString();
public void subnetNotify(Subnet sub, boolean add) {
logger.debug("Received subnet notification: {} add={}", sub, add);
if (add) {
- for (Entry <InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
ARPPending arphost;
arphost = entry.getValue();
if (hostFinder == null) {
logger.warn("ARPHandler Services are not available on subnet addition");
continue;
}
- logger.debug("Sending the ARP from FailedARPReqList fors IP: {}", arphost.getHostIP().getHostAddress());
- hostFinder.find(arphost.getHostIP());
+ logger.debug("Sending the ARP from FailedARPReqList for IP: {}", arphost.getHostIP().getHostAddress());
+ hostFinder.find(arphost.getHostIP());
}
}
}
class OutStandingARPHandler extends TimerTask {
@Override
public void run() {
+ if (stopping) {
+ return;
+ }
ARPPending arphost;
-
/* This routine runs every 4 seconds */
logger.trace("Number of Entries in ARP Pending/Failed Lists: ARPPendingList = {}, failedARPReqList = {}",
ARPPendingList.size(), failedARPReqList.size());
- for (Entry <InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
if (hostsDB.containsKey(arphost.getHostIP())) {
- // this host is already learned, shouldn't be in ARPPendingList
+ // this host is already learned, shouldn't be in
+ // ARPPendingList
// Remove it and continue
logger.warn("Learned Host {} found in ARPPendingList", arphost.getHostIP());
ARPPendingList.remove(entry.getKey());
if (arphost.getSent_count() < switchManager.getHostRetryCount()) {
/*
* No reply has been received of first ARP Req, send the
- * next one. Before sending the ARP, check if ARPHandler
- * is available or not
+ * next one. Before sending the ARP, check if ARPHandler is
+ * available or not
*/
if (hostFinder == null) {
logger.warn("ARPHandler Services are not available for Outstanding ARPs");
logger.debug("ARP Sent from ARPPending List, IP: {}", arphost.getHostIP().getHostAddress());
} else if (arphost.getSent_count() >= switchManager.getHostRetryCount()) {
/*
- * ARP requests have been sent without receiving a
- * reply, remove this from the pending list
+ * ARP requests have been sent without receiving a reply,
+ * remove this from the pending list
*/
ARPPendingList.remove(entry.getKey());
logger.debug("ARP reply not received after multiple attempts, removing from Pending List IP: {}",
private class ARPRefreshHandler extends TimerTask {
@Override
- @SuppressWarnings("deprecation")
public void run() {
+ if (stopping) {
+ return;
+ }
if ((clusterContainerService != null) && !clusterContainerService.amICoordinator()) {
return;
}
@Override
public void notifyNode(Node node, UpdateType type, Map<String, Property> propMap) {
- if (node == null)
+ if (node == null) {
return;
+ }
switch (type) {
case REMOVED:
@Override
public void notifyNodeConnector(NodeConnector nodeConnector, UpdateType type, Map<String, Property> propMap) {
- if (nodeConnector == null)
+ if (nodeConnector == null) {
return;
+ }
boolean up = false;
switch (type) {
logger.debug("handleNodeConnectorStatusUp {}", nodeConnector);
- for (Entry <InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
arphost = entry.getValue();
logger.debug("Sending the ARP from FailedARPReqList fors IP: {}", arphost.getHostIP().getHostAddress());
if (hostFinder == null) {
}
public String getContainerName() {
- if (containerName == null)
+ if (containerName == null) {
return GlobalConstants.DEFAULT.toString();
+ }
return containerName;
}
* calls
*
*/
- void stop(){
+ void stop() {
}
void stopping() {
+ stopping = true;
arpRefreshTimer.cancel();
timer.cancel();
- executor.shutdown();
+ executor.shutdownNow();
}
@Override
public void edgeOverUtilized(Edge edge) {
- // TODO Auto-generated method stub
}
@Override
public void edgeUtilBackToNormal(Edge edge) {
- // TODO Auto-generated method stub
}
@Override
- public void entryCreated(InetAddress key, String cacheName,
- boolean originLocal) {
- if (originLocal) return;
+ public void entryCreated(InetAddress key, String cacheName, boolean originLocal) {
+ if (originLocal) {
+ return;
+ }
processPendingARPReqs(key);
}
@Override
- public void entryUpdated(InetAddress key, HostNodeConnector new_value,
- String cacheName, boolean originLocal) {
+ public void entryUpdated(InetAddress key, HostNodeConnector new_value, String cacheName, boolean originLocal) {
}
@Override
- public void entryDeleted(InetAddress key, String cacheName,
- boolean originLocal) {
+ public void entryDeleted(InetAddress key, String cacheName, boolean originLocal) {
}
private void registerWithOSGIConsole() {
@Override
public String getHelp() {
- // TODO Auto-generated method stub
return null;
}
public void _dumpPendingARPReqList(CommandInterpreter ci) {
ARPPending arphost;
- for (Entry <InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
+ for (Entry<InetAddress, ARPPending> entry : ARPPendingList.entrySet()) {
arphost = entry.getValue();
ci.println(arphost.getHostIP().toString());
}
public void _dumpFailedARPReqList(CommandInterpreter ci) {
ARPPending arphost;
- for (Entry <InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
+ for (Entry<InetAddress, ARPPending> entry : failedARPReqList.entrySet()) {
arphost = entry.getValue();
ci.println(arphost.getHostIP().toString());
}
\r
import java.net.InetAddress;\r
import java.net.UnknownHostException;\r
-import java.util.concurrent.Future;\r
-\r
import junit.framework.TestCase;\r
\r
import org.junit.Assert;\r
import org.junit.Test;\r
-import org.opendaylight.controller.hosttracker.hostAware.HostNodeConnector;\r
\r
public class HostTrackerTest extends TestCase {\r
\r
\r
long count = htCallable.latch.getCount();\r
htCallable.wakeup();\r
- Assert.assertTrue(htCallable.latch.getCount() == --count);\r
+ Assert.assertTrue(htCallable.latch.getCount() == (count - 1));\r
}\r
\r
@Test\r
\r
InetAddress hostIP_1 = InetAddress.getByName("192.168.0.8");\r
InetAddress hostIP_2 = InetAddress.getByName("192.168.0.18");\r
- Future<HostNodeConnector> dschost = hostTracker.discoverHost(hostIP_1);\r
- dschost = hostTracker.discoverHost(hostIP_2);\r
+ hostTracker.discoverHost(hostIP_1);\r
+ hostTracker.discoverHost(hostIP_2);\r
hostTracker.nonClusterObjectCreate();\r
}\r
\r
package org.opendaylight.controller.sal.utils;
+import java.io.Serializable;
+
/**
* Represents the return object of the osgi service interfaces function calls.
* It contains a code {@code StatusCode} representing the result of the call and
* a string which describes a failure reason (if any) in human readable form.
*/
-public class Status {
+public class Status implements Serializable {
private StatusCode code;
private String description;
private long requestId;
' privilege=' + data.privilege + '>' + nodeNameEntry + '</a>';
var ports = item.ports;
+ var portsMatch = ports.match(/<\/span>/g);
+ var portsLength = 0;
+ if (portsMatch != null) {
+ portsLength = portsMatch.length;
+ }
item.ports = '<span class="nodePorts" style="cursor:pointer;color: #08c" ports='+encodeURIComponent(JSON.stringify(item.ports)) + ' nodeId=' + item.nodeId
+ ' nodeName=' + nodeName
- + '>' + ports.match(/<\/span>/g).length+'</span>';
+ + '>' + portsLength +'</span>';
});
},
delay: 0
formatter: function(items) {
$.each(items, function (index, item) {
var ports = item.ports;
+ var portsMatch = ports.match(/<\/span>/g);
+ var portsLength = 0;
+ if (portsMatch != null) {
+ portsLength = portsMatch.length;
+ }
item.ports = '<span class="nodePorts" style="cursor: pointer;color: #08c" ports='+encodeURIComponent(JSON.stringify(item.ports)) + ' nodeId=' + item.nodeId
+ ' nodeName=' + item.nodeName
- + '>' + ports.match(/<\/span>/g).length+'</span>';
+ + '>' + portsLength +'</span>';
});
},
delay: 0
$dashlet.append($button);
// Delete gateway ip address button
- var button = one.lib.dashlet.button.single("Delete Gateway IP Address(es)",
- one.f.switchmanager.subnetGatewayConfig.id.dashlet.removeIPAddress, "btn-primary", "btn-mini");
+ var button = one.lib.dashlet.button.single("Delete Gateway IP Address",
+ one.f.switchmanager.subnetGatewayConfig.id.dashlet.removeIPAddress, "btn-danger", "btn-mini");
var $button = one.lib.dashlet.button.button(button);
$button.click(function() {
var requestData = {};
checkedCheckBoxes.each(function(index, value) {
gatewaysToDelete.push(checkedCheckBoxes[index].id);
});
+ if (checkedCheckBoxes.size() === 0) {
+ alert('Please select at least one gateway');
+ return false;
+ }
if (gatewaysToDelete.length > 0) {
requestData["gatewaysToDelete"] = gatewaysToDelete.toString();
var url = one.f.switchmanager.rootUrl + "/subnetGateway/delete";
$("#" + one.f.switchmanager.subnetGatewayConfig.id.dashlet.datagrid).find(':checkbox').prop('checked',
$("#"+one.f.switchmanager.subnetGatewayConfig.id.dashlet.selectAll).is(':checked'));
});
- $(".subnetGatewayConfig").click(function(){
+ $(".subnetGatewayConfig").click(function(e){
if (!$('.subnetGatewayConfig[type=checkbox]:not(:checked)').length) {
$("#"+one.f.switchmanager.subnetGatewayConfig.id.dashlet.selectAll)
.prop("checked",
.prop("checked",
false);
}
- event.stopPropagation();
+ e.stopPropagation();
});
});
});
$dashlet.append($button);
// Delete static route button
- var button = one.lib.dashlet.button.single("Delete Static Route(s)", one.f.switchmanager.staticRouteConfig.id.dashlet.remove, "btn-primary", "btn-mini");
+ var button = one.lib.dashlet.button.single("Delete Static Route(s)", one.f.switchmanager.staticRouteConfig.id.dashlet.remove, "btn-danger", "btn-mini");
var $button = one.lib.dashlet.button.button(button);
$button.click(function() {
var requestData = {};
checkedCheckBoxes.each(function(index, value) {
routesToDelete.push(checkedCheckBoxes[index].id);
});
+ if (checkedCheckBoxes.size() === 0) {
+ alert('Please select at least one static route');
+ return false;
+ }
if (routesToDelete.length > 0) {
requestData["routesToDelete"] = routesToDelete.toString();
var url = one.f.switchmanager.rootUrl + "/staticRoute/delete";
$("#" + one.f.switchmanager.staticRouteConfig.id.dashlet.datagrid).find(':checkbox').prop('checked',
$("#"+one.f.switchmanager.staticRouteConfig.id.dashlet.selectAll).is(':checked'));
});
- $(".staticRoute").click(function(){
+ $(".staticRoute").click(function(e){
if (!$('.staticRoute[type=checkbox]:not(:checked)').length) {
$("#"+one.f.switchmanager.staticRouteConfig.id.dashlet.selectAll)
.prop("checked",
.prop("checked",
false);
}
- event.stopPropagation();
+ e.stopPropagation();
});
});
});
$dashlet.append($button);
// Delete span port button
- var button = one.lib.dashlet.button.single("Delete SPAN Port(s)", one.f.switchmanager.spanPortConfig.id.dashlet.remove, "btn-primary", "btn-mini");
+ var button = one.lib.dashlet.button.single("Delete SPAN Port(s)", one.f.switchmanager.spanPortConfig.id.dashlet.remove, "btn-danger", "btn-mini");
var $button = one.lib.dashlet.button.button(button);
$button.click(function() {
checkedCheckBoxes.each(function(index, value) {
spanPortsToDelete += decodeURIComponent(checkedCheckBoxes[index].getAttribute("spanPort")) + "###";
});
+ if (checkedCheckBoxes.size() === 0) {
+ alert('Please select at least one SPAN port');
+ return false;
+ }
var requestData = {};
requestData["spanPortsToDelete"] = spanPortsToDelete;
$("#" + one.f.switchmanager.spanPortConfig.id.dashlet.datagrid).find(':checkbox').prop('checked',
$("#"+one.f.switchmanager.spanPortConfig.id.dashlet.selectAll).is(':checked'));
});
- $(".spanPortConfig").click(function(){
+ $(".spanPortConfig").click(function(e){
if (!$('.spanPortConfig[type=checkbox]:not(:checked)').length) {
$("#"+one.f.switchmanager.spanPortConfig.id.dashlet.selectAll)
.prop("checked",
.prop("checked",
false);
}
- event.stopPropagation();
+ e.stopPropagation();
});
});
});
$modal.modal();
});
$dashlet.append($button);
- var button = one.lib.dashlet.button.single("Remove Flow Entry", one.f.flows.id.dashlet.removeMultiple, "btn-primary", "btn-mini");
+ var button = one.lib.dashlet.button.single("Remove Flow Entry", one.f.flows.id.dashlet.removeMultiple, "btn-danger", "btn-mini");
var $button = one.lib.dashlet.button.button(button);
$button.click(function() {
var checkedCheckBoxes = $('.flowEntry[type=checkbox]:checked');
+ if (checkedCheckBoxes.size() === 0) {
+ alert('Please select at least one flow');
+ return false;
+ }
var requestData = [];
var node = $td.find("span").data("nodeid");
one.f.flows.detail(id, node);
});
- $(".flowEntry").click(function(){
+ $(".flowEntry").click(function(e){
if (!$('.flowEntry[type=checkbox]:not(:checked)').length) {
$("#"+one.f.flows.id.dashlet.datagrid.selectAllFlows)
.prop("checked",
.prop("checked",
false);
}
- event.stopPropagation();
+ e.stopPropagation();
});
});
});
<!-- Login CSS - 2 -->
<link rel="stylesheet/less" type="text/css" href="/css/login.less">
-<c:set var="title" value="OpenDaylight-login" scope="application"/>
+<c:set var="title" value="OpenDaylight - Login" scope="application"/>
<%
String filePath = "/WEB-INF/jsp/customlogin.jsp";
URL fileURL = pageContext.getServletContext().getResource(filePath);