<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>yang-maven-plugin</artifactId>
</plugin>
+ <plugin>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-checkstyle-plugin</artifactId>
+ <configuration>
+ <propertyExpansion>checkstyle.violationSeverity=error</propertyExpansion>
+ </configuration>
+ </plugin>
</plugins>
</build>
import org.opendaylight.yangtools.yang.common.RpcResult;
/**
- * forwardingrules-manager
- * org.opendaylight.openflowplugin.applications.frm
+ * forwardingrules-manager org.opendaylight.openflowplugin.applications.frm
*
- * ForwardingRulesCommiter
- * It represent a contract between DataStore DataTreeModification and relevant
- * SalRpcService for device. Every implementation has to be registered for
- * Configurational/DS tree path.
+ * <p>
+ * ForwardingRulesCommiter represents a contract between DataStore
+ * DataTreeModification and the relevant SalRpcService for a device. Every
+ * implementation has to be registered for the Configuration/DS tree path.
*/
-public interface ForwardingRulesCommiter<D extends DataObject> extends AutoCloseable,
- ClusteredDataTreeChangeListener<D> {
+public interface ForwardingRulesCommiter<D extends DataObject>
+ extends AutoCloseable, ClusteredDataTreeChangeListener<D> {
/**
- * Method removes DataObject which is identified by InstanceIdentifier
- * from device.
+ * Method removes DataObject which is identified by InstanceIdentifier from
+ * device.
*
- * @param identifier - the whole path to DataObject
- * @param del - DataObject for removing
- * @param nodeIdent Node InstanceIdentifier
+ * @param identifier
+ * - the whole path to DataObject
+ * @param del
+ * - DataObject for removing
+ * @param nodeIdent
+ * Node InstanceIdentifier
*/
- void remove(InstanceIdentifier<D> identifier, D del,
- InstanceIdentifier<FlowCapableNode> nodeIdent);
+ void remove(InstanceIdentifier<D> identifier, D del, InstanceIdentifier<FlowCapableNode> nodeIdent);
/**
- * Method updates the original DataObject to the update DataObject
- * in device. Both are identified by same InstanceIdentifier
+ * Method updates the original DataObject to the update DataObject in device.
+ * Both are identified by same InstanceIdentifier
*
- * @param identifier - the whole path to DataObject
- * @param original - original DataObject (for update)
- * @param update - changed DataObject (contain updates)
- * @param nodeIdent Node InstanceIdentifier
+ * @param identifier
+ * - the whole path to DataObject
+ * @param original
+ * - original DataObject (for update)
+ * @param update
+ * - changed DataObject (contain updates)
+ * @param nodeIdent
+ * Node InstanceIdentifier
*/
- void update(InstanceIdentifier<D> identifier, D original, D update,
- InstanceIdentifier<FlowCapableNode> nodeIdent);
+ void update(InstanceIdentifier<D> identifier, D original, D update, InstanceIdentifier<FlowCapableNode> nodeIdent);
/**
- * Method adds the DataObject which is identified by InstanceIdentifier
- * to device.
+ * Method adds the DataObject which is identified by InstanceIdentifier to
+ * device.
*
- * @param identifier - the whole path to new DataObject
- * @param add - new DataObject
- * @param nodeIdent Node InstanceIdentifier
- * @return A future associated with RPC task. {@code null} is set to the
- * future if this method does not invoke RPC.
+ * @param identifier
+ * - the whole path to new DataObject
+ * @param add
+ * - new DataObject
+ * @param nodeIdent
+ * Node InstanceIdentifier
+ * @return A future associated with RPC task. {@code null} is set to the future
+ * if this method does not invoke RPC.
*/
Future<? extends RpcResult<?>> add(InstanceIdentifier<D> identifier, D add,
InstanceIdentifier<FlowCapableNode> nodeIdent);
-
/**
- * Method creates stale-marked DataObject which is identified by InstanceIdentifier
- * from device.
+ * Method creates stale-marked DataObject which is identified by
+ * InstanceIdentifier from device.
*
- * @param identifier - the whole path to DataObject
- * @param del - DataObject removed. Stale-Mark object to be created from this object
- * @param nodeIdent Node InstanceIdentifier
+ * @param identifier
+ * - the whole path to DataObject
+ * @param del
+ * - DataObject removed. Stale-Mark object to be created from this
+ * object
+ * @param nodeIdent
+ * Node InstanceIdentifier
*/
- void createStaleMarkEntity(InstanceIdentifier<D> identifier, D del,
- InstanceIdentifier<FlowCapableNode> nodeIdent);
-
-
+ void createStaleMarkEntity(InstanceIdentifier<D> identifier, D del, InstanceIdentifier<FlowCapableNode> nodeIdent);
Future<? extends RpcResult<?>> removeWithResult(InstanceIdentifier<D> identifier, D del,
- InstanceIdentifier<FlowCapableNode> nodeIdent);
-
-
+ InstanceIdentifier<FlowCapableNode> nodeIdent);
}
-
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.onf.bundle.service.rev170124.SalBundleService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.onf.bundle.service.rev170124.SalBundleService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.SalTableService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
- * It represent a central point for whole module. Implementation
- * Flow Provider registers the link FlowChangeListener} and it holds all needed
- * services for link FlowChangeListener}.
+ * It represents a central point for the whole module. The implementing Flow
+ * Provider registers the {@link FlowChangeListener} and holds all needed
+ * services for the {@link FlowChangeListener}.
*
* @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
*/
void start();
/**
- * Method returns information :
- * "is Node with send InstanceIdentifier connected"?
+ * Method returns information: "is the Node with the sent InstanceIdentifier
+ * connected?".
*
- * @param ident - the key of the node
+ * @param ident
+ * - the key of the node
* @return boolean - true if device is connected
*/
boolean isNodeActive(InstanceIdentifier<FlowCapableNode> ident);
/**
- * Method returns information :
- * "is Node with send InstanceIdentifier present in operational data store"?
+ * Method returns information: "is the Node with the sent InstanceIdentifier
+ * present in the operational data store?".
*
- * @param ident - the key of the node
+ * @param ident
+ * - the key of the node
* @return boolean - true if device is present in operational data store
*/
boolean checkNodeInOperationalDataStore(InstanceIdentifier<FlowCapableNode> ident);
/**
- * Method returns generated transaction ID, which is unique for
- * every transaction. ID is composite from prefix ("DOM") and unique number.
+ * Method returns generated transaction ID, which is unique for every
+ * transaction. ID is composite from prefix ("DOM") and unique number.
*
* @return String transactionID for RPC transaction identification
*/
String getNewTransactionId();
/**
- * Method returns Read Transacion. It is need for Node reconciliation only.
+ * Method returns Read Transaction. It is needed for Node reconciliation only.
*
* @return ReadOnlyTransaction
*/
ReadOnlyTransaction getReadTranaction();
/**
- * Flow RPC service
+ * Flow RPC service.
*
- * @return
*/
SalFlowService getSalFlowService();
/**
- * Group RPC service
+ * Group RPC service.
*
- * @return
*/
SalGroupService getSalGroupService();
/**
- * Meter RPC service
+ * Meter RPC service.
*
- * @return
*/
SalMeterService getSalMeterService();
/**
- * Table RPC service
+ * Table RPC service.
*
- * @return
*/
SalTableService getSalTableService();
/**
- * Bundle RPC service
+ * Bundle RPC service.
*
* @return salBundleService
*/
SalBundleService getSalBundleService();
/**
- * Content definition method and prevent code duplicity in Reconcil
- * @return ForwardingRulesCommiter<Flow>
+ * Content definition method to prevent code duplication in reconciliation.
+ *
+ * @return {@code ForwardingRulesCommiter<Flow>}
*/
ForwardingRulesCommiter<Flow> getFlowCommiter();
/**
- * Content definition method and prevent code duplicity in Reconcil
+ * Content definition method to prevent code duplication in reconciliation.
+ *
* @return ForwardingRulesCommiter<Group>
*/
ForwardingRulesCommiter<Group> getGroupCommiter();
/**
- * Content definition method and prevent code duplicity
+ * Content definition method to prevent code duplication.
+ *
* @return ForwardingRulesCommiter<Meter>
*/
ForwardingRulesCommiter<Meter> getMeterCommiter();
/**
- * Content definition method and prevent code duplicity
+ * Content definition method to prevent code duplication.
+ *
* @return ForwardingRulesCommiter<Table>
*/
ForwardingRulesCommiter<TableFeatures> getTableFeaturesCommiter();
/**
* Check if reconciliation is disabled by user.
+ *
* @return true if reconciliation is disabled, else false
*/
boolean isReconciliationDisabled();
/**
* Check if stale marking is enabled for switch reconciliation.
+ *
* @return true if stale marking is enabled, else false
*/
boolean isStaleMarkingEnabled();
/**
* Return number of reconciliation retry are allowed.
+ *
* @return number of retries.
*/
int getReconciliationRetryCount();
/**
- * Method checks if *this* instance of openflowplugin is owner of
- * the given openflow node.
+ * Method checks if *this* instance of openflowplugin is owner of the given
+ * openflow node.
+ *
* @return True if owner, else false
*/
boolean isNodeOwner(InstanceIdentifier<FlowCapableNode> ident);
/**
- * Content definition method and prevent code duplicity
+ * Content definition method to prevent code duplication.
+ *
* @return FlowNodeConnectorInventoryTranslatorImpl
*/
FlowNodeConnectorInventoryTranslatorImpl getFlowNodeConnectorInventoryTranslatorImpl();
/**
- * holds the value read from the configuration file openflowplugin.cfg file
- * @return True if user enables bundle-based-reconciliation-enabled field in config file or False
+ * Holds the value read from the openflowplugin.cfg configuration file.
+ *
+ * @return true if the user enables the bundle-based-reconciliation-enabled
+ *         field in the config file, false otherwise
*/
boolean isBundleBasedReconciliationEnabled();
}
-
private static final Map<String, ForwardingRulesProperty> KEY_VALUE_MAP;
/**
- * Get property type from property key
+ * Get property type from property key.
*
* @param key the property key
* @return the property type
}
/**
- * Converts enum name to property key
+ * Converts enum name to property key.
*
* @return the property key
*/
 * AbstractChangeListener implements basic {@link org.opendaylight.controller.md.sal.binding.api.DataTreeModification}
* processing for flow node subDataObject (flows, groups and meters).
*/
-public abstract class AbstractListeningCommiter <T extends DataObject> implements ForwardingRulesCommiter<T> {
+public abstract class AbstractListeningCommiter<T extends DataObject> implements ForwardingRulesCommiter<T> {
private static final Logger LOG = LoggerFactory.getLogger(AbstractListeningCommiter.class);
ForwardingRulesManager provider;
private final Class<T> clazz;
- public AbstractListeningCommiter (ForwardingRulesManager provider, Class<T> clazz) {
+ public AbstractListeningCommiter(ForwardingRulesManager provider, Class<T> clazz) {
this.provider = Preconditions.checkNotNull(provider, "ForwardingRulesManager can not be null!");
this.clazz = Preconditions.checkNotNull(clazz, "Class can not be null!");
}
key.firstIdentifierOf(FlowCapableNode.class);
if (preConfigurationCheck(nodeIdent)) {
switch (mod.getModificationType()) {
- case DELETE:
- remove(key, mod.getDataBefore(), nodeIdent);
- break;
- case SUBTREE_MODIFIED:
- update(key, mod.getDataBefore(), mod.getDataAfter(), nodeIdent);
- break;
- case WRITE:
- if (mod.getDataBefore() == null) {
- add(key, mod.getDataAfter(), nodeIdent);
- } else {
+ case DELETE:
+ remove(key, mod.getDataBefore(), nodeIdent);
+ break;
+ case SUBTREE_MODIFIED:
update(key, mod.getDataBefore(), mod.getDataAfter(), nodeIdent);
- }
- break;
- default:
- throw new IllegalArgumentException("Unhandled modification type " + mod.getModificationType());
+ break;
+ case WRITE:
+ if (mod.getDataBefore() == null) {
+ add(key, mod.getDataAfter(), nodeIdent);
+ } else {
+ update(key, mod.getDataBefore(), mod.getDataAfter(), nodeIdent);
+ }
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled modification type " + mod.getModificationType());
}
- }
- else{
+ } else {
if (provider.isStaleMarkingEnabled()) {
LOG.info("Stale-Marking ENABLED and switch {} is NOT connected, storing stale entities",
nodeIdent.toString());
case WRITE:
break;
default:
- throw new IllegalArgumentException("Unhandled modification type " + mod.getModificationType());
+ throw new
+ IllegalArgumentException("Unhandled modification type " + mod.getModificationType());
}
}
}
/**
 * Method returns the wildCardPath for Listener registration
- * and for identify the correct KeyInstanceIdentifier from data;
+ * and for identifying the correct KeyInstanceIdentifier from data.
*/
protected abstract InstanceIdentifier<T> getWildCardPath();
// node from operational data store and if it's present it calls flowNodeConnected to explicitly
// trigger the event of new node connected.
- if(!provider.isNodeOwner(nodeIdent)) { return false; }
+ if (!provider.isNodeOwner(nodeIdent)) {
+ return false;
+ }
if (!provider.isNodeActive(nodeIdent)) {
if (provider.checkNodeInOperationalDataStore(nodeIdent)) {
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-public abstract class AbstractNodeConnectorCommitter <T extends DataObject> implements FlowCapableNodeConnectorCommitter<T> {
- private ForwardingRulesManager provider;
+public abstract class AbstractNodeConnectorCommitter<T extends DataObject>
+ implements FlowCapableNodeConnectorCommitter<T> {
+ private final ForwardingRulesManager provider;
private final Class<T> clazz;
- public AbstractNodeConnectorCommitter (ForwardingRulesManager provider, Class<T> clazz) {
+ public AbstractNodeConnectorCommitter(ForwardingRulesManager provider, Class<T> clazz) {
this.provider = Preconditions.checkNotNull(provider, "ForwardingRulesManager can not be null!");
this.clazz = Preconditions.checkNotNull(clazz, "Class can not be null!");
}
/**
 * Method returns the wildCardPath for Listener registration
- * and for identify the correct KeyInstanceIdentifier from data;
+ * and for identifying the correct KeyInstanceIdentifier from data.
*/
protected abstract InstanceIdentifier<T> getWildCardPath();
public DeviceMastership(final NodeId nodeId) {
this.nodeId = nodeId;
this.identifier = ServiceGroupIdentifier.create(nodeId.getValue());
- fcnIID = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId)).augmentation
- (FlowCapableNode.class);
+ fcnIID = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId))
+ .augmentation(FlowCapableNode.class);
}
@Override
public void setDeviceOperationalStatus(boolean inOperDS) {
isDeviceInOperDS.set(inOperDS);
}
+
public void reconcile() {
deviceMastered.set(true);
}
/**
* Manager for clustering service registrations of {@link DeviceMastership}.
*/
-public class DeviceMastershipManager implements ClusteredDataTreeChangeListener<FlowCapableNode>,
- OpendaylightInventoryListener, AutoCloseable{
+public class DeviceMastershipManager
+ implements ClusteredDataTreeChangeListener<FlowCapableNode>, OpendaylightInventoryListener, AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(DeviceMastershipManager.class);
- private static final InstanceIdentifier<FlowCapableNode> II_TO_FLOW_CAPABLE_NODE
- = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class)
- .augmentation(FlowCapableNode.class)
- .build();
+ private static final InstanceIdentifier<FlowCapableNode> II_TO_FLOW_CAPABLE_NODE = InstanceIdentifier
+ .builder(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class).build();
private final ClusterSingletonServiceProvider clusterSingletonService;
private final ListenerRegistration<?> notifListenerRegistration;
private Set<InstanceIdentifier<FlowCapableNode>> activeNodes = Collections.emptySet();
public DeviceMastershipManager(final ClusterSingletonServiceProvider clusterSingletonService,
- final NotificationProviderService notificationService,
- final FlowNodeReconciliation reconcliationAgent,
- final DataBroker dataBroker) {
+ final NotificationProviderService notificationService, final FlowNodeReconciliation reconcliationAgent,
+ final DataBroker dataBroker) {
this.clusterSingletonService = clusterSingletonService;
this.notifListenerRegistration = notificationService.registerNotificationListener(this);
this.reconcliationAgent = reconcliationAgent;
}
/**
- * Temporary solution before Mastership manager from plugin.
- * Remove notification after update.
- * Update node notification should be send only when mastership in plugin was granted.
- * @param notification received notification
+ * Temporary solution until the Mastership manager from the plugin is
+ * available. Remove this notification handling after update. The node-updated
+ * notification should be sent only when mastership in the plugin was granted.
+ *
+ * @param notification
+ * received notification
*/
@Override
public void onNodeUpdated(NodeUpdated notification) {
LOG.debug("NodeUpdate notification received : {}", notification);
- DeviceMastership membership = deviceMasterships.computeIfAbsent(notification.getId(), device ->
- new DeviceMastership(notification.getId()));
+ DeviceMastership membership = deviceMasterships.computeIfAbsent(notification.getId(),
+ device -> new DeviceMastership(notification.getId()));
membership.reconcile();
}
@Override
public void onNodeConnectorUpdated(NodeConnectorUpdated notification) {
- //Not published by plugin
+ // Not published by plugin
}
@Override
@Override
public void onNodeConnectorRemoved(NodeConnectorRemoved notification) {
- //Not published by plugin
+ // Not published by plugin
}
@Override
for (DataTreeModification<FlowCapableNode> change : changes) {
final InstanceIdentifier<FlowCapableNode> key = change.getRootPath().getRootIdentifier();
final DataObjectModification<FlowCapableNode> mod = change.getRootNode();
- final InstanceIdentifier<FlowCapableNode> nodeIdent =
- key.firstIdentifierOf(FlowCapableNode.class);
+ final InstanceIdentifier<FlowCapableNode> nodeIdent = key.firstIdentifierOf(FlowCapableNode.class);
switch (mod.getModificationType()) {
case DELETE:
}
break;
case SUBTREE_MODIFIED:
- //NO-OP since we do not need to reconcile on Node-updated
+ // NO-OP since we do not need to reconcile on Node-updated
break;
case WRITE:
if (mod.getDataBefore() == null) {
}
public void remove(InstanceIdentifier<FlowCapableNode> identifier, FlowCapableNode del,
- InstanceIdentifier<FlowCapableNode> nodeIdent) {
- if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE)){
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ if (compareInstanceIdentifierTail(identifier, II_TO_FLOW_CAPABLE_NODE)) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Node removed: {}",nodeIdent.firstKeyOf(Node.class).getId().getValue());
+ LOG.debug("Node removed: {}", nodeIdent.firstKeyOf(Node.class).getId().getValue());
}
- if ( ! nodeIdent.isWildcarded()) {
+ if (!nodeIdent.isWildcarded()) {
if (activeNodes.contains(nodeIdent)) {
synchronized (lockObj) {
if (activeNodes.contains(nodeIdent)) {
- Set<InstanceIdentifier<FlowCapableNode>> set =
- Sets.newHashSet(activeNodes);
+ Set<InstanceIdentifier<FlowCapableNode>> set = Sets.newHashSet(activeNodes);
set.remove(nodeIdent);
activeNodes = Collections.unmodifiableSet(set);
- setNodeOperationalStatus(nodeIdent,false);
+ setNodeOperationalStatus(nodeIdent, false);
}
}
}
}
public void add(InstanceIdentifier<FlowCapableNode> identifier, FlowCapableNode add,
- InstanceIdentifier<FlowCapableNode> nodeIdent) {
- if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE)){
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ if (compareInstanceIdentifierTail(identifier, II_TO_FLOW_CAPABLE_NODE)) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Node added: {}",nodeIdent.firstKeyOf(Node.class).getId().getValue());
+ LOG.debug("Node added: {}", nodeIdent.firstKeyOf(Node.class).getId().getValue());
}
- if ( ! nodeIdent.isWildcarded()) {
+ if (!nodeIdent.isWildcarded()) {
if (!activeNodes.contains(nodeIdent)) {
synchronized (lockObj) {
if (!activeNodes.contains(nodeIdent)) {
Set<InstanceIdentifier<FlowCapableNode>> set = Sets.newHashSet(activeNodes);
set.add(nodeIdent);
activeNodes = Collections.unmodifiableSet(set);
- setNodeOperationalStatus(nodeIdent,true);
+ setNodeOperationalStatus(nodeIdent, true);
}
}
}
@Override
public void close() {
if (listenerRegistration != null) {
- try {
- listenerRegistration.close();
- } catch (Exception e) {
- LOG.warn("Error occurred while closing operational Node listener: {}", e.getMessage());
- LOG.debug("Error occurred while closing operational Node listener", e);
- }
+ listenerRegistration.close();
listenerRegistration = null;
}
if (notifListenerRegistration != null) {
}
}
-
private boolean compareInstanceIdentifierTail(InstanceIdentifier<?> identifier1,
- InstanceIdentifier<?> identifier2) {
- return Iterables.getLast(identifier1.getPathArguments()).equals(Iterables.getLast(identifier2.getPathArguments()));
+ InstanceIdentifier<?> identifier2) {
+ return Iterables.getLast(identifier1.getPathArguments())
+ .equals(Iterables.getLast(identifier2.getPathArguments()));
}
private void setNodeOperationalStatus(InstanceIdentifier<FlowCapableNode> nodeIid, boolean status) {
NodeId nodeId = nodeIid.firstKeyOf(Node.class).getId();
- if (nodeId != null ) {
- if (deviceMasterships.containsKey(nodeId) ) {
+ if (nodeId != null) {
+ if (deviceMasterships.containsKey(nodeId)) {
deviceMasterships.get(nodeId).setDeviceOperationalStatus(status);
- LOG.debug("Operational status of device {} is set to {}",nodeId, status);
+ LOG.debug("Operational status of device {} is set to {}", nodeId, status);
}
}
}
- private void registerNodeListener(){
+
+ @SuppressWarnings("IllegalCatch")
+ private void registerNodeListener() {
final InstanceIdentifier<FlowCapableNode> flowNodeWildCardIdentifier = InstanceIdentifier.create(Nodes.class)
.child(Node.class).augmentation(FlowCapableNode.class);
- final DataTreeIdentifier<FlowCapableNode> treeId =
- new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, flowNodeWildCardIdentifier);
+ final DataTreeIdentifier<FlowCapableNode> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL,
+ flowNodeWildCardIdentifier);
try {
SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(ForwardingRulesManagerImpl.STARTUP_LOOP_TICK,
ForwardingRulesManagerImpl.STARTUP_LOOP_MAX_RETRIES);
- listenerRegistration = looper.loopUntilNoException(() ->
- dataBroker.registerDataTreeChangeListener(treeId, DeviceMastershipManager.this));
+ listenerRegistration = looper.loopUntilNoException(
+ () -> dataBroker.registerDataTreeChangeListener(treeId, DeviceMastershipManager.this));
} catch (Exception e) {
LOG.warn("Data listener registration failed: {}", e.getMessage());
LOG.debug("Data listener registration failed ", e);
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.SettableFuture;
-import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
import org.slf4j.LoggerFactory;
/**
- * FlowForwarder
- * It implements {@link org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener}
- * for WildCardedPath to {@link Flow} and ForwardingRulesCommiter interface for methods:
- * add, update and remove {@link Flow} processing for
+ * FlowForwarder It implements
+ * {@link org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener}
+ * for WildCardedPath to {@link Flow} and ForwardingRulesCommiter interface for
+ * methods: add, update and remove {@link Flow} processing for
* {@link org.opendaylight.controller.md.sal.binding.api.DataTreeModification}.
*/
public class FlowForwarder extends AbstractListeningCommiter<Flow> {
private final DataBroker dataBroker;
private ListenerRegistration<FlowForwarder> listenerRegistration;
- public FlowForwarder (final ForwardingRulesManager manager, final DataBroker db) {
+ public FlowForwarder(final ForwardingRulesManager manager, final DataBroker db) {
super(manager, Flow.class);
dataBroker = Preconditions.checkNotNull(db, "DataBroker can not be null!");
registrationListener(db);
}
+ @SuppressWarnings("IllegalCatch")
private void registrationListener(final DataBroker db) {
- final DataTreeIdentifier<Flow> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION, getWildCardPath());
+ final DataTreeIdentifier<Flow> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath());
try {
SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(ForwardingRulesManagerImpl.STARTUP_LOOP_TICK,
ForwardingRulesManagerImpl.STARTUP_LOOP_MAX_RETRIES);
- listenerRegistration = looper.loopUntilNoException(new Callable<ListenerRegistration<FlowForwarder>>() {
- @Override
- public ListenerRegistration<FlowForwarder> call() throws Exception {
- return db.registerDataTreeChangeListener(treeId, FlowForwarder.this);
- }
- });
+ listenerRegistration = looper
+ .loopUntilNoException(() -> db.registerDataTreeChangeListener(treeId, FlowForwarder.this));
} catch (final Exception e) {
LOG.warn("FRM Flow DataTreeChange listener registration fail!");
LOG.debug("FRM Flow DataTreeChange listener registration fail ..", e);
@Override
public void close() {
if (listenerRegistration != null) {
- try {
- listenerRegistration.close();
- } catch (final Exception e) {
- LOG.warn("Error by stop FRM FlowChangeListener: {}", e.getMessage());
- LOG.debug("Error by stop FRM FlowChangeListener..", e);
- }
+ listenerRegistration.close();
listenerRegistration = null;
}
}
@Override
- public void remove(final InstanceIdentifier<Flow> identifier,
- final Flow removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public void remove(final InstanceIdentifier<Flow> identifier, final Flow removeDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
final TableKey tableKey = identifier.firstKeyOf(Table.class, TableKey.class);
if (tableIdValidationPrecondition(tableKey, removeDataObj)) {
// removed from datastore. So FRM always needs to set strict flag
// into remove-flow input so that only a flow entry associated with
// a given flow object is removed.
- builder.setTransactionUri(new Uri(provider.getNewTransactionId())).
- setStrict(Boolean.TRUE);
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId())).setStrict(Boolean.TRUE);
provider.getSalFlowService().removeFlow(builder.build());
}
}
-
-
-
- //TODO: Pull this into ForwardingRulesCommiter and override it here
+ // TODO: Pull this into ForwardingRulesCommiter and override it here
@Override
public Future<RpcResult<RemoveFlowOutput>> removeWithResult(final InstanceIdentifier<Flow> identifier,
- final Flow removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ final Flow removeDataObj, final InstanceIdentifier<FlowCapableNode> nodeIdent) {
Future<RpcResult<RemoveFlowOutput>> resultFuture = SettableFuture.create();
final TableKey tableKey = identifier.firstKeyOf(Table.class, TableKey.class);
// removed from datastore. So FRM always needs to set strict flag
// into remove-flow input so that only a flow entry associated with
// a given flow object is removed.
- builder.setTransactionUri(new Uri(provider.getNewTransactionId())).
- setStrict(Boolean.TRUE);
+ builder.setTransactionUri(new Uri(provider.getNewTransactionId())).setStrict(Boolean.TRUE);
resultFuture = provider.getSalFlowService().removeFlow(builder.build());
}
return resultFuture;
}
-
-
@Override
- public void update(final InstanceIdentifier<Flow> identifier,
- final Flow original, final Flow update,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public void update(final InstanceIdentifier<Flow> identifier, final Flow original, final Flow update,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
final TableKey tableKey = identifier.firstKeyOf(Table.class, TableKey.class);
if (tableIdValidationPrecondition(tableKey, update)) {
// has been updated. So FRM always needs to set strict flag into
// update-flow input so that only a flow entry associated with
// a given flow object is updated.
- builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).setStrict(Boolean.TRUE).build());
- builder.setOriginalFlow((new OriginalFlowBuilder(original)).setStrict(Boolean.TRUE).build());
+ builder.setUpdatedFlow(new UpdatedFlowBuilder(update).setStrict(Boolean.TRUE).build());
+ builder.setOriginalFlow(new OriginalFlowBuilder(original).setStrict(Boolean.TRUE).build());
provider.getSalFlowService().updateFlow(builder.build());
}
}
@Override
- public Future<RpcResult<AddFlowOutput>> add(
- final InstanceIdentifier<Flow> identifier, final Flow addDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public Future<RpcResult<AddFlowOutput>> add(final InstanceIdentifier<Flow> identifier, final Flow addDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
Future<RpcResult<AddFlowOutput>> future;
final TableKey tableKey = identifier.firstKeyOf(Table.class, TableKey.class);
}
@Override
- public void createStaleMarkEntity(InstanceIdentifier<Flow> identifier, Flow del, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public void createStaleMarkEntity(InstanceIdentifier<Flow> identifier, Flow del,
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
LOG.debug("Creating Stale-Mark entry for the switch {} for flow {} ", nodeIdent.toString(), del.toString());
StaleFlow staleFlow = makeStaleFlow(identifier, del, nodeIdent);
}
-
-
@Override
protected InstanceIdentifier<Flow> getWildCardPath() {
- return InstanceIdentifier.create(Nodes.class).child(Node.class)
- .augmentation(FlowCapableNode.class).child(Table.class).child(Flow.class);
+ return InstanceIdentifier.create(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class)
+ .child(Table.class).child(Flow.class);
}
- private static boolean tableIdValidationPrecondition (final TableKey tableKey, final Flow flow) {
+ private static boolean tableIdValidationPrecondition(final TableKey tableKey, final Flow flow) {
Preconditions.checkNotNull(tableKey, "TableKey can not be null or empty!");
Preconditions.checkNotNull(flow, "Flow can not be null or empty!");
- if (! tableKey.getId().equals(flow.getTableId())) {
- LOG.warn("TableID in URI tableId={} and in palyload tableId={} is not same.",
- flow.getTableId(), tableKey.getId());
+ if (!tableKey.getId().equals(flow.getTableId())) {
+ LOG.warn("TableID in URI tableId={} and in palyload tableId={} is not same.", flow.getTableId(),
+ tableKey.getId());
return false;
}
return true;
}
- private StaleFlow makeStaleFlow(InstanceIdentifier<Flow> identifier, Flow del, InstanceIdentifier<FlowCapableNode> nodeIdent){
- StaleFlowBuilder staleFlowBuilder = new StaleFlowBuilder(del);
+ private StaleFlow makeStaleFlow(InstanceIdentifier<Flow> identifier, Flow del,
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ StaleFlowBuilder staleFlowBuilder = new StaleFlowBuilder(del);
return staleFlowBuilder.setId(del.getId()).build();
}
- private void persistStaleFlow(StaleFlow staleFlow, InstanceIdentifier<FlowCapableNode> nodeIdent){
+ private void persistStaleFlow(StaleFlow staleFlow, InstanceIdentifier<FlowCapableNode> nodeIdent) {
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
- writeTransaction.put(LogicalDatastoreType.CONFIGURATION, getStaleFlowInstanceIdentifier(staleFlow, nodeIdent), staleFlow, false);
+ writeTransaction.put(LogicalDatastoreType.CONFIGURATION, getStaleFlowInstanceIdentifier(staleFlow, nodeIdent),
+ staleFlow, false);
CheckedFuture<Void, TransactionCommitFailedException> submitFuture = writeTransaction.submit();
handleStaleFlowResultFuture(submitFuture);
}
@Override
- public void onFailure(Throwable t) {
- LOG.error("Stale Flow creation failed {}", t);
+ public void onFailure(Throwable throwable) {
+ LOG.error("Stale Flow creation failed {}", throwable);
}
});
}
- private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.StaleFlow> getStaleFlowInstanceIdentifier(StaleFlow staleFlow, InstanceIdentifier<FlowCapableNode> nodeIdent) {
- return nodeIdent
- .child(Table.class, new TableKey(staleFlow.getTableId()))
- .child(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.StaleFlow.class,
- new StaleFlowKey(new FlowId(staleFlow.getId())));
+ private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight
+ .flow.inventory.rev130819.tables.table.StaleFlow> getStaleFlowInstanceIdentifier(
+ StaleFlow staleFlow, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ return nodeIdent.child(Table.class, new TableKey(staleFlow.getTableId())).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.StaleFlow.class,
+ new StaleFlowKey(new FlowId(staleFlow.getId())));
}
}
-
/**
- * Copyright (c) 2015 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
+ * Copyright (c) 2015, 2017 Ericsson India Global Services Pvt Ltd. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import java.math.BigInteger;
-import java.util.concurrent.Callable;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class FlowNodeConnectorInventoryTranslatorImpl extends AbstractNodeConnectorCommitter<FlowCapableNodeConnector> implements FlowNodeConnectorInventoryTranslator {
+public class FlowNodeConnectorInventoryTranslatorImpl extends AbstractNodeConnectorCommitter<FlowCapableNodeConnector>
+ implements FlowNodeConnectorInventoryTranslator {
private static final Logger LOG = LoggerFactory.getLogger(FlowNodeConnectorInventoryTranslatorImpl.class);
.augmentation(FlowCapableNodeConnector.class)
.build();
- private Multimap<BigInteger,String> dpnToPortMultiMap = Multimaps.synchronizedListMultimap(ArrayListMultimap.<BigInteger,String>create());
+ private final Multimap<BigInteger, String> dpnToPortMultiMap = Multimaps
+ .synchronizedListMultimap(ArrayListMultimap.<BigInteger, String>create());
- public FlowNodeConnectorInventoryTranslatorImpl(final ForwardingRulesManager manager, final DataBroker dataBroker){
+ @SuppressWarnings("IllegalCatch")
+ public FlowNodeConnectorInventoryTranslatorImpl(final ForwardingRulesManager manager, final DataBroker dataBroker) {
super(manager, FlowCapableNodeConnector.class);
Preconditions.checkNotNull(dataBroker, "DataBroker can not be null!");
try {
SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(ForwardingRulesManagerImpl.STARTUP_LOOP_TICK,
ForwardingRulesManagerImpl.STARTUP_LOOP_MAX_RETRIES);
- dataTreeChangeListenerRegistration = looper.loopUntilNoException(new Callable<ListenerRegistration<FlowNodeConnectorInventoryTranslatorImpl>>() {
- @Override
- public ListenerRegistration<FlowNodeConnectorInventoryTranslatorImpl> call() throws Exception {
- return dataBroker.registerDataTreeChangeListener(treeId, FlowNodeConnectorInventoryTranslatorImpl.this);
- }
- });
+ dataTreeChangeListenerRegistration = looper.loopUntilNoException(() -> dataBroker
+ .registerDataTreeChangeListener(treeId, FlowNodeConnectorInventoryTranslatorImpl.this));
} catch (final Exception e) {
LOG.warn(" FlowNodeConnectorInventoryTranslatorImpl listener registration fail!");
LOG.debug("FlowNodeConnectorInventoryTranslatorImpl DataTreeChangeListener registration fail ..", e);
- throw new IllegalStateException("FlowNodeConnectorInventoryTranslatorImpl startup fail! System needs restart.", e);
+ throw new
+ IllegalStateException("FlowNodeConnectorInventoryTranslatorImpl startup fail! System needs restart.", e);
}
}
@Override
- protected InstanceIdentifier<FlowCapableNodeConnector> getWildCardPath(){
+ protected InstanceIdentifier<FlowCapableNodeConnector> getWildCardPath() {
return InstanceIdentifier.create(Nodes.class)
.child(Node.class)
.child(NodeConnector.class)
@Override
public void close() {
if (dataTreeChangeListenerRegistration != null) {
- try {
- dataTreeChangeListenerRegistration.close();
- } catch (final Exception e) {
- LOG.warn("Error by stop FRM FlowNodeConnectorInventoryTranslatorImpl: {}", e.getMessage());
- LOG.debug("Error by stop FRM FlowNodeConnectorInventoryTranslatorImpl..", e);
- }
+ dataTreeChangeListenerRegistration.close();
dataTreeChangeListenerRegistration = null;
}
}
+
@Override
- public void remove(InstanceIdentifier<FlowCapableNodeConnector> identifier, FlowCapableNodeConnector del, InstanceIdentifier<FlowCapableNodeConnector> nodeConnIdent) {
- if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE_CONNECTOR)){
+ public void remove(InstanceIdentifier<FlowCapableNodeConnector> identifier, FlowCapableNodeConnector del,
+ InstanceIdentifier<FlowCapableNodeConnector> nodeConnIdent) {
+ if (compareInstanceIdentifierTail(identifier, II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
LOG.debug("Node Connector removed");
- String sNodeConnectorIdentifier = nodeConnIdent
- .firstKeyOf(NodeConnector.class, NodeConnectorKey.class).getId().getValue();
- BigInteger nDpId = getDpIdFromPortName(sNodeConnectorIdentifier);
+ String nodeConnectorIdentifier = nodeConnIdent.firstKeyOf(NodeConnector.class, NodeConnectorKey.class)
+ .getId().getValue();
+ BigInteger dpId = getDpIdFromPortName(nodeConnectorIdentifier);
- dpnToPortMultiMap.remove(nDpId, sNodeConnectorIdentifier);
+ dpnToPortMultiMap.remove(dpId, nodeConnectorIdentifier);
}
}
@Override
- public void update(InstanceIdentifier<FlowCapableNodeConnector> identifier, FlowCapableNodeConnector original, FlowCapableNodeConnector update, InstanceIdentifier<FlowCapableNodeConnector> nodeConnIdent) {
- if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE_CONNECTOR)){
+ public void update(InstanceIdentifier<FlowCapableNodeConnector> identifier, FlowCapableNodeConnector original,
+ FlowCapableNodeConnector update, InstanceIdentifier<FlowCapableNodeConnector> nodeConnIdent) {
+ if (compareInstanceIdentifierTail(identifier, II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
LOG.debug("Node Connector updated");
- //Don't need to do anything as we are not considering updates here
+ // Don't need to do anything as we are not considering updates here
}
}
@Override
- public void add(InstanceIdentifier<FlowCapableNodeConnector> identifier, FlowCapableNodeConnector add, InstanceIdentifier<FlowCapableNodeConnector> nodeConnIdent) {
- if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE_CONNECTOR)){
+ public void add(InstanceIdentifier<FlowCapableNodeConnector> identifier, FlowCapableNodeConnector add,
+ InstanceIdentifier<FlowCapableNodeConnector> nodeConnIdent) {
+ if (compareInstanceIdentifierTail(identifier, II_TO_FLOW_CAPABLE_NODE_CONNECTOR)) {
LOG.debug("Node Connector added");
- String sNodeConnectorIdentifier = nodeConnIdent
+ String nodeConnectorIdentifier = nodeConnIdent
.firstKeyOf(NodeConnector.class, NodeConnectorKey.class).getId().getValue();
- BigInteger nDpId = getDpIdFromPortName(sNodeConnectorIdentifier);
+ BigInteger dpId = getDpIdFromPortName(nodeConnectorIdentifier);
- if(!dpnToPortMultiMap.containsEntry(nDpId,sNodeConnectorIdentifier)) {
- dpnToPortMultiMap.put(nDpId, sNodeConnectorIdentifier);
- }else{
+ if (!dpnToPortMultiMap.containsEntry(dpId, nodeConnectorIdentifier)) {
+ dpnToPortMultiMap.put(dpId, nodeConnectorIdentifier);
+ } else {
LOG.error("Duplicate Event.Node Connector already added");
}
}
private boolean compareInstanceIdentifierTail(InstanceIdentifier<?> identifier1,
InstanceIdentifier<?> identifier2) {
- return Iterables.getLast(identifier1.getPathArguments()).equals(Iterables.getLast(identifier2.getPathArguments()));
+ return Iterables.getLast(identifier1.getPathArguments())
+ .equals(Iterables.getLast(identifier2.getPathArguments()));
}
@Override
- public boolean isNodeConnectorUpdated(BigInteger dpId, String portName){
+ public boolean isNodeConnectorUpdated(BigInteger dpId, String portName) {
return dpnToPortMultiMap.containsEntry(dpId,portName) ;
}
import org.slf4j.LoggerFactory;
/**
- * Default implementation of {@link ForwardingRulesManager}
+ * Default implementation of {@link FlowNodeReconciliation}.
*
* @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
*/
public class FlowNodeReconciliationImpl implements FlowNodeReconciliation {
- private static final Logger LOG = LoggerFactory.getLogger(FlowNodeReconciliationImpl.class);
- //The number of nanoseconds to wait for a single group to be added.
- private static final long ADD_GROUP_TIMEOUT = TimeUnit.SECONDS.toNanos(3);
+ private static final Logger LOG = LoggerFactory.getLogger(FlowNodeReconciliationImpl.class);
- //The maximum number of nanoseconds to wait for completion of add-group RPCs.
- private static final long MAX_ADD_GROUP_TIMEOUT = TimeUnit.SECONDS.toNanos(20);
+ // The number of nanoseconds to wait for a single group to be added.
+ private static final long ADD_GROUP_TIMEOUT = TimeUnit.SECONDS.toNanos(3);
+
+ // The maximum number of nanoseconds to wait for completion of add-group RPCs.
+ private static final long MAX_ADD_GROUP_TIMEOUT = TimeUnit.SECONDS.toNanos(20);
private static final String SEPARATOR = ":";
private static final int THREAD_POOL_SIZE = 4;
private final DataBroker dataBroker;
private final ForwardingRulesManager provider;
private final String serviceName;
- final private int priority;
- final private ResultState resultState;
- private Map<DeviceInfo, ListenableFuture<Boolean>> futureMap = new HashMap<>();
+ private final int priority;
+ private final ResultState resultState;
+ private final Map<DeviceInfo, ListenableFuture<Boolean>> futureMap = new HashMap<>();
private final ExecutorService executor = Executors.newFixedThreadPool(THREAD_POOL_SIZE);
private static final AtomicLong BUNDLE_ID = new AtomicLong();
private static final BundleFlags BUNDLE_FLAGS = new BundleFlags(true, true);
- public FlowNodeReconciliationImpl (final ForwardingRulesManager manager, final DataBroker db,
+ public FlowNodeReconciliationImpl(final ForwardingRulesManager manager, final DataBroker db,
final String serviceName, final int priority, final ResultState resultState) {
this.provider = Preconditions.checkNotNull(manager, "ForwardingRulesManager can not be null!");
dataBroker = Preconditions.checkNotNull(db, "DataBroker can not be null!");
this.serviceName = serviceName;
this.priority = priority;
this.resultState = resultState;
- salBundleService = Preconditions.checkNotNull(manager.getSalBundleService(),"salBundleService can not be null!");
+ salBundleService = Preconditions.checkNotNull(manager.getSalBundleService(),
+ "salBundleService can not be null!");
}
@Override
private ListenableFuture<Boolean> reconcileConfiguration(InstanceIdentifier<FlowCapableNode> connectedNode) {
LOG.info("Triggering reconciliation for device {}", connectedNode.firstKeyOf(Node.class));
if (provider.isStaleMarkingEnabled()) {
- LOG.info("Stale-Marking is ENABLED and proceeding with deletion of "
- + "stale-marked entities on switch {}",
+ LOG.info("Stale-Marking is ENABLED and proceeding with deletion of " + "stale-marked entities on switch {}",
connectedNode.toString());
reconciliationPreProcess(connectedNode);
}
- LOG.debug("Bundle based reconciliation status : {}", provider.isBundleBasedReconciliationEnabled()?"Enable":"Disable");
+ LOG.debug("Bundle based reconciliation status : {}",
+ provider.isBundleBasedReconciliationEnabled() ? "Enable" : "Disable");
if (provider.isBundleBasedReconciliationEnabled()) {
BundleBasedReconciliationTask bundleBasedReconTask = new BundleBasedReconciliationTask(connectedNode);
return JdkFutureAdapters.listenInPoolThread(executor.submit(bundleBasedReconTask));
private class BundleBasedReconciliationTask implements Callable<Boolean> {
final InstanceIdentifier<FlowCapableNode> nodeIdentity;
- public BundleBasedReconciliationTask(final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ BundleBasedReconciliationTask(final InstanceIdentifier<FlowCapableNode> nodeIdent) {
nodeIdentity = nodeIdent;
}
@Override
public Boolean call() {
- String sNode = nodeIdentity.firstKeyOf(Node.class, NodeKey.class).getId().getValue();
+ String node = nodeIdentity.firstKeyOf(Node.class, NodeKey.class).getId().getValue();
Optional<FlowCapableNode> flowNode = Optional.absent();
BundleId bundleIdValue = new BundleId(BUNDLE_ID.getAndIncrement());
- BigInteger nDpId = getDpnIdFromNodeName(sNode);
- LOG.debug("Triggering bundle based reconciliation for device :{}", nDpId);
+ BigInteger dpnId = getDpnIdFromNodeName(node);
+ LOG.debug("Triggering bundle based reconciliation for device :{}", dpnId);
ReadOnlyTransaction trans = provider.getReadTranaction();
try {
flowNode = trans.read(LogicalDatastoreType.CONFIGURATION, nodeIdentity).get();
- } catch (Exception e) {
+ } catch (ExecutionException | InterruptedException e) {
LOG.error("Error occurred while reading the configuration data store for node {}", nodeIdentity, e);
}
if (flowNode.isPresent()) {
- LOG.debug("FlowNode present for Datapath ID {}", nDpId);
+ LOG.debug("FlowNode present for Datapath ID {}", dpnId);
final NodeRef nodeRef = new NodeRef(nodeIdentity.firstIdentifierOf(Node.class));
- final ControlBundleInput openBundleInput = new ControlBundleInputBuilder()
- .setNode(nodeRef)
- .setBundleId(bundleIdValue)
- .setFlags(BUNDLE_FLAGS)
- .setType(BundleControlType.ONFBCTOPENREQUEST)
+ final ControlBundleInput openBundleInput = new ControlBundleInputBuilder().setNode(nodeRef)
+ .setBundleId(bundleIdValue).setFlags(BUNDLE_FLAGS).setType(BundleControlType.ONFBCTOPENREQUEST)
.build();
- final ControlBundleInput commitBundleInput = new ControlBundleInputBuilder()
- .setNode(nodeRef)
- .setBundleId(bundleIdValue)
- .setFlags(BUNDLE_FLAGS)
- .setType(BundleControlType.ONFBCTCOMMITREQUEST)
- .build();
+ final ControlBundleInput commitBundleInput = new ControlBundleInputBuilder().setNode(nodeRef)
+ .setBundleId(bundleIdValue).setFlags(BUNDLE_FLAGS)
+ .setType(BundleControlType.ONFBCTCOMMITREQUEST).build();
final AddBundleMessagesInput addBundleMessagesInput = new AddBundleMessagesInputBuilder()
- .setNode(nodeRef)
- .setBundleId(bundleIdValue)
- .setFlags(BUNDLE_FLAGS)
- .setMessages(createMessages(nodeRef, flowNode))
- .build();
+ .setNode(nodeRef).setBundleId(bundleIdValue).setFlags(BUNDLE_FLAGS)
+ .setMessages(createMessages(nodeRef, flowNode)).build();
Future<RpcResult<Void>> openBundle = salBundleService.controlBundle(openBundleInput);
- ListenableFuture<RpcResult<Void>> addBundleMessagesFuture =
- Futures.transformAsync(JdkFutureAdapters.listenInPoolThread(openBundle), rpcResult -> {
- if (rpcResult.isSuccessful()) {
- return JdkFutureAdapters.listenInPoolThread(
- salBundleService.addBundleMessages(addBundleMessagesInput));
- }
- return Futures.immediateFuture(null);
- });
-
- ListenableFuture<RpcResult<Void>> commitBundleFuture =
- Futures.transformAsync(addBundleMessagesFuture, rpcResult -> {
+ ListenableFuture<RpcResult<Void>> addBundleMessagesFuture = Futures
+ .transformAsync(JdkFutureAdapters.listenInPoolThread(openBundle), rpcResult -> {
if (rpcResult.isSuccessful()) {
- return JdkFutureAdapters.listenInPoolThread(
- salBundleService.controlBundle(commitBundleInput));
+ return JdkFutureAdapters
+ .listenInPoolThread(salBundleService.addBundleMessages(addBundleMessagesInput));
}
return Futures.immediateFuture(null);
});
- /* Bundles not supported for meters*/
- List<Meter> meters = flowNode.get().getMeter() != null
- ? flowNode.get().getMeter() : Collections.emptyList();
- ListenableFuture<RpcResult<Void>> meterFuture =
- Futures.transformAsync(commitBundleFuture, rpcResult -> {
- if (rpcResult.isSuccessful()) {
- for (Meter meter : meters) {
- final KeyedInstanceIdentifier<Meter, MeterKey> meterIdent =
- nodeIdentity.child(Meter.class, meter.getKey());
- provider.getMeterCommiter().add(meterIdent, meter, nodeIdentity);
- }
- }
- return Futures.immediateFuture(null);
- });
-
- trans.close();
- try {
- if(commitBundleFuture.get().isSuccessful()) {
- LOG.debug("Completing bundle based reconciliation for device ID:{}", nDpId);
- return true;
- } else {
- return false;
+ ListenableFuture<RpcResult<Void>> commitBundleFuture = Futures.transformAsync(addBundleMessagesFuture,
+ rpcResult -> {
+ if (rpcResult.isSuccessful()) {
+ return JdkFutureAdapters
+ .listenInPoolThread(salBundleService.controlBundle(commitBundleInput));
+ }
+ return Futures.immediateFuture(null);
+ });
+
+ /* Bundles not supported for meters */
+ List<Meter> meters = flowNode.get().getMeter() != null ? flowNode.get().getMeter()
+ : Collections.emptyList();
+ ListenableFuture<RpcResult<Void>> meterFuture = Futures.transformAsync(commitBundleFuture,
+ rpcResult -> {
+ if (rpcResult.isSuccessful()) {
+ for (Meter meter : meters) {
+ final KeyedInstanceIdentifier<Meter, MeterKey> meterIdent = nodeIdentity
+ .child(Meter.class, meter.getKey());
+ provider.getMeterCommiter().add(meterIdent, meter, nodeIdentity);
}
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("Error while doing bundle based reconciliation for device ID:{}", nodeIdentity);
- return false;
}
+ return Futures.immediateFuture(null);
+ });
+
+ trans.close();
+ try {
+ if (commitBundleFuture.get().isSuccessful()) {
+ LOG.debug("Completing bundle based reconciliation for device ID:{}", dpnId);
+ return true;
+ } else {
+ return false;
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ LOG.error("Error while doing bundle based reconciliation for device ID:{}", nodeIdentity);
+ return false;
+ }
}
- LOG.error("FlowNode not present for Datapath ID {}", nDpId);
+ LOG.error("FlowNode not present for Datapath ID {}", dpnId);
return false;
}
}
InstanceIdentifier<FlowCapableNode> nodeIdentity;
- public ReconciliationTask(final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- nodeIdentity = nodeIdent;
+ ReconciliationTask(final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ nodeIdentity = nodeIdent;
}
+ @Override
public Boolean call() {
- String sNode = nodeIdentity.firstKeyOf(Node.class, NodeKey.class).getId().getValue();
- BigInteger nDpId = getDpnIdFromNodeName(sNode);
+ String node = nodeIdentity.firstKeyOf(Node.class, NodeKey.class).getId().getValue();
+ BigInteger dpnId = getDpnIdFromNodeName(node);
ReadOnlyTransaction trans = provider.getReadTranaction();
Optional<FlowCapableNode> flowNode = Optional.absent();
- //initialize the counter
+ // initialize the counter
int counter = 0;
try {
flowNode = trans.read(LogicalDatastoreType.CONFIGURATION, nodeIdentity).get();
- } catch (Exception e) {
+ } catch (ExecutionException | InterruptedException e) {
LOG.warn("Fail with read Config/DS for Node {} !", nodeIdentity, e);
return false;
}
if (flowNode.isPresent()) {
- /* Tables - have to be pushed before groups */
- // CHECK if while pusing the update, updateTableInput can be null to emulate a table add
+ /* Tables - have to be pushed before groups */
+ // CHECK if while pushing the update, updateTableInput can be null to emulate a
+ // table add
List<TableFeatures> tableList = flowNode.get().getTableFeatures() != null
- ? flowNode.get().getTableFeatures() : Collections.<TableFeatures>emptyList();
+ ? flowNode.get().getTableFeatures()
+ : Collections.<TableFeatures>emptyList();
for (TableFeatures tableFeaturesItem : tableList) {
TableFeaturesKey tableKey = tableFeaturesItem.getKey();
- KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> tableFeaturesII
- = nodeIdentity.child(TableFeatures.class, new TableFeaturesKey(tableKey.getTableId()));
+ KeyedInstanceIdentifier<TableFeatures, TableFeaturesKey> tableFeaturesII = nodeIdentity
+ .child(TableFeatures.class, new TableFeaturesKey(tableKey.getTableId()));
provider.getTableFeaturesCommiter().update(tableFeaturesII, tableFeaturesItem, null, nodeIdentity);
}
- /* Groups - have to be first */
- List<Group> groups = flowNode.get().getGroup() != null
- ? flowNode.get().getGroup() : Collections.<Group>emptyList();
+ /* Groups - have to be first */
+ List<Group> groups = flowNode.get().getGroup() != null ? flowNode.get().getGroup()
+ : Collections.<Group>emptyList();
List<Group> toBeInstalledGroups = new ArrayList<>();
toBeInstalledGroups.addAll(groups);
- //new list for suspected groups pointing to ports .. when the ports come up late
+ // new list for suspected groups pointing to ports .. when the ports come up
+ // late
List<Group> suspectedGroups = new ArrayList<>();
Map<Long, ListenableFuture<?>> groupFutures = new HashMap<>();
- while ((!(toBeInstalledGroups.isEmpty()) || !(suspectedGroups.isEmpty())) &&
- (counter <= provider.getReconciliationRetryCount())) { //also check if the counter has not crossed the threshold
+ while ((!toBeInstalledGroups.isEmpty() || !suspectedGroups.isEmpty())
+ && counter <= provider.getReconciliationRetryCount()) { // also check if the counter has not
+ // crossed the threshold
if (toBeInstalledGroups.isEmpty() && !suspectedGroups.isEmpty()) {
- LOG.debug("These Groups are pointing to node-connectors that are not up yet {}", suspectedGroups.toString());
+ LOG.debug("These Groups are pointing to node-connectors that are not up yet {}",
+ suspectedGroups.toString());
toBeInstalledGroups.addAll(suspectedGroups);
break;
}
Group group = iterator.next();
boolean okToInstall = true;
Buckets buckets = group.getBuckets();
- List<Bucket> bucketList = (buckets == null)
- ? null : buckets.getBucket();
+ List<Bucket> bucketList = buckets == null ? null : buckets.getBucket();
if (bucketList == null) {
bucketList = Collections.<Bucket>emptyList();
}
actions = Collections.<Action>emptyList();
}
for (Action action : actions) {
- //chained-port
+ // chained-port
if (action.getAction().getImplementedInterface().getName()
- .equals("org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.OutputActionCase")) {
- String nodeConnectorUri = ((OutputActionCase) (action.getAction()))
- .getOutputAction().getOutputNodeConnector().getValue();
+ .equals("org.opendaylight.yang.gen.v1.urn.opendaylight"
+ + ".action.types.rev131112.action.action.OutputActionCase")) {
+ String nodeConnectorUri = ((OutputActionCase) action.getAction()).getOutputAction()
+ .getOutputNodeConnector().getValue();
LOG.debug("Installing the group for node connector {}", nodeConnectorUri);
- //check if the nodeconnector is there in the multimap
+ // check if the nodeconnector is there in the multimap
boolean isPresent = provider.getFlowNodeConnectorInventoryTranslatorImpl()
- .isNodeConnectorUpdated(nDpId, nodeConnectorUri);
- //if yes set okToInstall = true
+ .isNodeConnectorUpdated(dpnId, nodeConnectorUri);
+ // if yes set okToInstall = true
if (isPresent) {
break;
- }//else put it in a different list and still set okToInstall = true
- else {
+ } else {
+ // else put it in a different list and still set okToInstall = true
suspectedGroups.add(group);
- LOG.debug("Not yet received the node-connector updated for {} " +
- "for the group with id {}", nodeConnectorUri, group.getGroupId().toString());
+ LOG.debug(
+ "Not yet received the node-connector updated for {} "
+ + "for the group with id {}",
+ nodeConnectorUri, group.getGroupId().toString());
break;
}
-
-
- }
- //chained groups
- else if (action.getAction().getImplementedInterface().getName()
- .equals("org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.GroupActionCase")) {
- Long groupId = ((GroupActionCase) (action.getAction())).getGroupAction().getGroupId();
- ListenableFuture<?> future =
- groupFutures.get(groupId);
+ } else if (action.getAction().getImplementedInterface().getName()
+ .equals("org.opendaylight.yang.gen.v1.urn.opendaylight"
+ + ".action.types.rev131112.action.action.GroupActionCase")) {
+ // chained groups
+ Long groupId = ((GroupActionCase) action.getAction()).getGroupAction().getGroupId();
+ ListenableFuture<?> future = groupFutures.get(groupId);
if (future == null) {
okToInstall = false;
break;
}
-
// Need to ensure that the group specified
// by group-action is already installed.
- awaitGroup(sNode, future);
+ awaitGroup(node, future);
}
}
if (!okToInstall) {
- //increment retry counter value
+ // increment retry counter value
counter++;
break;
}
}
-
if (okToInstall) {
addGroup(groupFutures, group);
iterator.remove();
}
}
- /* installation of suspected groups*/
+ /* installation of suspected groups */
if (!toBeInstalledGroups.isEmpty()) {
for (Group group : toBeInstalledGroups) {
- LOG.debug("Installing the group {} finally although the port is not up after checking for {} times "
- , group.getGroupId().toString(), provider.getReconciliationRetryCount());
+ LOG.debug(
+ "Installing the group {} finally although "
+ + "the port is not up after checking for {} times ",
+ group.getGroupId().toString(), provider.getReconciliationRetryCount());
addGroup(groupFutures, group);
}
}
- /* Meters */
- List<Meter> meters = flowNode.get().getMeter() != null
- ? flowNode.get().getMeter() : Collections.<Meter>emptyList();
+ /* Meters */
+ List<Meter> meters = flowNode.get().getMeter() != null ? flowNode.get().getMeter()
+ : Collections.<Meter>emptyList();
for (Meter meter : meters) {
- final KeyedInstanceIdentifier<Meter, MeterKey> meterIdent =
- nodeIdentity.child(Meter.class, meter.getKey());
+ final KeyedInstanceIdentifier<Meter, MeterKey> meterIdent = nodeIdentity.child(Meter.class,
+ meter.getKey());
provider.getMeterCommiter().add(meterIdent, meter, nodeIdentity);
}
// Need to wait for all groups to be installed before adding
// flows.
- awaitGroups(sNode, groupFutures.values());
+ awaitGroups(node, groupFutures.values());
- /* Flows */
- List<Table> tables = flowNode.get().getTable() != null
- ? flowNode.get().getTable() : Collections.<Table>emptyList();
+ /* Flows */
+ List<Table> tables = flowNode.get().getTable() != null ? flowNode.get().getTable()
+ : Collections.<Table>emptyList();
for (Table table : tables) {
- final KeyedInstanceIdentifier<Table, TableKey> tableIdent =
- nodeIdentity.child(Table.class, table.getKey());
+ final KeyedInstanceIdentifier<Table, TableKey> tableIdent = nodeIdentity.child(Table.class,
+ table.getKey());
List<Flow> flows = table.getFlow() != null ? table.getFlow() : Collections.<Flow>emptyList();
for (Flow flow : flows) {
- final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent =
- tableIdent.child(Flow.class, flow.getKey());
+ final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent = tableIdent.child(Flow.class,
+ flow.getKey());
provider.getFlowCommiter().add(flowIdent, flow, nodeIdentity);
}
}
}
- /* clean transaction */
+ /* clean transaction */
trans.close();
return true;
}
/**
- * Invoke add-group RPC, and put listenable future associated with the
- * RPC into the given map.
+ * Invoke add-group RPC, and put listenable future associated with the RPC into
+ * the given map.
*
- * @param map The map to store listenable futures associated with
- * add-group RPC.
- * @param group The group to add.
+ * @param map
+ * The map to store listenable futures associated with add-group RPC.
+ * @param group
+ * The group to add.
*/
private void addGroup(Map<Long, ListenableFuture<?>> map, Group group) {
- KeyedInstanceIdentifier<Group, GroupKey> groupIdent =
- nodeIdentity.child(Group.class, group.getKey());
+ KeyedInstanceIdentifier<Group, GroupKey> groupIdent = nodeIdentity.child(Group.class, group.getKey());
final Long groupId = group.getGroupId().getValue();
- ListenableFuture<?> future = JdkFutureAdapters.listenInPoolThread(
- provider.getGroupCommiter().add(
- groupIdent, group, nodeIdentity));
+ ListenableFuture<?> future = JdkFutureAdapters
+ .listenInPoolThread(provider.getGroupCommiter().add(groupIdent, group, nodeIdentity));
Futures.addCallback(future, new FutureCallback<Object>() {
@Override
public void onSuccess(Object result) {
if (LOG.isTraceEnabled()) {
LOG.trace("add-group RPC completed: node={}, id={}",
- nodeIdentity.firstKeyOf(Node.class).getId().
- getValue(), groupId);
+ nodeIdentity.firstKeyOf(Node.class).getId().getValue(), groupId);
}
}
@Override
public void onFailure(Throwable cause) {
- String msg = "add-group RPC failed: node=" +
- nodeIdentity.firstKeyOf(Node.class).getId().getValue() +
- ", id=" + groupId;
+ String msg = "add-group RPC failed: node=" + nodeIdentity.firstKeyOf(Node.class).getId().getValue()
+ + ", id=" + groupId;
LOG.debug(msg, cause);
}
});
/**
* Wait for completion of add-group RPC.
*
- * @param nodeId The identifier for the target node.
- * @param future Future associated with add-group RPC that installs
- * the target group.
+ * @param nodeId
+ * The identifier for the target node.
+ * @param future
+ * Future associated with add-group RPC that installs the target
+ * group.
*/
private void awaitGroup(String nodeId, ListenableFuture<?> future) {
awaitGroups(nodeId, Collections.singleton(future));
/**
* Wait for completion of add-group RPCs.
*
- * @param nodeId The identifier for the target node.
- * @param futures A collection of futures associated with add-group
- * RPCs.
+ * @param nodeId
+ * The identifier for the target node.
+ * @param futures
+ * A collection of futures associated with add-group RPCs.
*/
- private void awaitGroups(String nodeId,
- Collection<ListenableFuture<?>> futures) {
+ private void awaitGroups(String nodeId, Collection<ListenableFuture<?>> futures) {
if (!futures.isEmpty()) {
- long timeout = Math.min(
- ADD_GROUP_TIMEOUT * futures.size(), MAX_ADD_GROUP_TIMEOUT);
+ long timeout = Math.min(ADD_GROUP_TIMEOUT * futures.size(), MAX_ADD_GROUP_TIMEOUT);
try {
- Futures.successfulAsList(futures).
- get(timeout, TimeUnit.NANOSECONDS);
+ Futures.successfulAsList(futures).get(timeout, TimeUnit.NANOSECONDS);
LOG.trace("awaitGroups() completed: node={}", nodeId);
- } catch (TimeoutException e) {
- LOG.debug("add-group RPCs did not complete: node={}",
- nodeId);
- } catch (Exception e) {
- LOG.debug("Unhandled exception while waiting for group installation on node {}",
- nodeId, e);
+ } catch (TimeoutException | InterruptedException | ExecutionException e) {
+ LOG.debug("add-group RPCs did not complete: node={}", nodeId);
}
}
}
}
private void reconciliationPreProcess(final InstanceIdentifier<FlowCapableNode> nodeIdent) {
-
List<InstanceIdentifier<StaleFlow>> staleFlowsToBeBulkDeleted = Lists.newArrayList();
List<InstanceIdentifier<StaleGroup>> staleGroupsToBeBulkDeleted = Lists.newArrayList();
List<InstanceIdentifier<StaleMeter>> staleMetersToBeBulkDeleted = Lists.newArrayList();
-
ReadOnlyTransaction trans = provider.getReadTranaction();
Optional<FlowCapableNode> flowNode = Optional.absent();
try {
flowNode = trans.read(LogicalDatastoreType.CONFIGURATION, nodeIdent).get();
- }
- catch (Exception e) {
+ } catch (ExecutionException | InterruptedException e) {
LOG.warn("Reconciliation Pre-Processing Fail with read Config/DS for Node {} !", nodeIdent, e);
}
LOG.debug("Proceeding with deletion of stale-marked Flows on switch {} using Openflow interface",
nodeIdent.toString());
/* Stale-Flows - Stale-marked Flows have to be removed first for safety */
- List<Table> tables = flowNode.get().getTable() != null
- ? flowNode.get().getTable() : Collections.<Table> emptyList();
+ List<Table> tables = flowNode.get().getTable() != null ? flowNode.get().getTable()
+ : Collections.<Table>emptyList();
for (Table table : tables) {
- final KeyedInstanceIdentifier<Table, TableKey> tableIdent =
- nodeIdent.child(Table.class, table.getKey());
- List<StaleFlow> staleFlows = table.getStaleFlow() != null ? table.getStaleFlow() : Collections.<StaleFlow> emptyList();
+ final KeyedInstanceIdentifier<Table, TableKey> tableIdent = nodeIdent.child(Table.class,
+ table.getKey());
+ List<StaleFlow> staleFlows = table.getStaleFlow() != null ? table.getStaleFlow()
+ : Collections.<StaleFlow>emptyList();
for (StaleFlow staleFlow : staleFlows) {
FlowBuilder flowBuilder = new FlowBuilder(staleFlow);
Flow toBeDeletedFlow = flowBuilder.setId(staleFlow.getId()).build();
- final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent =
- tableIdent.child(Flow.class, toBeDeletedFlow.getKey());
-
+ final KeyedInstanceIdentifier<Flow, FlowKey> flowIdent = tableIdent.child(Flow.class,
+ toBeDeletedFlow.getKey());
this.provider.getFlowCommiter().remove(flowIdent, toBeDeletedFlow, nodeIdent);
}
}
-
LOG.debug("Proceeding with deletion of stale-marked Groups for switch {} using Openflow interface",
nodeIdent.toString());
- // TODO: Should we collate the futures of RPC-calls to be sure that groups are Flows are fully deleted
+ // TODO: Should we collate the futures of RPC-calls to be sure that groups and
+ // Flows are fully deleted
// before attempting to delete groups - just in case there are references
/* Stale-marked Groups - Can be deleted after flows */
- List<StaleGroup> staleGroups = flowNode.get().getStaleGroup() != null
- ? flowNode.get().getStaleGroup() : Collections.<StaleGroup> emptyList();
+ List<StaleGroup> staleGroups = flowNode.get().getStaleGroup() != null ? flowNode.get().getStaleGroup()
+ : Collections.<StaleGroup>emptyList();
for (StaleGroup staleGroup : staleGroups) {
GroupBuilder groupBuilder = new GroupBuilder(staleGroup);
Group toBeDeletedGroup = groupBuilder.setGroupId(staleGroup.getGroupId()).build();
- final KeyedInstanceIdentifier<Group, GroupKey> groupIdent =
- nodeIdent.child(Group.class, toBeDeletedGroup.getKey());
+ final KeyedInstanceIdentifier<Group, GroupKey> groupIdent = nodeIdent.child(Group.class,
+ toBeDeletedGroup.getKey());
this.provider.getGroupCommiter().remove(groupIdent, toBeDeletedGroup, nodeIdent);
LOG.debug("Proceeding with deletion of stale-marked Meters for switch {} using Openflow interface",
nodeIdent.toString());
/* Stale-marked Meters - can be deleted anytime - so least priority */
- List<StaleMeter> staleMeters = flowNode.get().getStaleMeter() != null
- ? flowNode.get().getStaleMeter() : Collections.<StaleMeter> emptyList();
+ List<StaleMeter> staleMeters = flowNode.get().getStaleMeter() != null ? flowNode.get().getStaleMeter()
+ : Collections.<StaleMeter>emptyList();
for (StaleMeter staleMeter : staleMeters) {
MeterBuilder meterBuilder = new MeterBuilder(staleMeter);
Meter toBeDeletedMeter = meterBuilder.setMeterId(staleMeter.getMeterId()).build();
- final KeyedInstanceIdentifier<Meter, MeterKey> meterIdent =
- nodeIdent.child(Meter.class, toBeDeletedMeter.getKey());
-
+ final KeyedInstanceIdentifier<Meter, MeterKey> meterIdent = nodeIdent.child(Meter.class,
+ toBeDeletedMeter.getKey());
this.provider.getMeterCommiter().remove(meterIdent, toBeDeletedMeter, nodeIdent);
LOG.debug("Deleting all stale-marked flows/groups/meters of for switch {} in Configuration DS",
nodeIdent.toString());
- // Now, do the bulk deletions
- deleteDSStaleFlows(staleFlowsToBeBulkDeleted);
+ // Now, do the bulk deletions
+ deleteDSStaleFlows(staleFlowsToBeBulkDeleted);
deleteDSStaleGroups(staleGroupsToBeBulkDeleted);
deleteDSStaleMeters(staleMetersToBeBulkDeleted);
-
}
-
- private void deleteDSStaleFlows(List<InstanceIdentifier<StaleFlow>> flowsForBulkDelete){
+ private void deleteDSStaleFlows(List<InstanceIdentifier<StaleFlow>> flowsForBulkDelete) {
ImmutableList.Builder<InstanceIdentifier<StaleFlow>> builder = ImmutableList.builder();
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
- for (InstanceIdentifier<StaleFlow> staleFlowIId : flowsForBulkDelete){
+ for (InstanceIdentifier<StaleFlow> staleFlowIId : flowsForBulkDelete) {
writeTransaction.delete(LogicalDatastoreType.CONFIGURATION, staleFlowIId);
}
handleStaleEntityDeletionResultFuture(submitFuture);
}
- private void deleteDSStaleGroups(List<InstanceIdentifier<StaleGroup>> groupsForBulkDelete){
+ private void deleteDSStaleGroups(List<InstanceIdentifier<StaleGroup>> groupsForBulkDelete) {
ImmutableList.Builder<InstanceIdentifier<StaleGroup>> builder = ImmutableList.builder();
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
- for (InstanceIdentifier<StaleGroup> staleGroupIId : groupsForBulkDelete){
+ for (InstanceIdentifier<StaleGroup> staleGroupIId : groupsForBulkDelete) {
writeTransaction.delete(LogicalDatastoreType.CONFIGURATION, staleGroupIId);
}
CheckedFuture<Void, TransactionCommitFailedException> submitFuture = writeTransaction.submit();
handleStaleEntityDeletionResultFuture(submitFuture);
-
}
- private void deleteDSStaleMeters(List<InstanceIdentifier<StaleMeter>> metersForBulkDelete){
+ private void deleteDSStaleMeters(List<InstanceIdentifier<StaleMeter>> metersForBulkDelete) {
ImmutableList.Builder<InstanceIdentifier<StaleMeter>> builder = ImmutableList.builder();
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
- for (InstanceIdentifier<StaleMeter> staleMeterIId : metersForBulkDelete){
+ for (InstanceIdentifier<StaleMeter> staleMeterIId : metersForBulkDelete) {
writeTransaction.delete(LogicalDatastoreType.CONFIGURATION, staleMeterIId);
}
CheckedFuture<Void, TransactionCommitFailedException> submitFuture = writeTransaction.submit();
handleStaleEntityDeletionResultFuture(submitFuture);
-
-
}
-
- private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.StaleFlow> getStaleFlowInstanceIdentifier(StaleFlow staleFlow, InstanceIdentifier<FlowCapableNode> nodeIdent) {
- return nodeIdent
- .child(Table.class, new TableKey(staleFlow.getTableId()))
- .child(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.StaleFlow.class,
- new StaleFlowKey(new FlowId(staleFlow.getId())));
+ private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight
+ .flow.inventory.rev130819.tables.table.StaleFlow> getStaleFlowInstanceIdentifier(
+ StaleFlow staleFlow, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ return nodeIdent.child(Table.class, new TableKey(staleFlow.getTableId())).child(
+ org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.StaleFlow.class,
+ new StaleFlowKey(new FlowId(staleFlow.getId())));
}
- private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.StaleGroup> getStaleGroupInstanceIdentifier(StaleGroup staleGroup, InstanceIdentifier<FlowCapableNode> nodeIdent) {
- return nodeIdent
- .child(StaleGroup.class, new StaleGroupKey(new GroupId(staleGroup.getGroupId())));
+ private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight
+ .group.types.rev131018.groups.StaleGroup> getStaleGroupInstanceIdentifier(
+ StaleGroup staleGroup, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ return nodeIdent.child(StaleGroup.class, new StaleGroupKey(new GroupId(staleGroup.getGroupId())));
}
-
- private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.StaleMeter> getStaleMeterInstanceIdentifier(StaleMeter staleMeter, InstanceIdentifier<FlowCapableNode> nodeIdent) {
- return nodeIdent
- .child(StaleMeter.class, new StaleMeterKey(new MeterId(staleMeter.getMeterId())));
+ private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight
+ .flow.inventory.rev130819.meters.StaleMeter> getStaleMeterInstanceIdentifier(
+ StaleMeter staleMeter, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ return nodeIdent.child(StaleMeter.class, new StaleMeterKey(new MeterId(staleMeter.getMeterId())));
}
-
- private void handleStaleEntityDeletionResultFuture(CheckedFuture<Void, TransactionCommitFailedException> submitFuture) {
+ private void handleStaleEntityDeletionResultFuture(
+ CheckedFuture<Void, TransactionCommitFailedException> submitFuture) {
Futures.addCallback(submitFuture, new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
}
@Override
- public void onFailure(Throwable t) {
- LOG.debug("Stale entity removal failed {}", t);
+ public void onFailure(Throwable throwable) {
+ LOG.debug("Stale entity removal failed {}", throwable);
}
});
}
- private Flow getDeleteAllFlow(){
+ private Flow getDeleteAllFlow() {
final FlowBuilder flowBuilder = new FlowBuilder();
flowBuilder.setTableId(OFConstants.OFPTT_ALL);
return flowBuilder.build();
}
- private Group getDeleteAllGroup(){
+ private Group getDeleteAllGroup() {
final GroupBuilder groupBuilder = new GroupBuilder();
groupBuilder.setGroupType(GroupTypes.GroupAll);
groupBuilder.setGroupId(new GroupId(OFConstants.OFPG_ALL));
return groupBuilder.build();
}
- private Messages createMessages(final NodeRef nodeRef , final Optional<FlowCapableNode> flowNode) {
- final List<Message> messages = new ArrayList<>();
- messages.add(new MessageBuilder().setNode(nodeRef).setBundleInnerMessage(
- new BundleRemoveFlowCaseBuilder()
- .setRemoveFlowCaseData(new RemoveFlowCaseDataBuilder(getDeleteAllFlow()).build()).build()).build());
+ private Messages createMessages(final NodeRef nodeRef, final Optional<FlowCapableNode> flowNode) {
+ final List<Message> messages = new ArrayList<>();
+ messages.add(new MessageBuilder().setNode(nodeRef)
+ .setBundleInnerMessage(new BundleRemoveFlowCaseBuilder()
+ .setRemoveFlowCaseData(new RemoveFlowCaseDataBuilder(getDeleteAllFlow()).build()).build())
+ .build());
- messages.add(new MessageBuilder().setNode(nodeRef).setBundleInnerMessage(
- new BundleRemoveGroupCaseBuilder()
- .setRemoveGroupCaseData(new RemoveGroupCaseDataBuilder(getDeleteAllGroup()).build()).build()).build());
+ messages.add(new MessageBuilder().setNode(nodeRef)
+ .setBundleInnerMessage(new BundleRemoveGroupCaseBuilder()
+ .setRemoveGroupCaseData(new RemoveGroupCaseDataBuilder(getDeleteAllGroup()).build()).build())
+ .build());
- if(flowNode.get().getGroup()!= null) {
+ if (flowNode.get().getGroup() != null) {
for (Group gr : flowNode.get().getGroup()) {
- messages.add(new MessageBuilder().setNode(nodeRef).setBundleInnerMessage(
- new BundleAddGroupCaseBuilder()
- .setAddGroupCaseData(new AddGroupCaseDataBuilder(gr).build()).build()).build());
+ messages.add(new MessageBuilder().setNode(nodeRef).setBundleInnerMessage(new BundleAddGroupCaseBuilder()
+ .setAddGroupCaseData(new AddGroupCaseDataBuilder(gr).build()).build()).build());
}
}
- if(flowNode.get().getTable()!= null) {
+ if (flowNode.get().getTable() != null) {
for (Table table : flowNode.get().getTable()) {
for (Flow flow : table.getFlow()) {
- messages.add(new MessageBuilder().setNode(nodeRef).setBundleInnerMessage(
- new BundleAddFlowCaseBuilder()
- .setAddFlowCaseData(new AddFlowCaseDataBuilder(flow).build()).build()).build());
+ messages.add(
+ new MessageBuilder().setNode(nodeRef)
+ .setBundleInnerMessage(new BundleAddFlowCaseBuilder()
+ .setAddFlowCaseData(new AddFlowCaseDataBuilder(flow).build()).build())
+ .build());
}
}
}
return new MessagesBuilder().setMessage(messages).build();
}
}
-
import java.util.concurrent.atomic.AtomicLong;
import javax.annotation.Nonnull;
-import com.google.common.util.concurrent.Futures;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.slf4j.LoggerFactory;
/**
- * forwardingrules-manager
- * org.opendaylight.openflowplugin.applications.frm.impl
+ * forwardingrules-manager org.opendaylight.openflowplugin.applications.frm.impl
*
- * Manager and middle point for whole module.
- * It contains ActiveNodeHolder and provide all RPC services.
+ * <p>
+ * Manager and middle point for whole module. It contains ActiveNodeHolder and
+ * provide all RPC services.
*
*/
public class ForwardingRulesManagerImpl implements ForwardingRulesManager {
private int reconciliationRetryCount;
private boolean isBundleBasedReconciliationEnabled;
- public ForwardingRulesManagerImpl(final DataBroker dataBroker,
- final RpcConsumerRegistry rpcRegistry,
- final ForwardingRulesManagerConfig config,
- final ClusterSingletonServiceProvider clusterSingletonService,
- final NotificationProviderService notificationService,
- final ConfigurationService configurationService,
- final ReconciliationManager reconciliationManager) {
+ public ForwardingRulesManagerImpl(final DataBroker dataBroker, final RpcConsumerRegistry rpcRegistry,
+ final ForwardingRulesManagerConfig config, final ClusterSingletonServiceProvider clusterSingletonService,
+ final NotificationProviderService notificationService, final ConfigurationService configurationService,
+ final ReconciliationManager reconciliationManager) {
disableReconciliation = config.isDisableReconciliation();
staleMarkingEnabled = config.isStaleMarkingEnabled();
reconciliationRetryCount = config.getReconciliationRetryCount();
this.dataService = Preconditions.checkNotNull(dataBroker, "DataBroker can not be null!");
this.clusterSingletonServiceProvider = Preconditions.checkNotNull(clusterSingletonService,
"ClusterSingletonService provider can not be null");
- this.notificationService = Preconditions.checkNotNull(notificationService, "Notification publisher configurationService is" +
- " not available");
+ this.notificationService = Preconditions.checkNotNull(notificationService,
+ "Notification publisher configurationService is" + " not available");
this.reconciliationManager = reconciliationManager;
Preconditions.checkArgument(rpcRegistry != null, "RpcConsumerRegistry can not be null !");
@Override
public void start() {
- this.nodeListener = new FlowNodeReconciliationImpl(this,dataService,
- SERVICE_NAME, FRM_RECONCILIATION_PRIORITY, ResultState.DONOTHING);
+ this.nodeListener = new FlowNodeReconciliationImpl(this, dataService, SERVICE_NAME, FRM_RECONCILIATION_PRIORITY,
+ ResultState.DONOTHING);
if (this.isReconciliationDisabled()) {
LOG.debug("Reconciliation is disabled by user");
} else {
this.reconciliationNotificationRegistration = reconciliationManager.registerService(this.nodeListener);
LOG.debug("Reconciliation is enabled by user and successfully registered to the reconciliation framework");
}
- this.deviceMastershipManager = new DeviceMastershipManager(clusterSingletonServiceProvider,
- notificationService,
- this.nodeListener,
- dataService);
- flowNodeConnectorInventoryTranslatorImpl = new FlowNodeConnectorInventoryTranslatorImpl(this,dataService);
+ this.deviceMastershipManager = new DeviceMastershipManager(clusterSingletonServiceProvider, notificationService,
+ this.nodeListener, dataService);
+ flowNodeConnectorInventoryTranslatorImpl = new FlowNodeConnectorInventoryTranslatorImpl(this, dataService);
this.flowListener = new FlowForwarder(this, dataService);
this.groupListener = new GroupForwarder(this, dataService);
boolean result = false;
InstanceIdentifier<Node> nodeIid = ident.firstIdentifierOf(Node.class);
final ReadOnlyTransaction transaction = dataService.newReadOnlyTransaction();
- CheckedFuture<com.google.common.base.Optional<Node>, ReadFailedException> future = transaction.read(LogicalDatastoreType.OPERATIONAL, nodeIid);
+ CheckedFuture<com.google.common.base.Optional<Node>, ReadFailedException> future = transaction
+ .read(LogicalDatastoreType.OPERATIONAL, nodeIid);
try {
com.google.common.base.Optional<Node> optionalDataObject = future.checkedGet();
if (optionalDataObject.isPresent()) {
result = true;
} else {
- LOG.debug("{}: Failed to read {}",
- Thread.currentThread().getStackTrace()[1], nodeIid);
+ LOG.debug("{}: Failed to read {}", Thread.currentThread().getStackTrace()[1], nodeIid);
}
} catch (ReadFailedException e) {
LOG.warn("Failed to read {} ", nodeIid, e);
case BUNDLE_BASED_RECONCILIATION_ENABLED:
isBundleBasedReconciliationEnabled = Boolean.valueOf(propertyValue);
break;
+ default:
+ LOG.warn("Not forwarding rule property found.");
+ break;
}
});
}
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
-import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
import org.slf4j.LoggerFactory;
/**
- * GroupForwarder
- * It implements {@link org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener}
- * for WildCardedPath to {@link Group} and ForwardingRulesCommiter interface for methods:
- * add, update and remove {@link Group} processing for
+ * GroupForwarder It implements
+ * {@link org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener}
+ * for WildCardedPath to {@link Group} and ForwardingRulesCommiter interface for
+ * methods: add, update and remove {@link Group} processing for
* {@link org.opendaylight.controller.md.sal.binding.api.DataTreeModification}.
*/
public class GroupForwarder extends AbstractListeningCommiter<Group> {
private final DataBroker dataBroker;
private ListenerRegistration<GroupForwarder> listenerRegistration;
- public GroupForwarder (final ForwardingRulesManager manager, final DataBroker db) {
+ @SuppressWarnings("IllegalCatch")
+ public GroupForwarder(final ForwardingRulesManager manager, final DataBroker db) {
super(manager, Group.class);
dataBroker = Preconditions.checkNotNull(db, "DataBroker can not be null!");
- final DataTreeIdentifier<Group> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION, getWildCardPath());
+ final DataTreeIdentifier<Group> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath());
try {
SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(ForwardingRulesManagerImpl.STARTUP_LOOP_TICK,
ForwardingRulesManagerImpl.STARTUP_LOOP_MAX_RETRIES);
- listenerRegistration = looper.loopUntilNoException(new Callable<ListenerRegistration<GroupForwarder>>() {
- @Override
- public ListenerRegistration<GroupForwarder> call() throws Exception {
- return db.registerDataTreeChangeListener(treeId, GroupForwarder.this);
- }
- });
+ listenerRegistration = looper
+ .loopUntilNoException(() -> db.registerDataTreeChangeListener(treeId, GroupForwarder.this));
} catch (final Exception e) {
LOG.warn("FRM Group DataTreeChange listener registration fail!");
LOG.debug("FRM Group DataTreeChange listener registration fail ..", e);
@Override
public void close() {
if (listenerRegistration != null) {
- try {
- listenerRegistration.close();
- } catch (Exception e) {
- LOG.warn("Error by stop FRM GroupChangeListener: {}", e.getMessage());
- LOG.debug("Error by stop FRM GroupChangeListener..", e);
- }
+ listenerRegistration.close();
listenerRegistration = null;
}
}
@Override
protected InstanceIdentifier<Group> getWildCardPath() {
- return InstanceIdentifier.create(Nodes.class).child(Node.class)
- .augmentation(FlowCapableNode.class).child(Group.class);
+ return InstanceIdentifier.create(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class)
+ .child(Group.class);
}
@Override
public void remove(final InstanceIdentifier<Group> identifier, final Group removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- final Group group = (removeDataObj);
+ final Group group = removeDataObj;
final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group);
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
this.provider.getSalGroupService().removeGroup(builder.build());
}
- //TODO: Pull this into ForwardingRulesCommiter and override it here
+ // TODO: Pull this into ForwardingRulesCommiter and override it here
@Override
- public Future<RpcResult<RemoveGroupOutput>> removeWithResult(final InstanceIdentifier<Group> identifier, final Group removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public Future<RpcResult<RemoveGroupOutput>> removeWithResult(final InstanceIdentifier<Group> identifier,
+ final Group removeDataObj, final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- final Group group = (removeDataObj);
+ final Group group = removeDataObj;
final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group);
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
}
@Override
- public void update(final InstanceIdentifier<Group> identifier,
- final Group original, final Group update,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public void update(final InstanceIdentifier<Group> identifier, final Group original, final Group update,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- final Group originalGroup = (original);
- final Group updatedGroup = (update);
+ final Group originalGroup = original;
+ final Group updatedGroup = update;
final UpdateGroupInputBuilder builder = new UpdateGroupInputBuilder();
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setGroupRef(new GroupRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
- builder.setUpdatedGroup((new UpdatedGroupBuilder(updatedGroup)).build());
- builder.setOriginalGroup((new OriginalGroupBuilder(originalGroup)).build());
+ builder.setUpdatedGroup(new UpdatedGroupBuilder(updatedGroup).build());
+ builder.setOriginalGroup(new OriginalGroupBuilder(originalGroup).build());
this.provider.getSalGroupService().updateGroup(builder.build());
}
@Override
- public Future<RpcResult<AddGroupOutput>> add(
- final InstanceIdentifier<Group> identifier, final Group addDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public Future<RpcResult<AddGroupOutput>> add(final InstanceIdentifier<Group> identifier, final Group addDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- final Group group = (addDataObj);
+ final Group group = addDataObj;
final AddGroupInputBuilder builder = new AddGroupInputBuilder(group);
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
}
@Override
- public void createStaleMarkEntity(InstanceIdentifier<Group> identifier, Group del, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public void createStaleMarkEntity(InstanceIdentifier<Group> identifier, Group del,
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
LOG.debug("Creating Stale-Mark entry for the switch {} for Group {} ", nodeIdent.toString(), del.toString());
StaleGroup staleGroup = makeStaleGroup(identifier, del, nodeIdent);
persistStaleGroup(staleGroup, nodeIdent);
}
-
- private StaleGroup makeStaleGroup(InstanceIdentifier<Group> identifier, Group del, InstanceIdentifier<FlowCapableNode> nodeIdent){
+ private StaleGroup makeStaleGroup(InstanceIdentifier<Group> identifier, Group del,
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
StaleGroupBuilder staleGroupBuilder = new StaleGroupBuilder(del);
return staleGroupBuilder.setGroupId(del.getGroupId()).build();
}
- private void persistStaleGroup(StaleGroup staleGroup, InstanceIdentifier<FlowCapableNode> nodeIdent){
+ private void persistStaleGroup(StaleGroup staleGroup, InstanceIdentifier<FlowCapableNode> nodeIdent) {
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
- writeTransaction.put(LogicalDatastoreType.CONFIGURATION, getStaleGroupInstanceIdentifier(staleGroup, nodeIdent), staleGroup, false);
+ writeTransaction.put(LogicalDatastoreType.CONFIGURATION, getStaleGroupInstanceIdentifier(staleGroup, nodeIdent),
+ staleGroup, false);
CheckedFuture<Void, TransactionCommitFailedException> submitFuture = writeTransaction.submit();
handleStaleGroupResultFuture(submitFuture);
}
@Override
- public void onFailure(Throwable t) {
- LOG.error("Stale Group creation failed {}", t);
+ public void onFailure(Throwable throwable) {
+ LOG.error("Stale Group creation failed {}", throwable);
}
});
}
- private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.StaleGroup> getStaleGroupInstanceIdentifier(StaleGroup staleGroup, InstanceIdentifier<FlowCapableNode> nodeIdent) {
- return nodeIdent
- .child(StaleGroup.class, new StaleGroupKey(new GroupId(staleGroup.getGroupId())));
+ private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.group
+ .types.rev131018.groups.StaleGroup> getStaleGroupInstanceIdentifier(
+ StaleGroup staleGroup, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ return nodeIdent.child(StaleGroup.class, new StaleGroupKey(new GroupId(staleGroup.getGroupId())));
}
}
-
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
-import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
import org.slf4j.LoggerFactory;
/**
- * MeterForwarder
- * It implements {@link org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener}
- * for WildCardedPath to {@link Meter} and ForwardingRulesCommiter interface for methods:
- * add, update and remove {@link Meter} processing for
+ * MeterForwarder It implements
+ * {@link org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener}
+ * for WildCardedPath to {@link Meter} and ForwardingRulesCommiter interface for
+ * methods: add, update and remove {@link Meter} processing for
* {@link org.opendaylight.controller.md.sal.binding.api.DataTreeModification}.
*
*/
private final DataBroker dataBroker;
private ListenerRegistration<MeterForwarder> listenerRegistration;
- public MeterForwarder (final ForwardingRulesManager manager, final DataBroker db) {
+ @SuppressWarnings("IllegalCatch")
+ public MeterForwarder(final ForwardingRulesManager manager, final DataBroker db) {
super(manager, Meter.class);
dataBroker = Preconditions.checkNotNull(db, "DataBroker can not be null!");
- final DataTreeIdentifier<Meter> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION, getWildCardPath());
+ final DataTreeIdentifier<Meter> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath());
try {
SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(ForwardingRulesManagerImpl.STARTUP_LOOP_TICK,
ForwardingRulesManagerImpl.STARTUP_LOOP_MAX_RETRIES);
- listenerRegistration = looper.loopUntilNoException(new Callable<ListenerRegistration<MeterForwarder>>() {
- @Override
- public ListenerRegistration<MeterForwarder> call() throws Exception {
- return db.registerDataTreeChangeListener(treeId, MeterForwarder.this);
- }
- });
+ listenerRegistration = looper
+ .loopUntilNoException(() -> db.registerDataTreeChangeListener(treeId, MeterForwarder.this));
} catch (final Exception e) {
LOG.warn("FRM Meter DataTreeChange listener registration fail!");
LOG.debug("FRM Meter DataTreeChange listener registration fail ..", e);
@Override
public void close() {
if (listenerRegistration != null) {
- try {
- listenerRegistration.close();
- } catch (Exception e) {
- LOG.warn("Error by stop FRM MeterChangeListener.{}", e.getMessage());
- LOG.debug("Error by stop FRM MeterChangeListener..", e);
- }
+ listenerRegistration.close();
listenerRegistration = null;
}
}
@Override
protected InstanceIdentifier<Meter> getWildCardPath() {
- return InstanceIdentifier.create(Nodes.class).child(Node.class)
- .augmentation(FlowCapableNode.class).child(Meter.class);
+ return InstanceIdentifier.create(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class)
+ .child(Meter.class);
}
@Override
public void remove(final InstanceIdentifier<Meter> identifier, final Meter removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj);
this.provider.getSalMeterService().removeMeter(builder.build());
}
-
@Override
- public Future<RpcResult<RemoveMeterOutput>> removeWithResult(final InstanceIdentifier<Meter> identifier, final Meter removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public Future<RpcResult<RemoveMeterOutput>> removeWithResult(final InstanceIdentifier<Meter> identifier,
+ final Meter removeDataObj, final InstanceIdentifier<FlowCapableNode> nodeIdent) {
final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj);
}
@Override
- public void update(final InstanceIdentifier<Meter> identifier,
- final Meter original, final Meter update,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public void update(final InstanceIdentifier<Meter> identifier, final Meter original, final Meter update,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
final UpdateMeterInputBuilder builder = new UpdateMeterInputBuilder();
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
builder.setMeterRef(new MeterRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
- builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build());
- builder.setOriginalMeter((new OriginalMeterBuilder(original)).build());
+ builder.setUpdatedMeter(new UpdatedMeterBuilder(update).build());
+ builder.setOriginalMeter(new OriginalMeterBuilder(original).build());
this.provider.getSalMeterService().updateMeter(builder.build());
}
@Override
- public Future<RpcResult<AddMeterOutput>> add(
- final InstanceIdentifier<Meter> identifier, final Meter addDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public Future<RpcResult<AddMeterOutput>> add(final InstanceIdentifier<Meter> identifier, final Meter addDataObj,
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
final AddMeterInputBuilder builder = new AddMeterInputBuilder(addDataObj);
}
@Override
- public void createStaleMarkEntity(InstanceIdentifier<Meter> identifier, Meter del, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public void createStaleMarkEntity(InstanceIdentifier<Meter> identifier, Meter del,
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
LOG.debug("Creating Stale-Mark entry for the switch {} for meter {} ", nodeIdent.toString(), del.toString());
StaleMeter staleMeter = makeStaleMeter(identifier, del, nodeIdent);
persistStaleMeter(staleMeter, nodeIdent);
}
- private StaleMeter makeStaleMeter(InstanceIdentifier<Meter> identifier, Meter del, InstanceIdentifier<FlowCapableNode> nodeIdent){
+ private StaleMeter makeStaleMeter(InstanceIdentifier<Meter> identifier, Meter del,
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
StaleMeterBuilder staleMeterBuilder = new StaleMeterBuilder(del);
return staleMeterBuilder.setMeterId(del.getMeterId()).build();
}
- private void persistStaleMeter(StaleMeter staleMeter, InstanceIdentifier<FlowCapableNode> nodeIdent){
+ private void persistStaleMeter(StaleMeter staleMeter, InstanceIdentifier<FlowCapableNode> nodeIdent) {
WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
- writeTransaction.put(LogicalDatastoreType.CONFIGURATION, getStaleMeterInstanceIdentifier(staleMeter, nodeIdent), staleMeter, false);
+ writeTransaction.put(LogicalDatastoreType.CONFIGURATION, getStaleMeterInstanceIdentifier(staleMeter, nodeIdent),
+ staleMeter, false);
CheckedFuture<Void, TransactionCommitFailedException> submitFuture = writeTransaction.submit();
handleStaleMeterResultFuture(submitFuture);
-
}
- private void handleStaleMeterResultFuture(CheckedFuture<Void, TransactionCommitFailedException> submitFuture){
+ private void handleStaleMeterResultFuture(CheckedFuture<Void, TransactionCommitFailedException> submitFuture) {
Futures.addCallback(submitFuture, new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
}
@Override
- public void onFailure(Throwable t) {
- LOG.error("Stale Meter creation failed {}", t);
+ public void onFailure(Throwable throwable) {
+ LOG.error("Stale Meter creation failed {}", throwable);
}
});
-
}
-
- private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.StaleMeter> getStaleMeterInstanceIdentifier(StaleMeter staleMeter, InstanceIdentifier<FlowCapableNode> nodeIdent) {
- return nodeIdent
- .child(StaleMeter.class, new StaleMeterKey(new MeterId(staleMeter.getMeterId())));
+ private InstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.flow
+ .inventory.rev130819.meters.StaleMeter> getStaleMeterInstanceIdentifier(
+ StaleMeter staleMeter, InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ return nodeIdent.child(StaleMeter.class, new StaleMeterKey(new MeterId(staleMeter.getMeterId())));
}
}
-
/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
private static final Logger LOG = LoggerFactory.getLogger(TableForwarder.class);
private ListenerRegistration<TableForwarder> listenerRegistration;
+ @SuppressWarnings("IllegalCatch")
public TableForwarder(final ForwardingRulesManager manager, final DataBroker db) {
super(manager, TableFeatures.class);
Preconditions.checkNotNull(db, "DataBroker can not be null!");
- final DataTreeIdentifier<TableFeatures> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION, getWildCardPath());
+ final DataTreeIdentifier<TableFeatures> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION,
+ getWildCardPath());
try {
SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(ForwardingRulesManagerImpl.STARTUP_LOOP_TICK,
ForwardingRulesManagerImpl.STARTUP_LOOP_MAX_RETRIES);
- listenerRegistration = looper.loopUntilNoException(() -> db.registerDataTreeChangeListener(treeId, TableForwarder.this));
+ listenerRegistration = looper
+ .loopUntilNoException(() -> db.registerDataTreeChangeListener(treeId, TableForwarder.this));
} catch (final Exception e) {
LOG.warn("FRM Table DataTreeChangeListener registration fail!");
LOG.debug("FRM Table DataTreeChangeListener registration fail ..", e);
@Override
public void close() {
if (listenerRegistration != null) {
- try {
- listenerRegistration.close();
- } catch (Exception e) {
- LOG.warn("Error by stop FRM TableChangeListener: {}", e.getMessage());
- LOG.debug("Error by stop FRM TableChangeListener..", e);
- }
+ listenerRegistration.close();
listenerRegistration = null;
}
}
@Override
protected InstanceIdentifier<TableFeatures> getWildCardPath() {
- return InstanceIdentifier.create(Nodes.class).child(Node.class)
- .augmentation(FlowCapableNode.class).child(TableFeatures.class);
+ return InstanceIdentifier.create(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class)
+ .child(TableFeatures.class);
}
@Override
public void remove(final InstanceIdentifier<TableFeatures> identifier, final TableFeatures removeDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ final InstanceIdentifier<FlowCapableNode> nodeIdent) {
// DO Nothing
}
@Override
- public void update(final InstanceIdentifier<TableFeatures> identifier,
- final TableFeatures original, final TableFeatures update,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- LOG.debug("Received the Table Update request [Tbl id, node Id, original, upd" +
- " " + identifier + " " + nodeIdent + " " + original + " " + update);
+ public void update(final InstanceIdentifier<TableFeatures> identifier, final TableFeatures original,
+ final TableFeatures update, final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ LOG.debug("Received the Table Update request [Tbl id, node Id, original, upd" + " " + identifier + " "
+ + nodeIdent + " " + original + " " + update);
final TableFeatures originalTableFeatures = original;
TableFeatures updatedTableFeatures;
if (null == update) {
updatedTableFeatures = original;
- }
- else {
+ } else {
updatedTableFeatures = update;
}
final UpdateTableInputBuilder builder = new UpdateTableInputBuilder();
builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
- // TODO: reconsider model - this particular field is not used in service implementation
+ // TODO: reconsider model - this particular field is not used in service
+ // implementation
builder.setTableRef(new TableRef(identifier));
builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
- builder.setUpdatedTable(new UpdatedTableBuilder().setTableFeatures(
- Collections.singletonList(updatedTableFeatures)).build());
+ builder.setUpdatedTable(
+ new UpdatedTableBuilder().setTableFeatures(Collections.singletonList(updatedTableFeatures)).build());
- builder.setOriginalTable(new OriginalTableBuilder().setTableFeatures(
- Collections.singletonList(originalTableFeatures)).build());
+ builder.setOriginalTable(
+ new OriginalTableBuilder().setTableFeatures(Collections.singletonList(originalTableFeatures)).build());
LOG.debug("Invoking SalTableService ");
- if (this.provider.getSalTableService() != null)
+ if (this.provider.getSalTableService() != null) {
LOG.debug(" Handle to SalTableServices" + this.provider.getSalTableService());
+ }
this.provider.getSalTableService().updateTable(builder.build());
}
@Override
- public Future<? extends RpcResult<?>> add(
- final InstanceIdentifier<TableFeatures> identifier,
- final TableFeatures addDataObj,
- final InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ public Future<? extends RpcResult<?>> add(final InstanceIdentifier<TableFeatures> identifier,
+ final TableFeatures addDataObj, final InstanceIdentifier<FlowCapableNode> nodeIdent) {
return Futures.immediateFuture(null);
}
public void createStaleMarkEntity(InstanceIdentifier<TableFeatures> identifier, TableFeatures del,
InstanceIdentifier<FlowCapableNode> nodeIdent) {
LOG.debug("NO-OP");
-
}
@Override
TableFeatures del, InstanceIdentifier<FlowCapableNode> nodeIdent) {
return null;
}
-
-
}
@Before
public void setUp() throws Exception {
- deviceMastershipManager = new DeviceMastershipManager(clusterSingletonService,
- notificationService, reconciliationAgent, dataBroker);
+ deviceMastershipManager = new DeviceMastershipManager(clusterSingletonService, notificationService,
+ reconciliationAgent, dataBroker);
Mockito.when(clusterSingletonService.registerClusterSingletonService(Matchers.<ClusterSingletonService>any()))
.thenReturn(registration);
}
// destroy context - unregister
Assert.assertNotNull(deviceMastershipManager.getDeviceMasterships().get(NODE_ID));
NodeRemovedBuilder nodeRemovedBuilder = new NodeRemovedBuilder();
- InstanceIdentifier<Node> nodeIId = InstanceIdentifier.create(Nodes.class).
- child(Node.class, new NodeKey(NODE_ID));
+ InstanceIdentifier<Node> nodeIId = InstanceIdentifier.create(Nodes.class).child(Node.class,
+ new NodeKey(NODE_ID));
nodeRemovedBuilder.setNodeRef(new NodeRef(nodeIId));
deviceMastershipManager.onNodeRemoved(nodeRemovedBuilder.build());
Assert.assertNull(deviceMastershipManager.getDeviceMasterships().get(NODE_ID));
deviceMastershipManager.getDeviceMasterships().get(NODE_ID).closeServiceInstance();
Assert.assertFalse(deviceMastershipManager.isDeviceMastered(NODE_ID));
}
-
-}
\ No newline at end of file
+}
@RunWith(MockitoJUnitRunner.class)
public class FlowListenerTest extends FRMTest {
private ForwardingRulesManagerImpl forwardingRulesManager;
- private final static NodeId NODE_ID = new NodeId("testnode:1");
- private final static NodeKey s1Key = new NodeKey(NODE_ID);
+ private static final NodeId NODE_ID = new NodeId("testnode:1");
+ private static final NodeKey NODE_KEY = new NodeKey(NODE_ID);
RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
TableKey tableKey = new TableKey((short) 2);
@Mock
@Before
public void setUp() {
- forwardingRulesManager = new ForwardingRulesManagerImpl(
- getDataBroker(),
- rpcProviderRegistryMock,
- getConfig(),
- clusterSingletonService,
- notificationService,
- getConfigurationService(),
- reconciliationManager);
+ forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock, getConfig(),
+ clusterSingletonService, notificationService, getConfigurationService(), reconciliationManager);
forwardingRulesManager.start();
// TODO consider tests rewrite (added because of complicated access)
@Test
public void addTwoFlowsTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
FlowKey flowKey = new FlowKey(new FlowId("test_Flow"));
- InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey);
- InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
Table table = new TableBuilder().setKey(tableKey).setFlow(Collections.<Flow>emptyList()).build();
Flow flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).build();
assertEquals("DOM-0", addFlowCalls.get(0).getTransactionUri().getValue());
flowKey = new FlowKey(new FlowId("test_Flow2"));
- flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
- .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY).augmentation(FlowCapableNode.class)
+ .child(Table.class, tableKey).child(Flow.class, flowKey);
flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).build();
writeTx = getDataBroker().newWriteOnlyTransaction();
writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
assertEquals("DOM-1", addFlowCalls.get(1).getTransactionUri().getValue());
assertEquals(2, addFlowCalls.get(1).getTableId().intValue());
assertEquals(flowII, addFlowCalls.get(1).getFlowRef().getValue());
-}
+ }
@Test
public void updateFlowTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
FlowKey flowKey = new FlowKey(new FlowId("test_Flow"));
- InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey);
- InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
Table table = new TableBuilder().setKey(tableKey).setFlow(Collections.<Flow>emptyList()).build();
Flow flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).build();
assertEquals("DOM-0", addFlowCalls.get(0).getTransactionUri().getValue());
flowKey = new FlowKey(new FlowId("test_Flow"));
- flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
- .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
+ flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY).augmentation(FlowCapableNode.class)
+ .child(Table.class, tableKey).child(Flow.class, flowKey);
flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).setOutGroup((long) 5).build();
writeTx = getDataBroker().newWriteOnlyTransaction();
writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
@Test
public void updateFlowScopeTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
FlowKey flowKey = new FlowKey(new FlowId("test_Flow"));
- InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey);
- InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
Table table = new TableBuilder().setKey(tableKey).setFlow(Collections.<Flow>emptyList()).build();
- IpMatch ipMatch = new IpMatchBuilder().setIpDscp(new Dscp((short)4)).build();
+ IpMatch ipMatch = new IpMatchBuilder().setIpDscp(new Dscp((short) 4)).build();
Match match = new MatchBuilder().setIpMatch(ipMatch).build();
Flow flow = new FlowBuilder().setMatch(match).setKey(flowKey).setTableId((short) 2).build();
assertEquals("DOM-0", addFlowCalls.get(0).getTransactionUri().getValue());
flowKey = new FlowKey(new FlowId("test_Flow"));
- flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
- .augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
- ipMatch = new IpMatchBuilder().setIpDscp(new Dscp((short)5)).build();
+ flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY).augmentation(FlowCapableNode.class)
+ .child(Table.class, tableKey).child(Flow.class, flowKey);
+ ipMatch = new IpMatchBuilder().setIpDscp(new Dscp((short) 5)).build();
match = new MatchBuilder().setIpMatch(ipMatch).build();
flow = new FlowBuilder().setMatch(match).setKey(flowKey).setTableId((short) 2).build();
writeTx = getDataBroker().newWriteOnlyTransaction();
@Test
public void deleteFlowTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
FlowKey flowKey = new FlowKey(new FlowId("test_Flow"));
- InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey);
- InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(Flow.class, flowKey);
Table table = new TableBuilder().setKey(tableKey).setFlow(Collections.<Flow>emptyList()).build();
Flow flow = new FlowBuilder().setKey(flowKey).setTableId((short) 2).build();
assertEquals(Boolean.TRUE, removeFlowCalls.get(0).isStrict());
}
-
@Test
- public void staleMarkedFlowCreationTest() throws Exception{
+ public void staleMarkedFlowCreationTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
StaleFlowKey flowKey = new StaleFlowKey(new FlowId("stale_Flow"));
- InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey);
- InstanceIdentifier<StaleFlow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<StaleFlow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Table.class, tableKey).child(StaleFlow.class, flowKey);
Table table = new TableBuilder().setKey(tableKey).setStaleFlow(Collections.<StaleFlow>emptyList()).build();
StaleFlow flow = new StaleFlowBuilder().setKey(flowKey).setTableId((short) 2).build();
public void tearDown() throws Exception {
forwardingRulesManager.close();
}
-
}
@RunWith(MockitoJUnitRunner.class)
public class GroupListenerTest extends FRMTest {
private ForwardingRulesManagerImpl forwardingRulesManager;
- private final static NodeId NODE_ID = new NodeId("testnode:1");
- private final static NodeKey s1Key = new NodeKey(NODE_ID);
+ private static final NodeId NODE_ID = new NodeId("testnode:1");
+ private static final NodeKey NODE_KEY = new NodeKey(NODE_ID);
RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
@Mock
ClusterSingletonServiceProvider clusterSingletonService;
@Test
public void addTwoGroupsTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
GroupKey groupKey = new GroupKey(new GroupId((long) 255));
- InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Group.class, groupKey);
Group group = new GroupBuilder().setKey(groupKey).setGroupName("Group1").build();
assertEquals("DOM-0", addGroupCalls.get(0).getTransactionUri().getValue());
groupKey = new GroupKey(new GroupId((long) 256));
- groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Group.class, groupKey);
group = new GroupBuilder().setKey(groupKey).setGroupName("Group1").build();
writeTx = getDataBroker().newWriteOnlyTransaction();
@Test
public void updateGroupTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
GroupKey groupKey = new GroupKey(new GroupId((long) 255));
- InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Group.class, groupKey);
Group group = new GroupBuilder().setKey(groupKey).setGroupName("Group1").build();
@Test
public void removeGroupTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
GroupKey groupKey = new GroupKey(new GroupId((long) 255));
- InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Group.class, groupKey);
Group group = new GroupBuilder().setKey(groupKey).setGroupName("Group1").build();
@Test
public void staleGroupCreationTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
StaleGroupKey groupKey = new StaleGroupKey(new GroupId((long) 255));
- InstanceIdentifier<StaleGroup> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<StaleGroup> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(StaleGroup.class, groupKey);
StaleGroup group = new StaleGroupBuilder().setKey(groupKey).setGroupName("Stale_Group1").build();
public void tearDown() throws Exception {
forwardingRulesManager.close();
}
-
}
@RunWith(MockitoJUnitRunner.class)
public class MeterListenerTest extends FRMTest {
private ForwardingRulesManagerImpl forwardingRulesManager;
- private final static NodeId NODE_ID = new NodeId("testnode:1");
- private final static NodeKey s1Key = new NodeKey(NODE_ID);
+ private static final NodeId NODE_ID = new NodeId("testnode:1");
+ private static final NodeKey NODE_KEY = new NodeKey(NODE_ID);
RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
@Mock
ClusterSingletonServiceProvider clusterSingletonService;
@Mock
private ReconciliationManager reconciliationManager;
-
@Before
public void setUp() {
forwardingRulesManager = new ForwardingRulesManagerImpl(
@Test
public void addTwoMetersTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
MeterKey meterKey = new MeterKey(new MeterId((long) 2000));
- InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Meter.class, meterKey);
Meter meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_one").build();
assertEquals("DOM-0", addMeterCalls.get(0).getTransactionUri().getValue());
meterKey = new MeterKey(new MeterId((long) 2001));
- meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Meter.class, meterKey);
meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_two").setBarrier(true).build();
writeTx = getDataBroker().newWriteOnlyTransaction();
@Test
public void updateMeterTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
MeterKey meterKey = new MeterKey(new MeterId((long) 2000));
- InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Meter.class, meterKey);
Meter meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_one").setBarrier(false).build();
@Test
public void removeMeterTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
MeterKey meterKey = new MeterKey(new MeterId((long) 2000));
- InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(Meter.class, meterKey);
Meter meter = new MeterBuilder().setKey(meterKey).setMeterName("meter_one").build();
assertEquals(meterII, removeMeterCalls.get(0).getMeterRef().getValue());
}
-
@Test
public void staleMeterCreationTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
StaleMeterKey meterKey = new StaleMeterKey(new MeterId((long) 2000));
- InstanceIdentifier<StaleMeter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<StaleMeter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class).child(StaleMeter.class, meterKey);
StaleMeter meter = new StaleMeterBuilder().setKey(meterKey).setMeterName("stale_meter_one").build();
public void tearDown() throws Exception {
forwardingRulesManager.close();
}
-
}
@RunWith(MockitoJUnitRunner.class)
public class NodeListenerTest extends FRMTest {
private ForwardingRulesManagerImpl forwardingRulesManager;
- private final static NodeKey s1Key = new NodeKey(new NodeId("testnode:1"));
+ private static final NodeKey NODE_KEY = new NodeKey(new NodeId("testnode:1"));
RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
@Mock
ClusterSingletonServiceProvider clusterSingletonService;
@Test
public void addRemoveNodeTest() throws Exception {
- addFlowCapableNode(s1Key);
+ addFlowCapableNode(NODE_KEY);
- InstanceIdentifier<FlowCapableNode> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ InstanceIdentifier<FlowCapableNode> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, NODE_KEY)
.augmentation(FlowCapableNode.class);
boolean nodeActive = forwardingRulesManager.isNodeActive(nodeII);
assertTrue(nodeActive);
- removeNode(s1Key);
+ removeNode(NODE_KEY);
nodeActive = forwardingRulesManager.isNodeActive(nodeII);
assertFalse(nodeActive);
}
public void tearDown() throws Exception {
forwardingRulesManager.close();
}
-
}
@RunWith(MockitoJUnitRunner.class)
public class TableFeaturesListenerTest extends FRMTest {
private ForwardingRulesManagerImpl forwardingRulesManager;
- private final static NodeId NODE_ID = new NodeId("testnode:1");
- private final static NodeKey s1Key = new NodeKey(NODE_ID);
+ private static final NodeId NODE_ID = new NodeId("testnode:1");
+ private static final NodeKey NODE_KEY = new NodeKey(NODE_ID);
RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
@Mock
ClusterSingletonServiceProvider clusterSingletonService;
@Mock
private ReconciliationManager reconciliationManager;
-
@Before
public void setUp() {
- forwardingRulesManager = new ForwardingRulesManagerImpl(
- getDataBroker(),
- rpcProviderRegistryMock,
- getConfig(),
- clusterSingletonService,
- notificationService,
- getConfigurationService(),
- reconciliationManager);
+ forwardingRulesManager = new ForwardingRulesManagerImpl(getDataBroker(), rpcProviderRegistryMock, getConfig(),
+ clusterSingletonService, notificationService, getConfigurationService(), reconciliationManager);
forwardingRulesManager.start();
// TODO consider tests rewrite (added because of complicated access)
TableKey tableKey = new TableKey((short) 2);
TableFeaturesKey tableFeaturesKey = new TableFeaturesKey(tableKey.getId());
- addTable(tableKey, s1Key);
+ addTable(tableKey, NODE_KEY);
TableFeatures tableFeaturesData = new TableFeaturesBuilder().setKey(tableFeaturesKey).build();
- InstanceIdentifier<TableFeatures> tableFeaturesII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
- .augmentation(FlowCapableNode.class).child(TableFeatures.class, tableFeaturesKey);
+ InstanceIdentifier<TableFeatures> tableFeaturesII = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, NODE_KEY).augmentation(FlowCapableNode.class)
+ .child(TableFeatures.class, tableFeaturesKey);
WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
writeTx.put(LogicalDatastoreType.CONFIGURATION, tableFeaturesII, tableFeaturesData);
assertCommit(writeTx.submit());
public void tearDown() throws Exception {
forwardingRulesManager.close();
}
-
}
/*
- * Copyright (c) 2014, 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
public void addFlowCapableNode(NodeKey nodeKey) {
Nodes nodes = new NodesBuilder().setNode(Collections.<Node>emptyList()).build();
- InstanceIdentifier<Node> flowNodeIdentifier = InstanceIdentifier.create(Nodes.class)
- .child(Node.class, nodeKey);
FlowCapableNodeBuilder fcnBuilder = new FlowCapableNodeBuilder();
NodeBuilder nodeBuilder = new NodeBuilder();
WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
writeTx.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class), nodes);
+
+ InstanceIdentifier<Node> flowNodeIdentifier = InstanceIdentifier.create(Nodes.class).child(Node.class, nodeKey);
writeTx.put(LogicalDatastoreType.OPERATIONAL, flowNodeIdentifier, nodeBuilder.build());
writeTx.put(LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.create(Nodes.class), nodes);
writeTx.put(LogicalDatastoreType.CONFIGURATION, flowNodeIdentifier, nodeBuilder.build());
public void removeNode(NodeKey nodeKey) throws ExecutionException, InterruptedException {
WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
- writeTx.delete(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class).child(Node.class, nodeKey));
+ writeTx.delete(LogicalDatastoreType.OPERATIONAL,
+ InstanceIdentifier.create(Nodes.class).child(Node.class, nodeKey));
writeTx.submit().get();
}
final ConfigurationService configurationService = Mockito.mock(ConfigurationService.class);
final ForwardingRulesManagerConfig config = getConfig();
- Mockito.when(configurationService.registerListener(Mockito.any())).thenReturn(() -> {});
+ Mockito.when(configurationService.registerListener(Mockito.any())).thenReturn(() -> {
+ });
Mockito.when(configurationService.getProperty(Mockito.eq("disable-reconciliation"), Mockito.any()))
.thenReturn(config.isDisableReconciliation());
return configurationService;
}
-
}
/*
- * Copyright (c) 2014, 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
*/
package test.mock.util;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.onf.bundle.service.rev170124.SalBundleService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.SalTableService;
-
import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.SalFlowService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.SalGroupService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.SalMeterService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.onf.bundle.service.rev170124.SalBundleService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.SalTableService;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.RpcService;
public class RpcProviderRegistryMock implements RpcProviderRegistry {
@Override
- public <T extends RpcService> BindingAwareBroker.RpcRegistration<T> addRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+ public <T extends RpcService> BindingAwareBroker.RpcRegistration<T> addRpcImplementation(Class<T> serviceInterface,
+ T implementation) throws IllegalStateException {
return null;
}
@Override
- public <T extends RpcService> BindingAwareBroker.RoutedRpcRegistration<T> addRoutedRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+ public <T extends RpcService> BindingAwareBroker.RoutedRpcRegistration<T> addRoutedRpcImplementation(
+ Class<T> serviceInterface, T implementation) throws IllegalStateException {
return null;
}
@Override
- public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(L listener) {
+ public <L extends RouteChangeListener<RpcContextIdentifier,
+ InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(
+ L listener) {
return null;
}
private final List<ControlBundleInput> controlBundleInput = new ArrayList<>();
private final List<AddBundleMessagesInput> addBundleMessagesInput = new ArrayList<>();
-
@Override
public Future<RpcResult<java.lang.Void>> controlBundle(ControlBundleInput input) {
getControlBundleInput().add(input);
public List<AddBundleMessagesInput> getAddBundleMessagesInput() {
return addBundleMessagesInput;
}
-
-
}
/*
- * Copyright (c) 2014, 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
*/
package test.mock.util;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.AddFlowOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.RemoveFlowInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.service.rev130819.UpdateFlowOutput;
import org.opendaylight.yangtools.yang.common.RpcResult;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Future;
-
-public class SalFlowServiceMock implements SalFlowService{
- private List<AddFlowInput> addFlowCalls = new ArrayList<>();
- private List<RemoveFlowInput> removeFlowCalls = new ArrayList<>();
- private List<UpdateFlowInput> updateFlowCalls = new ArrayList<>();
+public class SalFlowServiceMock implements SalFlowService {
+ private final List<AddFlowInput> addFlowCalls = new ArrayList<>();
+ private final List<RemoveFlowInput> removeFlowCalls = new ArrayList<>();
+ private final List<UpdateFlowInput> updateFlowCalls = new ArrayList<>();
@Override
public Future<RpcResult<AddFlowOutput>> addFlow(AddFlowInput input) {
return null;
}
-
@Override
public Future<RpcResult<RemoveFlowOutput>> removeFlow(RemoveFlowInput input) {
removeFlowCalls.add(input);
/*
- * Copyright (c) 2014, 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
*/
package test.mock.util;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.AddGroupOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.RemoveGroupInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.service.rev130918.UpdateGroupOutput;
import org.opendaylight.yangtools.yang.common.RpcResult;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Future;
-
public class SalGroupServiceMock implements SalGroupService {
- private List<AddGroupInput> addGroupCalls = new ArrayList<>();
- private List<RemoveGroupInput> removeGroupCalls = new ArrayList<>();
- private List<UpdateGroupInput> updateGroupCalls = new ArrayList<>();
+ private final List<AddGroupInput> addGroupCalls = new ArrayList<>();
+ private final List<RemoveGroupInput> removeGroupCalls = new ArrayList<>();
+ private final List<UpdateGroupInput> updateGroupCalls = new ArrayList<>();
@Override
public Future<RpcResult<AddGroupOutput>> addGroup(AddGroupInput input) {
/*
- * Copyright (c) 2014, 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
*/
package test.mock.util;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.AddMeterOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.RemoveMeterInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.service.rev130918.UpdateMeterOutput;
import org.opendaylight.yangtools.yang.common.RpcResult;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Future;
-
public class SalMeterServiceMock implements SalMeterService {
- private List<AddMeterInput> addMeterCalls = new ArrayList<>();
- private List<RemoveMeterInput> removeMeterCalls = new ArrayList<>();
- private List<UpdateMeterInput> updateMeterCalls = new ArrayList<>();
+ private final List<AddMeterInput> addMeterCalls = new ArrayList<>();
+ private final List<RemoveMeterInput> removeMeterCalls = new ArrayList<>();
+ private final List<UpdateMeterInput> updateMeterCalls = new ArrayList<>();
@Override
public Future<RpcResult<AddMeterOutput>> addMeter(AddMeterInput input) {
/*
- * Copyright (c) 2014, 2016 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v1.0 which accompanies this distribution,
 * and is available at http://www.eclipse.org/legal/epl-v10.html
*/
package test.mock.util;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.UpdateTableInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.UpdateTableOutput;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.SalTableService;
-import org.opendaylight.yangtools.yang.common.RpcResult;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.SalTableService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.UpdateTableInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.service.rev131026.UpdateTableOutput;
+import org.opendaylight.yangtools.yang.common.RpcResult;
public class SalTableServiceMock implements SalTableService {
- private List<UpdateTableInput> updateTableInput = new ArrayList<>();
-
+ private final List<UpdateTableInput> updateTableInput = new ArrayList<>();
public List<UpdateTableInput> getUpdateTableInput() {
return updateTableInput;