<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>bulk-o-matic</artifactId>
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4MatchBuilder;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-public class BulkOMaticUtils {
-
- private BulkOMaticUtils () { }
+public final class BulkOMaticUtils {
public static final int DEFUALT_STATUS = FlowCounter.OperationStatus.INIT.status();
public static final int DEFAULT_FLOW_COUNT = 0;
public static final String DEFAULT_UNITS = "ns";
public static final String DEVICE_TYPE_PREFIX = "openflow:";
+ private BulkOMaticUtils() {
+ }
+
public static String ipIntToStr (int k) {
- return new StringBuilder().append(((k >> 24) & 0xFF)).append(".")
- .append(((k >> 16) & 0xFF)).append(".")
- .append(((k >> 8) & 0xFF)).append(".")
- .append((k & 0xFF)).append("/32").toString();
+ return new StringBuilder().append(k >> 24 & 0xFF).append(".")
+ .append(k >> 16 & 0xFF).append(".")
+ .append(k >> 8 & 0xFF).append(".")
+ .append(k & 0xFF).append("/32").toString();
}
public static Match getMatch(final Integer sourceIp){
.augmentation(FlowCapableNode.class)
.child(Table.class, new TableKey(tableId))
.build();
-
}
public static InstanceIdentifier<Flow> getFlowId(final InstanceIdentifier<Table> tablePath, final String flowId) {
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-parent</artifactId>
- <version>1.7.3-SNAPSHOT</version>
+ <version>1.7.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin</groupId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<artifactId>features-flow</artifactId>
<packaging>jar</packaging>
<properties>
- <yangtools.version>1.0.3-SNAPSHOT</yangtools.version>
- <config.version>0.5.3-SNAPSHOT</config.version>
- <mdsal.version>1.4.3-SNAPSHOT</mdsal.version>
- <openflowjava.version>0.8.3-SNAPSHOT</openflowjava.version>
- <lldp.version>0.11.3-SNAPSHOT</lldp.version>
+ <yangtools.version>1.0.5-SNAPSHOT</yangtools.version>
+ <config.version>0.5.5-SNAPSHOT</config.version>
+ <mdsal.version>1.4.5-SNAPSHOT</mdsal.version>
+ <openflowjava.version>0.8.5-SNAPSHOT</openflowjava.version>
+ <lldp.version>0.11.5-SNAPSHOT</lldp.version>
<config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
<config.statistics.manager.configfile>30-statistics-manager.xml</config.statistics.manager.configfile>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>forwardingrules-manager</artifactId>
/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
package org.opendaylight.openflowplugin.applications.frm;
-import org.opendaylight.controller.md.sal.binding.api.ClusteredDataTreeChangeListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
- * forwardingrules-manager
- * org.opendaylight.openflowplugin.applications.frm
- *
- * FlowNodeReconciliation
- * It represent Reconciliation functionality for every new device.
- * So we have to read all possible pre-configured Flows, Meters and Groups from
- * Config/DS and add all to new device.
- * New device is represented by new {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}
- * in Operational/DS. So we have to add listener for Wildcarded path in base data change scope.
- *
- * WildCarded InstanceIdentifier:
- * {@code
- *
- * InstanceIdentifier.create(Nodes.class).child(Node.class).augmentation(FlowCapableNode.class)
- *
- * }
+ * Implementation provider of this interface will implement reconciliation functionality for a newly connected node.
+ * Implementation is not enforced to do reconciliation in any specific way, but the higher level intention is to
+ * provide best effort reconciliation of all the configuration (flow/meter/group) present in configuration data store
+ * for the given node.
*
* @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
- *
- * Created: Aug 26, 2014
*/
-public interface FlowNodeReconciliation extends ClusteredDataTreeChangeListener<FlowCapableNode>, AutoCloseable {
-
- /**
- * Method contains Node registration to {@link ForwardingRulesManager} functionality
- * as a prevention to use a validation check to the Operational/DS for identify
- * pre-configure transaction and serious device commit in every transaction.
- *
- * Second part of functionality is own reconciliation pre-configure
- * Flows, Meters and Groups.
- *
- * @param connectedNode - {@link org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier} to new Node
- */
- void flowNodeConnected(InstanceIdentifier<FlowCapableNode> connectedNode);
+public interface FlowNodeReconciliation extends AutoCloseable {
/**
- * Method contains functionality for registered Node {@link FlowCapableNode} removing
- * from {@link ForwardingRulesManager}
- *
- * @param disconnectedNode - {@link org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier} to removed Node
+ * Reconciles the configuration (flow/meter/group) present in the configuration
+ * data store for the given node onto the switch.
+ *
+ * @param connectedNode the node that needs reconciliation
*/
- void flowNodeDisconnected(InstanceIdentifier<FlowCapableNode> disconnectedNode);
+ void reconcileConfiguration(InstanceIdentifier<FlowCapableNode> connectedNode);
}
/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
- * forwardingrules-manager
- * org.opendaylight.openflowplugin.applications.frm
- *
- * ForwardingRulesManager
- * It represent a central point for whole modul. Implementation
+ * It represents a central point for the whole module. Implementation
* Flow Provider registers the link FlowChangeListener} and it holds all needed
* services for link FlowChangeListener}.
*
* @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
- *
- * Created: Aug 25, 2014
*/
public interface ForwardingRulesManager extends AutoCloseable {
- public void start();
+ void start();
/**
* Method returns information :
* @param ident - the key of the node
* @return boolean - true if device is connected
*/
- public boolean isNodeActive(InstanceIdentifier<FlowCapableNode> ident);
+ boolean isNodeActive(InstanceIdentifier<FlowCapableNode> ident);
/**
* Method returns information :
* @param ident - the key of the node
* @return boolean - true if device is present in operational data store
*/
- public boolean checkNodeInOperationalDataStore(InstanceIdentifier<FlowCapableNode> ident);
-
- /**
- * Method add new {@link FlowCapableNode} to active Node Holder.
- * ActiveNodeHolder prevent unnecessary Operational/DS read for identify
- * pre-configure and serious Configure/DS transactions.
- *
- * @param ident - the key of the node
- */
- public void registrateNewNode(InstanceIdentifier<FlowCapableNode> ident);
-
- /**
- * Method remove disconnected {@link FlowCapableNode} from active Node
- * Holder. And all next flows or groups or meters will stay in Config/DS
- * only.
- *
- * @param ident - the key of the node
- */
- public void unregistrateNode(InstanceIdentifier<FlowCapableNode> ident);
+ boolean checkNodeInOperationalDataStore(InstanceIdentifier<FlowCapableNode> ident);
/**
* Method returns generated transaction ID, which is unique for
*
* @return String transactionID for RPC transaction identification
*/
- public String getNewTransactionId();
+ String getNewTransactionId();
/**
* Method returns Read Transaction. It is needed for Node reconciliation only.
*
* @return ReadOnlyTransaction
*/
- public ReadOnlyTransaction getReadTranaction();
+ ReadOnlyTransaction getReadTranaction();
/**
* Flow RPC service
*
* @return
*/
- public SalFlowService getSalFlowService();
+ SalFlowService getSalFlowService();
/**
* Group RPC service
*
* @return
*/
- public SalGroupService getSalGroupService();
+ SalGroupService getSalGroupService();
/**
* Meter RPC service
*
* @return
*/
- public SalMeterService getSalMeterService();
+ SalMeterService getSalMeterService();
/**
* Table RPC service
*
* @return
*/
- public SalTableService getSalTableService();
+ SalTableService getSalTableService();
/**
* Content definition method that prevents code duplication in reconciliation.
* @return ForwardingRulesCommiter<Flow>
*/
- public ForwardingRulesCommiter<Flow> getFlowCommiter();
+ ForwardingRulesCommiter<Flow> getFlowCommiter();
/**
* Content definition method that prevents code duplication in reconciliation.
* @return ForwardingRulesCommiter<Group>
*/
- public ForwardingRulesCommiter<Group> getGroupCommiter();
+ ForwardingRulesCommiter<Group> getGroupCommiter();
/**
* Content definition method that prevents code duplication.
* @return ForwardingRulesCommiter<Meter>
*/
- public ForwardingRulesCommiter<Meter> getMeterCommiter();
+ ForwardingRulesCommiter<Meter> getMeterCommiter();
/**
* Content definition method that prevents code duplication.
* @return ForwardingRulesCommiter<Table>
*/
- public ForwardingRulesCommiter<TableFeatures> getTableFeaturesCommiter();
+ ForwardingRulesCommiter<TableFeatures> getTableFeaturesCommiter();
+
+ /**
+ * Check if reconciliation is disabled by user.
+ * @return true if reconciliation is disabled, else false
+ */
+ boolean isReconciliationDisabled();
/**
- * Content definition method
- * @return FlowNodeReconciliation
+ * Check if stale marking is enabled for switch reconciliation.
+ * @return true if stale marking is enabled, else false
*/
- public FlowNodeReconciliation getFlowNodeReconciliation();
+ boolean isStaleMarkingEnabled();
/**
- * Returns the config-subsystem/fallback configuration of FRM
- * @return ForwardingRulesManagerConfig
+ * Returns the number of reconciliation retries that are allowed.
+ * @return number of retries.
*/
- public ForwardingRulesManagerConfig getConfiguration();
+ int getReconciliationRetryCount();
/**
* Method checks if *this* instance of openflowplugin is owner of
* the given openflow node.
* @return True if owner, else false
*/
- public boolean isNodeOwner(InstanceIdentifier<FlowCapableNode> ident);
+ boolean isNodeOwner(InstanceIdentifier<FlowCapableNode> ident);
/**
* Content definition method that prevents code duplication.
* @return FlowNodeConnectorInventoryTranslatorImpl
*/
- public FlowNodeConnectorInventoryTranslatorImpl getFlowNodeConnectorInventoryTranslatorImpl();
+ FlowNodeConnectorInventoryTranslatorImpl getFlowNodeConnectorInventoryTranslatorImpl();
}
/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
@Override
public void onDataTreeChanged(Collection<DataTreeModification<T>> changes) {
Preconditions.checkNotNull(changes, "Changes may not be null!");
+ LOG.trace("Received data changes :{}", changes);
for (DataTreeModification<T> change : changes) {
final InstanceIdentifier<T> key = change.getRootPath().getRootIdentifier();
final DataObjectModification<T> mod = change.getRootNode();
final InstanceIdentifier<FlowCapableNode> nodeIdent =
key.firstIdentifierOf(FlowCapableNode.class);
-
if (preConfigurationCheck(nodeIdent)) {
switch (mod.getModificationType()) {
case DELETE:
}
}
else{
- if (provider.getConfiguration().isStaleMarkingEnabled()) {
+ if (provider.isStaleMarkingEnabled()) {
LOG.info("Stale-Marking ENABLED and switch {} is NOT connected, storing stale entities",
nodeIdent.toString());
// Switch is NOT connected
protected abstract InstanceIdentifier<T> getWildCardPath();
private boolean preConfigurationCheck(final InstanceIdentifier<FlowCapableNode> nodeIdent) {
- Preconditions.checkNotNull(nodeIdent, "FlowCapableNode ident can not be null!");
+ Preconditions.checkNotNull(nodeIdent, "FlowCapableNode identifier can not be null!");
// In single node cluster, node should be in local cache before we get any flow/group/meter
// data change event from data store. So first check should pass.
// In case of 3-node cluster, when shard leader changes, clustering will send blob of data
// should get populated. But to handle a scenario where flow request comes before the blob
// of config/operational data gets processes, it won't find node in local cache and it will
// skip the flow/group/meter operational. This requires an addition check, where it reads
- // node from operational data store and if it's present it calls flowNodeConnected to explictly
+ // node from operational data store and if it's present it calls flowNodeConnected to explicitly
// trigger the event of new node connected.
if(!provider.isNodeOwner(nodeIdent)) { return false; }
if (!provider.isNodeActive(nodeIdent)) {
if (provider.checkNodeInOperationalDataStore(nodeIdent)) {
- provider.getFlowNodeReconciliation().flowNodeConnected(nodeIdent);
return true;
} else {
return false;
/**
- * Copyright (c) 2016 Pantheon Technologies s.r.o. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.atomic.AtomicBoolean;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
+import org.opendaylight.openflowplugin.applications.frm.FlowNodeReconciliation;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final Logger LOG = LoggerFactory.getLogger(DeviceMastership.class);
private final NodeId nodeId;
private final ServiceGroupIdentifier identifier;
- private final ClusterSingletonServiceRegistration clusterSingletonServiceRegistration;
- private boolean deviceMastered;
+ private final ClusterSingletonServiceProvider clusterSingletonServiceProvider;
+ private final FlowNodeReconciliation reconcliationAgent;
+ private final AtomicBoolean deviceMastered = new AtomicBoolean(false);
+ private final AtomicBoolean isDeviceInOperDS = new AtomicBoolean(false);
+ private final InstanceIdentifier<FlowCapableNode> fcnIID;
+ private ClusterSingletonServiceRegistration clusterSingletonServiceRegistration;
- public DeviceMastership(final NodeId nodeId, final ClusterSingletonServiceProvider clusterSingletonService) {
+ public DeviceMastership(final NodeId nodeId,
+ final ClusterSingletonServiceProvider clusterSingletonService,
+ final FlowNodeReconciliation reconcliationAgent) {
this.nodeId = nodeId;
this.identifier = ServiceGroupIdentifier.create(nodeId.getValue());
- this.deviceMastered = false;
- clusterSingletonServiceRegistration = clusterSingletonService.registerClusterSingletonService(this);
+ this.clusterSingletonServiceProvider = clusterSingletonService;
+ this.reconcliationAgent = reconcliationAgent;
+ fcnIID = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId)).augmentation
+ (FlowCapableNode.class);
}
@Override
public void instantiateServiceInstance() {
- LOG.debug("FRM started for: {}", nodeId.getValue());
- deviceMastered = true;
+ LOG.info("FRM started for: {}", nodeId.getValue());
+ deviceMastered.set(true);
+ if(canReconcile()) {
+ reconcliationAgent.reconcileConfiguration(fcnIID);
+ }
}
@Override
public ListenableFuture<Void> closeServiceInstance() {
- LOG.debug("FRM stopped for: {}", nodeId.getValue());
- deviceMastered = false;
+ LOG.info("FRM stopped for: {}", nodeId.getValue());
+ deviceMastered.set(false);
return Futures.immediateFuture(null);
}
}
public boolean isDeviceMastered() {
- return deviceMastered;
+ return deviceMastered.get();
+ }
+
+ public void setDeviceOperationalStatus(boolean inOperDS) {
+ isDeviceInOperDS.set(inOperDS);
+ if(canReconcile()) {
+ LOG.info("Triggering reconciliation for device {}", nodeId.getValue());
+ reconcliationAgent.reconcileConfiguration(fcnIID);
+ }
}
+ public void registerClusterSingletonService() {
+ LOG.info("Registering FRM as a cluster singleton service listner for service id : {}",getIdentifier());
+ clusterSingletonServiceRegistration = clusterSingletonServiceProvider.registerClusterSingletonService(this);
+ }
+
+ private boolean canReconcile() {
+ return (deviceMastered.get() && isDeviceInOperDS.get());
+ }
}
/**
- * Copyright (c) 2016 Pantheon Technologies s.r.o. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
package org.opendaylight.openflowplugin.applications.frm.impl;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.binding.api.ClusteredDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
+import org.opendaylight.openflowplugin.applications.frm.FlowNodeReconciliation;
+import org.opendaylight.openflowplugin.common.wait.SimpleTaskRetryLooper;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdated;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.OpendaylightInventoryListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Manager for clustering service registrations of {@link DeviceMastership}.
*/
-public class DeviceMastershipManager {
+public class DeviceMastershipManager implements ClusteredDataTreeChangeListener<FlowCapableNode>,
+ OpendaylightInventoryListener, AutoCloseable{
private static final Logger LOG = LoggerFactory.getLogger(DeviceMastershipManager.class);
+ private static final InstanceIdentifier<FlowCapableNode> II_TO_FLOW_CAPABLE_NODE
+ = InstanceIdentifier.builder(Nodes.class)
+ .child(Node.class)
+ .augmentation(FlowCapableNode.class)
+ .build();
+
private final ClusterSingletonServiceProvider clusterSingletonService;
+ private final ListenerRegistration<?> notifListenerRegistration;
+ private final FlowNodeReconciliation reconcliationAgent;
+ private final DataBroker dataBroker;
private final ConcurrentHashMap<NodeId, DeviceMastership> deviceMasterships = new ConcurrentHashMap();
+ private final Object lockObj = new Object();
+ private ListenerRegistration<DeviceMastershipManager> listenerRegistration;
+ private Set<InstanceIdentifier<FlowCapableNode>> activeNodes = Collections.emptySet();
- public DeviceMastershipManager(final ClusterSingletonServiceProvider clusterSingletonService) {
+ public DeviceMastershipManager(final ClusterSingletonServiceProvider clusterSingletonService,
+ final NotificationProviderService notificationService,
+ final FlowNodeReconciliation reconcliationAgent,
+ final DataBroker dataBroker) {
this.clusterSingletonService = clusterSingletonService;
+ this.notifListenerRegistration = notificationService.registerNotificationListener(this);
+ this.reconcliationAgent = reconcliationAgent;
+ this.dataBroker = dataBroker;
+ registerNodeListener();
+ }
+
+ public boolean isDeviceMastered(final NodeId nodeId) {
+ return deviceMasterships.get(nodeId) != null && deviceMasterships.get(nodeId).isDeviceMastered();
+ }
+
+ public boolean isNodeActive(final NodeId nodeId) {
+ final InstanceIdentifier<FlowCapableNode> flowNodeIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId)).augmentation(FlowCapableNode.class);
+ return activeNodes.contains(flowNodeIdentifier);
+
+ }
+
+ @VisibleForTesting
+ ConcurrentHashMap<NodeId, DeviceMastership> getDeviceMasterships() {
+ return deviceMasterships;
+ }
+
+ @Override
+ public void onNodeUpdated(NodeUpdated notification) {
+ LOG.debug("NodeUpdate notification received : {}", notification);
+ DeviceMastership membership = deviceMasterships.computeIfAbsent(notification.getId(), device ->
+ new DeviceMastership(notification.getId(), clusterSingletonService, reconcliationAgent));
+ membership.registerClusterSingletonService();
}
- public void onDeviceConnected(final NodeId nodeId) {
- LOG.debug("FRM service registered for: {}", nodeId.getValue());
- final DeviceMastership mastership = new DeviceMastership(nodeId, clusterSingletonService);
- deviceMasterships.put(nodeId, mastership);
+ @Override
+ public void onNodeConnectorUpdated(NodeConnectorUpdated notification) {
+ //Not published by plugin
}
- public void onDeviceDisconnected(final NodeId nodeId) {
+ @Override
+ public void onNodeRemoved(NodeRemoved notification) {
+ LOG.debug("NodeRemoved notification received : {}", notification);
+ NodeId nodeId = notification.getNodeRef().getValue().firstKeyOf(Node.class).getId();
final DeviceMastership mastership = deviceMasterships.remove(nodeId);
if (mastership != null) {
mastership.close();
+ LOG.info("Unregistered FRM cluster singleton service for service id : {}", nodeId.getValue());
}
- LOG.debug("FRM service unregistered for: {}", nodeId.getValue());
}
- public boolean isDeviceMastered(final NodeId nodeId) {
- return deviceMasterships.get(nodeId) != null && deviceMasterships.get(nodeId).isDeviceMastered();
+ @Override
+ public void onNodeConnectorRemoved(NodeConnectorRemoved notification) {
+ //Not published by plugin
}
- @VisibleForTesting
- ConcurrentHashMap<NodeId, DeviceMastership> getDeviceMasterships() {
- return deviceMasterships;
+ @Override
+ public void onDataTreeChanged(@Nonnull Collection<DataTreeModification<FlowCapableNode>> changes) {
+ Preconditions.checkNotNull(changes, "Changes may not be null!");
+
+ for (DataTreeModification<FlowCapableNode> change : changes) {
+ final InstanceIdentifier<FlowCapableNode> key = change.getRootPath().getRootIdentifier();
+ final DataObjectModification<FlowCapableNode> mod = change.getRootNode();
+ final InstanceIdentifier<FlowCapableNode> nodeIdent =
+ key.firstIdentifierOf(FlowCapableNode.class);
+
+ switch (mod.getModificationType()) {
+ case DELETE:
+ if (mod.getDataAfter() == null) {
+ remove(key, mod.getDataBefore(), nodeIdent);
+ }
+ break;
+ case SUBTREE_MODIFIED:
+ //NO-OP since we do not need to reconcile on Node-updated
+ break;
+ case WRITE:
+ if (mod.getDataBefore() == null) {
+ add(key, mod.getDataAfter(), nodeIdent);
+ }
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled modification type " + mod.getModificationType());
+ }
+ }
+ }
+
+ public void remove(InstanceIdentifier<FlowCapableNode> identifier, FlowCapableNode del,
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE)){
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Node removed: {}",nodeIdent.firstKeyOf(Node.class).getId().getValue());
+ }
+
+ if ( ! nodeIdent.isWildcarded()) {
+ if (activeNodes.contains(nodeIdent)) {
+ synchronized (lockObj) {
+ if (activeNodes.contains(nodeIdent)) {
+ Set<InstanceIdentifier<FlowCapableNode>> set =
+ Sets.newHashSet(activeNodes);
+ set.remove(nodeIdent);
+ activeNodes = Collections.unmodifiableSet(set);
+ setNodeOperationalStatus(nodeIdent,false);
+ }
+ }
+ }
+ }
+
+ }
+ }
+
+ public void add(InstanceIdentifier<FlowCapableNode> identifier, FlowCapableNode add,
+ InstanceIdentifier<FlowCapableNode> nodeIdent) {
+ if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE)){
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Node added: {}",nodeIdent.firstKeyOf(Node.class).getId().getValue());
+ }
+
+ if ( ! nodeIdent.isWildcarded()) {
+ if (!activeNodes.contains(nodeIdent)) {
+ synchronized (lockObj) {
+ if (!activeNodes.contains(nodeIdent)) {
+ Set<InstanceIdentifier<FlowCapableNode>> set = Sets.newHashSet(activeNodes);
+ set.add(nodeIdent);
+ activeNodes = Collections.unmodifiableSet(set);
+ setNodeOperationalStatus(nodeIdent,true);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ if (listenerRegistration != null) {
+ try {
+ listenerRegistration.close();
+ } catch (Exception e) {
+ LOG.warn("Error occurred while closing operational Node listener: {}", e.getMessage());
+ LOG.debug("Error occurred while closing operational Node listener", e);
+ }
+ listenerRegistration = null;
+ }
+ if (notifListenerRegistration != null) {
+ notifListenerRegistration.close();
+ }
+ }
+
+
+ private boolean compareInstanceIdentifierTail(InstanceIdentifier<?> identifier1,
+ InstanceIdentifier<?> identifier2) {
+ return Iterables.getLast(identifier1.getPathArguments()).equals(Iterables.getLast(identifier2.getPathArguments()));
+ }
+
+ private void setNodeOperationalStatus(InstanceIdentifier<FlowCapableNode> nodeIid, boolean status) {
+ NodeId nodeId = nodeIid.firstKeyOf(Node.class).getId();
+ if (nodeId != null ) {
+ if (deviceMasterships.containsKey(nodeId) ) {
+ deviceMasterships.get(nodeId).setDeviceOperationalStatus(status);
+ LOG.debug("Operational status of device {} is set to {}",nodeId, status);
+ }
+ }
+ }
+ private void registerNodeListener(){
+
+ final InstanceIdentifier<FlowCapableNode> flowNodeWildCardIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class).augmentation(FlowCapableNode.class);
+
+ final DataTreeIdentifier<FlowCapableNode> treeId =
+ new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, flowNodeWildCardIdentifier);
+
+ try {
+ SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(ForwardingRulesManagerImpl.STARTUP_LOOP_TICK,
+ ForwardingRulesManagerImpl.STARTUP_LOOP_MAX_RETRIES);
+
+ listenerRegistration = looper.loopUntilNoException(() ->
+ dataBroker.registerDataTreeChangeListener(treeId, DeviceMastershipManager.this));
+ } catch (Exception e) {
+ LOG.warn("Data listener registration failed: {}", e.getMessage());
+ LOG.debug("Data listener registration failed ", e);
+ throw new IllegalStateException("Node listener registration failed!", e);
+ }
}
}
/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
-import java.util.Map.Entry;
-import java.util.HashMap;
-import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicInteger;
-import javax.annotation.Nonnull;
-
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.openflowplugin.applications.frm.FlowNodeReconciliation;
import org.opendaylight.openflowplugin.applications.frm.ForwardingRulesManager;
-import org.opendaylight.openflowplugin.common.wait.SimpleTaskRetryLooper;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.GroupActionCase;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.action.OutputActionCase;
import org.opendaylight.yang.gen.v1.urn.opendaylight.action.types.rev131112.action.list.Action;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.StaleGroup;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.StaleGroupKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeatures;
import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.table.features.TableFeaturesKey;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
/**
- * forwardingrules-manager
- * org.opendaylight.openflowplugin.applications.frm
- *
- * FlowNode Reconciliation Listener
- * Reconciliation for a new FlowNode
+ * Default implementation of {@link FlowNodeReconciliation}.
*
* @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
- *
- * Created: Jun 13, 2014
*/
public class FlowNodeReconciliationImpl implements FlowNodeReconciliation {
+ private static final Logger LOG = LoggerFactory.getLogger(FlowNodeReconciliationImpl.class);
- private static final Logger LOG = LoggerFactory.getLogger(FlowNodeReconciliationImpl.class);
-
- /**
- * The number of nanoseconds to wait for a single group to be added.
- */
+ // The number of nanoseconds to wait for a single group to be added.
private static final long ADD_GROUP_TIMEOUT = TimeUnit.SECONDS.toNanos(3);
- /**
- * The maximum number of nanoseconds to wait for completion of add-group
- * RPCs.
- */
- private static final long MAX_ADD_GROUP_TIMEOUT =
- TimeUnit.SECONDS.toNanos(20);
-
- private final DataBroker dataBroker;
-
- private final ForwardingRulesManager provider;
+ // The maximum number of nanoseconds to wait for completion of add-group RPCs.
+ private static final long MAX_ADD_GROUP_TIMEOUT = TimeUnit.SECONDS.toNanos(20);
private static final String SEPARATOR = ":";
-
- private ListenerRegistration<FlowNodeReconciliationImpl> listenerRegistration;
-
private static final int THREAD_POOL_SIZE = 4;
- ExecutorService executor = Executors.newFixedThreadPool(THREAD_POOL_SIZE);
- private static final InstanceIdentifier<FlowCapableNode> II_TO_FLOW_CAPABLE_NODE
- = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class)
- .augmentation(FlowCapableNode.class)
- .build();
+ private final DataBroker dataBroker;
+ private final ForwardingRulesManager provider;
+ private final ExecutorService executor = Executors.newFixedThreadPool(THREAD_POOL_SIZE);
public FlowNodeReconciliationImpl (final ForwardingRulesManager manager, final DataBroker db) {
this.provider = Preconditions.checkNotNull(manager, "ForwardingRulesManager can not be null!");
dataBroker = Preconditions.checkNotNull(db, "DataBroker can not be null!");
- /* Build Path */
- final InstanceIdentifier<FlowCapableNode> flowNodeWildCardIdentifier = InstanceIdentifier.create(Nodes.class)
- .child(Node.class).augmentation(FlowCapableNode.class);
-
- final DataTreeIdentifier<FlowCapableNode> treeId =
- new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, flowNodeWildCardIdentifier);
-
- try {
- SimpleTaskRetryLooper looper = new SimpleTaskRetryLooper(ForwardingRulesManagerImpl.STARTUP_LOOP_TICK,
- ForwardingRulesManagerImpl.STARTUP_LOOP_MAX_RETRIES);
-
- listenerRegistration = looper.loopUntilNoException(new Callable<ListenerRegistration<FlowNodeReconciliationImpl>>() {
- @Override
- public ListenerRegistration<FlowNodeReconciliationImpl> call() throws Exception {
- return dataBroker.registerDataTreeChangeListener(treeId, FlowNodeReconciliationImpl.this);
- }
- });
- } catch (Exception e) {
- LOG.warn("data listener registration failed: {}", e.getMessage());
- LOG.debug("data listener registration failed.. ", e);
- throw new IllegalStateException("FlowNodeReconciliation startup fail! System needs restart.", e);
- }
}
@Override
public void close() {
- if (listenerRegistration != null) {
- try {
- listenerRegistration.close();
- } catch (Exception e) {
- LOG.warn("Error by stop FRM FlowNodeReconilListener: {}", e.getMessage());
- LOG.debug("Error by stop FRM FlowNodeReconilListener..", e);
- }
- listenerRegistration = null;
+ if (executor != null) {
+ executor.shutdownNow();
}
}
@Override
- public void onDataTreeChanged(@Nonnull Collection<DataTreeModification<FlowCapableNode>> changes) {
- Preconditions.checkNotNull(changes, "Changes may not be null!");
-
- for (DataTreeModification<FlowCapableNode> change : changes) {
- final InstanceIdentifier<FlowCapableNode> key = change.getRootPath().getRootIdentifier();
- final DataObjectModification<FlowCapableNode> mod = change.getRootNode();
- final InstanceIdentifier<FlowCapableNode> nodeIdent =
- key.firstIdentifierOf(FlowCapableNode.class);
-
- switch (mod.getModificationType()) {
- case DELETE:
- if (mod.getDataAfter() == null) {
- remove(key, mod.getDataBefore(), nodeIdent);
- }
- break;
- case SUBTREE_MODIFIED:
- //NO-OP since we donot need to reconciliate on Node-updated
- break;
- case WRITE:
- if (mod.getDataBefore() == null) {
- add(key, mod.getDataAfter(), nodeIdent);
- }
- break;
- default:
- throw new IllegalArgumentException("Unhandled modification type " + mod.getModificationType());
- }
+ public void reconcileConfiguration(InstanceIdentifier<FlowCapableNode> connectedNode) {
+ if (provider.isReconciliationDisabled()) {
+ LOG.debug("Reconciliation is disabled by user. Skipping reconciliation of node : {}", connectedNode
+ .firstKeyOf(Node.class));
+ return;
}
- }
-
-
-
- public void remove(InstanceIdentifier<FlowCapableNode> identifier, FlowCapableNode del,
- InstanceIdentifier<FlowCapableNode> nodeIdent) {
- if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE)){
- if (LOG.isDebugEnabled()) {
- LOG.debug("Node removed: {}",nodeIdent.firstKeyOf(Node.class).getId().getValue());
- }
-
- if ( ! nodeIdent.isWildcarded()) {
- flowNodeDisconnected(nodeIdent);
- }
-
- }
- }
-
- public void add(InstanceIdentifier<FlowCapableNode> identifier, FlowCapableNode add,
- InstanceIdentifier<FlowCapableNode> nodeIdent) {
- if(compareInstanceIdentifierTail(identifier,II_TO_FLOW_CAPABLE_NODE)){
- if (LOG.isDebugEnabled()) {
- LOG.debug("Node added: {}",nodeIdent.firstKeyOf(Node.class).getId().getValue());
- }
-
- if ( ! nodeIdent.isWildcarded()) {
- flowNodeConnected(nodeIdent);
- }
- }
- }
-
- @Override
- public void flowNodeDisconnected(InstanceIdentifier<FlowCapableNode> disconnectedNode) {
- provider.unregistrateNode(disconnectedNode);
- }
-
- @Override
- public void flowNodeConnected(InstanceIdentifier<FlowCapableNode> connectedNode) {
- flowNodeConnected(connectedNode, false);
- }
-
- private void flowNodeConnected(InstanceIdentifier<FlowCapableNode> connectedNode, boolean force) {
- if (force || !provider.isNodeActive(connectedNode)) {
- provider.registrateNewNode(connectedNode);
-
- if(!provider.isNodeOwner(connectedNode)) { return; }
-
- if (provider.getConfiguration().isStaleMarkingEnabled()) {
+ if (provider.isNodeOwner(connectedNode)) {
+ LOG.info("Triggering reconciliation for device {}", connectedNode.firstKeyOf(Node.class));
+ if (provider.isStaleMarkingEnabled()) {
LOG.info("Stale-Marking is ENABLED and proceeding with deletion of stale-marked entities on switch {}",
connectedNode.toString());
reconciliationPreProcess(connectedNode);
Map<Long, ListenableFuture<?>> groupFutures = new HashMap<>();
while ((!(toBeInstalledGroups.isEmpty()) || !(suspectedGroups.isEmpty())) &&
- (counter <= provider.getConfiguration().getReconciliationRetryCount())) { //also check if the counter has not crossed the threshold
+ (counter <= provider.getReconciliationRetryCount())) { //also check if the counter has not crossed the threshold
if (toBeInstalledGroups.isEmpty() && !suspectedGroups.isEmpty()) {
LOG.error("These Groups are pointing to node-connectors that are not up yet {}", suspectedGroups.toString());
if (!toBeInstalledGroups.isEmpty()) {
for (Group group : toBeInstalledGroups) {
LOG.error("Installing the group {} finally although the port is not up after checking for {} times "
- , group.getGroupId().toString(), provider.getConfiguration().getReconciliationRetryCount());
+ , group.getGroupId().toString(), provider.getReconciliationRetryCount());
addGroup(groupFutures, group);
}
}
LOG.error("Stale entity removal failed {}", t);
}
});
-
- }
-
-
- private boolean compareInstanceIdentifierTail(InstanceIdentifier<?> identifier1,
- InstanceIdentifier<?> identifier2) {
- return Iterables.getLast(identifier1.getPathArguments()).equals(Iterables.getLast(identifier2.getPathArguments()));
}
}
/**
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
import com.google.common.util.concurrent.CheckedFuture;
-import java.util.Collections;
import java.util.Objects;
-import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowplugin.applications.frm.FlowNodeReconciliation;
*
*/
public class ForwardingRulesManagerImpl implements ForwardingRulesManager {
-
private static final Logger LOG = LoggerFactory.getLogger(ForwardingRulesManagerImpl.class);
+
static final int STARTUP_LOOP_TICK = 500;
static final int STARTUP_LOOP_MAX_RETRIES = 8;
private final AtomicLong txNum = new AtomicLong();
- private final Object lockObj = new Object();
- private Set<InstanceIdentifier<FlowCapableNode>> activeNodes = Collections.emptySet();
-
private final DataBroker dataService;
private final SalFlowService salFlowService;
private final SalGroupService salGroupService;
private final SalMeterService salMeterService;
private final SalTableService salTableService;
+ private final ForwardingRulesManagerConfig forwardingRulesManagerConfig;
+ private final ClusterSingletonServiceProvider clusterSingletonServiceProvider;
+ private final NotificationProviderService notificationService;
+ private final boolean disableReconciliation;
+ private final boolean staleMarkingEnabled;
+ private final int reconciliationRetryCount;
private ForwardingRulesCommiter<Flow> flowListener;
private ForwardingRulesCommiter<Group> groupListener;
private ForwardingRulesCommiter<Meter> meterListener;
private ForwardingRulesCommiter<TableFeatures> tableListener;
private FlowNodeReconciliation nodeListener;
-
- private final ForwardingRulesManagerConfig forwardingRulesManagerConfig;
private FlowNodeConnectorInventoryTranslatorImpl flowNodeConnectorInventoryTranslatorImpl;
- private final ClusterSingletonServiceProvider clusterSingletonServiceProvider;
private DeviceMastershipManager deviceMastershipManager;
public ForwardingRulesManagerImpl(final DataBroker dataBroker,
final RpcConsumerRegistry rpcRegistry,
final ForwardingRulesManagerConfig config,
- final ClusterSingletonServiceProvider clusterSingletonService) {
+ final ClusterSingletonServiceProvider clusterSingletonService,
+ final NotificationProviderService notificationService,
+ final boolean disableReconciliation,
+ final boolean staleMarkingEnabled,
+ final int reconciliationRetryCount) {
this.dataService = Preconditions.checkNotNull(dataBroker, "DataBroker can not be null!");
this.forwardingRulesManagerConfig = Preconditions.checkNotNull(config, "Configuration for FRM cannot be null");
this.clusterSingletonServiceProvider = Preconditions.checkNotNull(clusterSingletonService,
"ClusterSingletonService provider can not be null");
+ this.notificationService = Preconditions.checkNotNull(notificationService, "Notification publisher service is" +
+ " not available");
Preconditions.checkArgument(rpcRegistry != null, "RpcConsumerRegistry can not be null !");
"RPC SalMeterService not found.");
this.salTableService = Preconditions.checkNotNull(rpcRegistry.getRpcService(SalTableService.class),
"RPC SalTableService not found.");
+
+ this.disableReconciliation = disableReconciliation;
+ this.staleMarkingEnabled = staleMarkingEnabled;
+ this.reconciliationRetryCount = reconciliationRetryCount;
}
@Override
public void start() {
- this.deviceMastershipManager = new DeviceMastershipManager(clusterSingletonServiceProvider);
+ this.nodeListener = new FlowNodeReconciliationImpl(this, dataService);
+ this.deviceMastershipManager = new DeviceMastershipManager(clusterSingletonServiceProvider,
+ notificationService,
+ this.nodeListener,
+ dataService);
+ flowNodeConnectorInventoryTranslatorImpl = new FlowNodeConnectorInventoryTranslatorImpl(this,dataService);
+
this.flowListener = new FlowForwarder(this, dataService);
this.groupListener = new GroupForwarder(this, dataService);
this.meterListener = new MeterForwarder(this, dataService);
this.tableListener = new TableForwarder(this, dataService);
- this.nodeListener = new FlowNodeReconciliationImpl(this, dataService);
- flowNodeConnectorInventoryTranslatorImpl =
- new FlowNodeConnectorInventoryTranslatorImpl(this,dataService);
LOG.info("ForwardingRulesManager has started successfully.");
}
this.nodeListener.close();
this.nodeListener = null;
}
+ if (deviceMastershipManager != null) {
+ deviceMastershipManager.close();
+ }
}
@Override
@Override
public boolean isNodeActive(InstanceIdentifier<FlowCapableNode> ident) {
- return activeNodes.contains(ident);
+ return deviceMastershipManager.isNodeActive(ident.firstKeyOf(Node.class).getId());
}
@Override
return result;
}
- @Override
- public void registrateNewNode(InstanceIdentifier<FlowCapableNode> ident) {
- if (!activeNodes.contains(ident)) {
- synchronized (lockObj) {
- if (!activeNodes.contains(ident)) {
- Set<InstanceIdentifier<FlowCapableNode>> set =
- Sets.newHashSet(activeNodes);
- set.add(ident);
- activeNodes = Collections.unmodifiableSet(set);
- deviceMastershipManager.onDeviceConnected(ident.firstKeyOf(Node.class).getId());
- }
- }
- }
- }
-
- @Override
- public void unregistrateNode(InstanceIdentifier<FlowCapableNode> ident) {
- if (activeNodes.contains(ident)) {
- synchronized (lockObj) {
- if (activeNodes.contains(ident)) {
- Set<InstanceIdentifier<FlowCapableNode>> set =
- Sets.newHashSet(activeNodes);
- set.remove(ident);
- activeNodes = Collections.unmodifiableSet(set);
- deviceMastershipManager.onDeviceDisconnected(ident.firstKeyOf(Node.class).getId());
- }
- }
- }
- }
-
@Override
public SalFlowService getSalFlowService() {
return salFlowService;
}
@Override
- public FlowNodeReconciliation getFlowNodeReconciliation() {
- return nodeListener;
+ public boolean isReconciliationDisabled() {
+ return this.disableReconciliation;
+ }
+
+ @Override
+ public boolean isStaleMarkingEnabled() {
+ return this.staleMarkingEnabled;
}
@Override
- public ForwardingRulesManagerConfig getConfiguration() {
- return forwardingRulesManagerConfig;
+ public int getReconciliationRetryCount() {
+ return this.reconciliationRetryCount;
}
@Override
public void setDeviceMastershipManager(final DeviceMastershipManager deviceMastershipManager) {
this.deviceMastershipManager = deviceMastershipManager;
}
-
}
<?xml version="1.0" encoding="UTF-8"?>
<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
+ xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
odl:use-default-for-reference-types="true">
<reference id="dataBroker" interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"/>
<reference id="rpcRegistry" interface="org.opendaylight.controller.sal.binding.api.RpcProviderRegistry"/>
+ <reference id="notificationService" interface="org.opendaylight.controller.sal.binding.api.NotificationProviderService"/>
<reference id="clusterSingletonService" interface="org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider"/>
<odl:clustered-app-config id="frmConfig"
binding-class="org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.openflowplugin.app.forwardingrules.manager.config.rev160511.ForwardingRulesManagerConfig"/>
+ <cm:property-placeholder persistent-id="org.opendaylight.openflowplugin"
+ placeholder-prefix="${frm-"
+ update-strategy="none">
+ <cm:default-properties>
+ <!-- Disable switch reconciliation -->
+ <cm:property name="disable-reconciliation" value="false"/>
+ <!-- Enable stale marking for switch reconciliation -->
+ <cm:property name="stale-marking-enabled" value="false"/>
+ <!-- Number of retries for switch reconciliation -->
+ <cm:property name="reconciliation-retry-count" value="5"/>
+ </cm:default-properties>
+ </cm:property-placeholder>
+
<bean id="frmManager" class="org.opendaylight.openflowplugin.applications.frm.impl.ForwardingRulesManagerImpl"
init-method="start" destroy-method="close">
<argument ref="dataBroker"/>
<argument ref="rpcRegistry"/>
<argument ref="frmConfig"/>
<argument ref="clusterSingletonService"/>
+ <argument ref="notificationService"/>
+ <argument value="${frm-disable-reconciliation}"/>
+ <argument value="${frm-stale-marking-enabled}"/>
+ <argument value="${frm-reconciliation-retry-count}"/>
+
</bean>
</blueprint>
\ No newline at end of file
/**
- * Copyright (c) 2016 Pantheon Technologies s.r.o. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
+import org.opendaylight.openflowplugin.applications.frm.FlowNodeReconciliation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
* Test for {@link DeviceMastershipManager}.
private ClusterSingletonServiceRegistration registration;
@Mock
private ClusterSingletonServiceProvider clusterSingletonService;
+ @Mock
+ private NotificationProviderService notificationService;
+ @Mock
+ private FlowNodeReconciliation reconciliationAgent;
+ @Mock
+ private DataBroker dataBroker;
@Before
public void setUp() throws Exception {
- deviceMastershipManager = new DeviceMastershipManager(clusterSingletonService);
+ deviceMastershipManager = new DeviceMastershipManager(clusterSingletonService,
+ notificationService, reconciliationAgent, dataBroker);
Mockito.when(clusterSingletonService.registerClusterSingletonService(Matchers.<ClusterSingletonService>any()))
.thenReturn(registration);
}
public void testOnDeviceConnectedAndDisconnected() throws Exception {
// no context
Assert.assertNull(deviceMastershipManager.getDeviceMasterships().get(NODE_ID));
- // create context - register
- deviceMastershipManager.onDeviceConnected(NODE_ID);
+ NodeUpdatedBuilder nodeUpdatedBuilder = new NodeUpdatedBuilder();
+ nodeUpdatedBuilder.setId(NODE_ID);
+ deviceMastershipManager.onNodeUpdated(nodeUpdatedBuilder.build());
DeviceMastership serviceInstance = deviceMastershipManager.getDeviceMasterships().get(NODE_ID);
Assert.assertNotNull(serviceInstance);
- Mockito.verify(clusterSingletonService).registerClusterSingletonService(serviceInstance);
// destroy context - unregister
- deviceMastershipManager.onDeviceDisconnected(NODE_ID);
+ Assert.assertNotNull(deviceMastershipManager.getDeviceMasterships().get(NODE_ID));
+ NodeRemovedBuilder nodeRemovedBuilder = new NodeRemovedBuilder();
+ InstanceIdentifier<Node> nodeIId = InstanceIdentifier.create(Nodes.class).
+ child(Node.class, new NodeKey(NODE_ID));
+ nodeRemovedBuilder.setNodeRef(new NodeRef(nodeIId));
+ deviceMastershipManager.onNodeRemoved(nodeRemovedBuilder.build());
Assert.assertNull(deviceMastershipManager.getDeviceMasterships().get(NODE_ID));
- Mockito.verify(registration).close();
}
@Test
public void testIsDeviceMasteredOrSlaved() {
// no context
Assert.assertFalse(deviceMastershipManager.isDeviceMastered(NODE_ID));
- deviceMastershipManager.onDeviceConnected(NODE_ID);
+ NodeUpdatedBuilder nodeUpdatedBuilder = new NodeUpdatedBuilder();
+ nodeUpdatedBuilder.setId(NODE_ID);
+ deviceMastershipManager.onNodeUpdated(nodeUpdatedBuilder.build());
// is master
deviceMastershipManager.getDeviceMasterships().get(NODE_ID).instantiateServiceInstance();
Assert.assertTrue(deviceMastershipManager.isDeviceMastered(NODE_ID));
/**
- * Copyright (c) 2016 Pantheon Technologies s.r.o. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
+import org.opendaylight.openflowplugin.applications.frm.FlowNodeReconciliation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
/**
@Mock
private DeviceMastershipManager deviceMastershipManager;
+ @Mock
+ private FlowNodeReconciliation reconcliationAgent;
+
@Before
public void setUp() throws Exception {
- deviceMastership = new DeviceMastership(NODE_ID, Mockito.mock(ClusterSingletonServiceProvider.class));
+ deviceMastership = new DeviceMastership(NODE_ID, Mockito.mock(ClusterSingletonServiceProvider.class), reconcliationAgent);
}
@Test
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowplugin.applications.frm.impl.DeviceMastershipManager;
ClusterSingletonServiceProvider clusterSingletonService;
@Mock
DeviceMastershipManager deviceMastershipManager;
+ @Mock
+ private NotificationProviderService notificationService;
@Before
public void setUp() {
getDataBroker(),
rpcProviderRegistryMock,
getConfig(),
- clusterSingletonService);
+ clusterSingletonService,
+ notificationService, false, false, 5);
forwardingRulesManager.start();
// TODO consider tests rewrite (added because of complicated access)
forwardingRulesManager.setDeviceMastershipManager(deviceMastershipManager);
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowplugin.applications.frm.impl.DeviceMastershipManager;
ClusterSingletonServiceProvider clusterSingletonService;
@Mock
DeviceMastershipManager deviceMastershipManager;
+ @Mock
+ private NotificationProviderService notificationService;
@Before
public void setUp() {
getDataBroker(),
rpcProviderRegistryMock,
getConfig(),
- clusterSingletonService);
+ clusterSingletonService,
+ notificationService, false, false, 5);
forwardingRulesManager.start();
// TODO consider tests rewrite (added because of complicated access)
forwardingRulesManager.setDeviceMastershipManager(deviceMastershipManager);
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowplugin.applications.frm.impl.DeviceMastershipManager;
ClusterSingletonServiceProvider clusterSingletonService;
@Mock
DeviceMastershipManager deviceMastershipManager;
+ @Mock
+ private NotificationProviderService notificationService;
@Before
public void setUp() {
getDataBroker(),
rpcProviderRegistryMock,
getConfig(),
- clusterSingletonService);
+ clusterSingletonService,
+ notificationService, false, false, 5);
forwardingRulesManager.start();
// TODO consider tests rewrite (added because of complicated access)
forwardingRulesManager.setDeviceMastershipManager(deviceMastershipManager);
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowplugin.applications.frm.impl.ForwardingRulesManagerImpl;
RpcProviderRegistry rpcProviderRegistryMock = new RpcProviderRegistryMock();
@Mock
ClusterSingletonServiceProvider clusterSingletonService;
+ @Mock
+ private NotificationProviderService notificationService;
@Before
public void setUp() {
getDataBroker(),
rpcProviderRegistryMock,
getConfig(),
- clusterSingletonService);
+ clusterSingletonService,
+ notificationService, false ,false ,5);
forwardingRulesManager.start();
}
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowplugin.applications.frm.impl.DeviceMastershipManager;
ClusterSingletonServiceProvider clusterSingletonService;
@Mock
DeviceMastershipManager deviceMastershipManager;
+ @Mock
+ private NotificationProviderService notificationService;
@Before
public void setUp() {
getDataBroker(),
rpcProviderRegistryMock,
getConfig(),
- clusterSingletonService);
+ clusterSingletonService,
+ notificationService, false, false , 5);
forwardingRulesManager.start();
// TODO consider tests rewrite (added because of complicated access)
forwardingRulesManager.setDeviceMastershipManager(deviceMastershipManager);
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>forwardingrules-sync</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-parent</artifactId>
- <version>0.5.3-SNAPSHOT</version>
+ <version>0.5.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>inventory-manager</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>lldp-speaker</artifactId>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>notification-supplier</artifactId>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>of-switch-config-pusher</artifactId>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>statistics-manager</artifactId>
package org.opendaylight.openflowplugin.applications.statistics.manager;
import java.util.List;
-import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev150304.TransactionAware;
*
* Created: Aug 29, 2014
*/
-public interface StatRpcMsgManager extends Runnable, AutoCloseable {
-
- interface RpcJobsQueue extends Callable<Void> {}
-
+public interface StatRpcMsgManager {
/**
* Transaction container is definition for Multipart transaction
* join container for all Multipart msg with same TransactionId
import java.math.BigInteger;
import java.util.Arrays;
import java.util.List;
-import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
private final Cache<String, TransactionCacheContainer<? super TransactionAware>> txCache;
+ /**
+ * Cache for futures to be returned by
+ * {@link #isExpectedStatistics(TransactionId, NodeId)}.
+ */
+ private final Cache<String, SettableFuture<Boolean>> txFutureCache;
+
+ /**
+ * The number of seconds to wait for transaction container to be put into
+ * {@link #txCache}.
+ */
+ private static final long TXCACHE_WAIT_TIMEOUT = 10L;
+
private static final int MAX_CACHE_SIZE = 10000;
- private static final int QUEUE_CAPACITY = 5000;
private static final String MSG_TRANS_ID_NOT_NULL = "TransactionId can not be null!";
private static final String MSG_NODE_ID_NOT_NULL = "NodeId can not be null!";
private final OpendaylightFlowTableStatisticsService flowTableStatsService;
private final OpendaylightQueueStatisticsService queueStatsService;
- private BlockingQueue<RpcJobsQueue> statsRpcJobQueue;
-
- private volatile boolean finishing = false;
-
public StatRpcMsgManagerImpl (final StatisticsManager manager,
final RpcConsumerRegistry rpcRegistry, final long maxNodeForCollector) {
Preconditions.checkArgument(manager != null, "StatisticManager can not be null!");
rpcRegistry.getRpcService(OpendaylightQueueStatisticsService.class),
"OpendaylightQueueStatisticsService can not be null!");
- statsRpcJobQueue = new LinkedBlockingQueue<>(QUEUE_CAPACITY);
txCache = CacheBuilder.newBuilder().expireAfterWrite((maxNodeForCollector * POSSIBLE_STAT_WAIT_FOR_NOTIFICATION), TimeUnit.SECONDS)
.maximumSize(MAX_CACHE_SIZE).build();
- }
-
- @Override
- public void close() {
- finishing = true;
- statsRpcJobQueue = null;
- }
-
- @Override
- public void run() {
- /* Neverending cyle - wait for finishing */
- while ( ! finishing) {
- try {
- statsRpcJobQueue.take().call();
- }
- catch (final Exception e) {
- LOG.warn("Stat Element RPC executor fail!", e);
- }
- }
- // Drain all rpcCall, making sure any blocked threads are unblocked
- while ( ! statsRpcJobQueue.isEmpty()) {
- statsRpcJobQueue.poll();
- }
- }
-
- private void addGetAllStatJob(final RpcJobsQueue getAllStatJob) {
- final boolean success = statsRpcJobQueue.offer(getAllStatJob);
- if ( ! success) {
- LOG.warn("Put RPC request getAllStat fail! Queue is full.");
- }
- }
-
- private void addStatJob(final RpcJobsQueue getStatJob) {
- final boolean success = statsRpcJobQueue.offer(getStatJob);
- if ( ! success) {
- LOG.debug("Put RPC request for getStat fail! Queue is full.");
- }
+ txFutureCache = CacheBuilder.newBuilder().
+ expireAfterWrite(TXCACHE_WAIT_TIMEOUT, TimeUnit.SECONDS).
+ maximumSize(MAX_CACHE_SIZE).build();
}
@Override
final String cacheKey = buildCacheKey(id, nodeKey.getId());
final TransactionCacheContainer<? super TransactionAware> container =
new TransactionCacheContainerImpl<>(id, inputObj, nodeKey.getId());
- txCache.put(cacheKey, container);
+ putTransaction(cacheKey, container);
}
}
return String.valueOf(id.getValue()) + "-" + nodeId.getValue();
}
+ /**
+ * Put the given statistics transaction container into the cache.
+ *
+ * @param key Key that specifies the given transaction container.
+ * @param container Transaction container.
+ */
+ private synchronized void putTransaction(
+ String key, TransactionCacheContainer<? super TransactionAware> container) {
+ txCache.put(key, container);
+
+ SettableFuture<Boolean> future = txFutureCache.asMap().remove(key);
+ if (future != null) {
+ // Wake up a thread waiting for this transaction container.
+ future.set(true);
+ }
+ }
+
+ /**
+ * Check to see if the specified transaction container is cached in
+ * {@link #txCache}.
+ *
+ * @param key Key that specifies the transaction container.
+ * @return A future that will contain the result.
+ */
+ private synchronized Future<Boolean> isExpectedStatistics(String key) {
+ Future<Boolean> future;
+ TransactionCacheContainer<?> container = txCache.getIfPresent(key);
+ if (container == null) {
+ // Wait for the transaction container to be put into the cache.
+ SettableFuture<Boolean> f = SettableFuture.<Boolean>create();
+ SettableFuture<Boolean> current =
+ txFutureCache.asMap().putIfAbsent(key, f);
+ future = (current == null) ? f : current;
+ } else {
+ future = Futures.immediateFuture(Boolean.TRUE);
+ }
+
+ return future;
+ }
+
@Override
public Future<Optional<TransactionCacheContainer<?>>> getTransactionCacheContainer(
final TransactionId id, final NodeId nodeId) {
Preconditions.checkArgument(id != null, MSG_TRANS_ID_NOT_NULL);
Preconditions.checkArgument(nodeId != null, MSG_NODE_ID_NOT_NULL);
- final String key = buildCacheKey(id, nodeId);
- final SettableFuture<Optional<TransactionCacheContainer<?>>> result = SettableFuture.create();
-
- final RpcJobsQueue getTransactionCacheContainer = new RpcJobsQueue() {
+ String key = buildCacheKey(id, nodeId);
+ Optional<TransactionCacheContainer<?>> resultContainer =
+ Optional.<TransactionCacheContainer<?>> fromNullable(
+ txCache.asMap().remove(key));
+ if (!resultContainer.isPresent()) {
+ LOG.warn("Transaction cache not found: {}", key);
+ }
- @Override
- public Void call() throws Exception {
- final Optional<TransactionCacheContainer<?>> resultContainer =
- Optional.<TransactionCacheContainer<?>> fromNullable(txCache.getIfPresent(key));
- if (resultContainer.isPresent()) {
- txCache.invalidate(key);
- }
- result.set(resultContainer);
- return null;
- }
- };
- addStatJob(getTransactionCacheContainer);
- return result;
+ return Futures.immediateFuture(resultContainer);
}
@Override
Preconditions.checkArgument(id != null, MSG_TRANS_ID_NOT_NULL);
Preconditions.checkArgument(nodeId != null, MSG_NODE_ID_NOT_NULL);
- final String key = buildCacheKey(id, nodeId);
- final SettableFuture<Boolean> checkStatId = SettableFuture.create();
-
- final RpcJobsQueue isExpecedStatistics = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final Optional<TransactionCacheContainer<?>> result =
- Optional.<TransactionCacheContainer<?>> fromNullable(txCache.getIfPresent(key));
- checkStatId.set(Boolean.valueOf(result.isPresent()));
- return null;
- }
- };
- addStatJob(isExpecedStatistics);
- return checkStatId;
+ String key = buildCacheKey(id, nodeId);
+ return isExpectedStatistics(key);
}
@Override
Preconditions.checkArgument(notification != null, "TransactionAware can not be null!");
Preconditions.checkArgument(nodeId != null, MSG_NODE_ID_NOT_NULL);
- final RpcJobsQueue addNotification = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final TransactionId txId = notification.getTransactionId();
- final String key = buildCacheKey(txId, nodeId);
- final TransactionCacheContainer<? super TransactionAware> container = (txCache.getIfPresent(key));
- if (container != null) {
- container.addNotif(notification);
- }
- return null;
- }
- };
- addStatJob(addNotification);
+ TransactionId txId = notification.getTransactionId();
+ String key = buildCacheKey(txId, nodeId);
+ TransactionCacheContainer<? super TransactionAware> container =
+ txCache.getIfPresent(key);
+ if (container != null) {
+ container.addNotif(notification);
+ } else {
+ LOG.warn("Unable to add notification: {}, {}", key,
+ notification.getImplementedInterface());
+ }
}
@Override
public Future<TransactionId> getAllGroupsStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final SettableFuture<TransactionId> result = SettableFuture.create();
- final RpcJobsQueue getAllGroupStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetAllGroupStatisticsInputBuilder builder =
- new GetAllGroupStatisticsInputBuilder();
- builder.setNode(nodeRef);
- registrationRpcFutureCallBack(groupStatsService
- .getAllGroupStatistics(builder.build()), null, nodeRef, result);
- return null;
- }
- };
- addGetAllStatJob(getAllGroupStat);
+ SettableFuture<TransactionId> result = SettableFuture.create();
+ GetAllGroupStatisticsInputBuilder builder =
+ new GetAllGroupStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ groupStatsService.getAllGroupStatistics(builder.build()), null,
+ nodeRef, result);
return result;
}
@Override
public Future<TransactionId> getAllMetersStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final SettableFuture<TransactionId> result = SettableFuture.create();
- final RpcJobsQueue getAllMeterStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetAllMeterStatisticsInputBuilder builder =
- new GetAllMeterStatisticsInputBuilder();
- builder.setNode(nodeRef);
- registrationRpcFutureCallBack(meterStatsService
- .getAllMeterStatistics(builder.build()), null, nodeRef, result);
- return null;
- }
- };
- addGetAllStatJob(getAllMeterStat);
+ SettableFuture<TransactionId> result = SettableFuture.create();
+ GetAllMeterStatisticsInputBuilder builder =
+ new GetAllMeterStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ meterStatsService.getAllMeterStatistics(builder.build()), null,
+ nodeRef, result);
return result;
}
@Override
public Future<TransactionId> getAllFlowsStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final SettableFuture<TransactionId> result = SettableFuture.create();
- final RpcJobsQueue getAllFlowStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetAllFlowsStatisticsFromAllFlowTablesInputBuilder builder =
- new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder();
- builder.setNode(nodeRef);
- registrationRpcFutureCallBack(flowStatsService
- .getAllFlowsStatisticsFromAllFlowTables(builder.build()), null, nodeRef, result);
- return null;
- }
- };
- addGetAllStatJob(getAllFlowStat);
+ SettableFuture<TransactionId> result = SettableFuture.create();
+ GetAllFlowsStatisticsFromAllFlowTablesInputBuilder builder =
+ new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ flowStatsService.getAllFlowsStatisticsFromAllFlowTables(builder.build()),
+ null, nodeRef, result);
return result;
}
public void getAggregateFlowStat(final NodeRef nodeRef, final TableId tableId) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
Preconditions.checkArgument(tableId != null, "TableId can not be null!");
- final RpcJobsQueue getAggregateFlowStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder builder =
- new GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder();
- builder.setNode(nodeRef);
- builder.setTableId(tableId);
-
- final TableBuilder tbuilder = new TableBuilder();
- tbuilder.setId(tableId.getValue());
- tbuilder.setKey(new TableKey(tableId.getValue()));
- registrationRpcFutureCallBack(flowStatsService
- .getAggregateFlowStatisticsFromFlowTableForAllFlows(builder.build()), tbuilder.build(), nodeRef, null);
- return null;
- }
- };
- addGetAllStatJob(getAggregateFlowStat);
+ GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder builder =
+ new GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder();
+ builder.setNode(nodeRef).setTableId(tableId);
+
+ TableBuilder tbuilder = new TableBuilder().
+ setId(tableId.getValue()).
+ setKey(new TableKey(tableId.getValue()));
+ registrationRpcFutureCallBack(
+ flowStatsService.getAggregateFlowStatisticsFromFlowTableForAllFlows(builder.build()),
+ tbuilder.build(), nodeRef, null);
}
@Override
public Future<TransactionId> getAllPortsStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final SettableFuture<TransactionId> result = SettableFuture.create();
- final RpcJobsQueue getAllPortsStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetAllNodeConnectorsStatisticsInputBuilder builder =
- new GetAllNodeConnectorsStatisticsInputBuilder();
- builder.setNode(nodeRef);
- final Future<RpcResult<GetAllNodeConnectorsStatisticsOutput>> rpc =
- portStatsService.getAllNodeConnectorsStatistics(builder.build());
- registrationRpcFutureCallBack(rpc, null, nodeRef, result);
- return null;
- }
- };
- addGetAllStatJob(getAllPortsStat);
+ SettableFuture<TransactionId> result = SettableFuture.create();
+ GetAllNodeConnectorsStatisticsInputBuilder builder =
+ new GetAllNodeConnectorsStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ Future<RpcResult<GetAllNodeConnectorsStatisticsOutput>> rpc =
+ portStatsService.getAllNodeConnectorsStatistics(builder.build());
+ registrationRpcFutureCallBack(rpc, null, nodeRef, result);
return result;
}
@Override
public Future<TransactionId> getAllTablesStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final SettableFuture<TransactionId> result = SettableFuture.create();
- final RpcJobsQueue getAllTableStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetFlowTablesStatisticsInputBuilder builder =
- new GetFlowTablesStatisticsInputBuilder();
- builder.setNode(nodeRef);
- registrationRpcFutureCallBack(flowTableStatsService
- .getFlowTablesStatistics(builder.build()), null, nodeRef, result);
- return null;
- }
- };
- addGetAllStatJob(getAllTableStat);
+ SettableFuture<TransactionId> result = SettableFuture.create();
+ GetFlowTablesStatisticsInputBuilder builder =
+ new GetFlowTablesStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ flowTableStatsService.getFlowTablesStatistics(builder.build()),
+ null, nodeRef, result);
return result;
}
@Override
public Future<TransactionId> getAllQueueStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final SettableFuture<TransactionId> result = SettableFuture.create();
- final RpcJobsQueue getAllQueueStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetAllQueuesStatisticsFromAllPortsInputBuilder builder =
- new GetAllQueuesStatisticsFromAllPortsInputBuilder();
- builder.setNode(nodeRef);
- registrationRpcFutureCallBack(queueStatsService
- .getAllQueuesStatisticsFromAllPorts(builder.build()), null, nodeRef, result);
- return null;
- }
- };
- addGetAllStatJob(getAllQueueStat);
+ SettableFuture<TransactionId> result = SettableFuture.create();
+ GetAllQueuesStatisticsFromAllPortsInputBuilder builder =
+ new GetAllQueuesStatisticsFromAllPortsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ queueStatsService.getAllQueuesStatisticsFromAllPorts(builder.build()),
+ null, nodeRef, result);
return result;
}
@Override
public Future<TransactionId> getAllMeterConfigStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final SettableFuture<TransactionId> result = SettableFuture.create();
- final RpcJobsQueue qetAllMeterConfStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetAllMeterConfigStatisticsInputBuilder builder =
- new GetAllMeterConfigStatisticsInputBuilder();
- builder.setNode(nodeRef);
- registrationRpcFutureCallBack(meterStatsService
- .getAllMeterConfigStatistics(builder.build()), null, nodeRef, result);
- return null;
- }
- };
- addGetAllStatJob(qetAllMeterConfStat);
+ SettableFuture<TransactionId> result = SettableFuture.create();
+ GetAllMeterConfigStatisticsInputBuilder builder =
+ new GetAllMeterConfigStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ meterStatsService.getAllMeterConfigStatistics(builder.build()),
+ null, nodeRef, result);
return result;
}
@Override
public void getGroupFeaturesStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final RpcJobsQueue getGroupFeaturesStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- /* RPC input */
- final GetGroupFeaturesInputBuilder input = new GetGroupFeaturesInputBuilder();
- input.setNode(nodeRef);
- registrationRpcFutureCallBack(groupStatsService.getGroupFeatures(input.build()), null, nodeRef, null);
- return null;
- }
- };
- addStatJob(getGroupFeaturesStat);
+ GetGroupFeaturesInputBuilder input = new GetGroupFeaturesInputBuilder().
+ setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ groupStatsService.getGroupFeatures(input.build()), null, nodeRef,
+ null);
}
@Override
public void getMeterFeaturesStat(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final RpcJobsQueue getMeterFeaturesStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- /* RPC input */
- final GetMeterFeaturesInputBuilder input = new GetMeterFeaturesInputBuilder();
- input.setNode(nodeRef);
- registrationRpcFutureCallBack(meterStatsService.getMeterFeatures(input.build()), null, nodeRef, null);
- return null;
- }
- };
- addStatJob(getMeterFeaturesStat);
+ GetMeterFeaturesInputBuilder input = new GetMeterFeaturesInputBuilder().
+ setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ meterStatsService.getMeterFeatures(input.build()), null, nodeRef,
+ null);
}
@Override
public Future<TransactionId> getAllGroupsConfStats(final NodeRef nodeRef) {
Preconditions.checkArgument(nodeRef != null, MSG_NODE_REF_NOT_NULL);
- final SettableFuture<TransactionId> result = SettableFuture.create();
- final RpcJobsQueue getAllGropConfStat = new RpcJobsQueue() {
-
- @Override
- public Void call() throws Exception {
- final GetGroupDescriptionInputBuilder builder =
- new GetGroupDescriptionInputBuilder();
- builder.setNode(nodeRef);
- registrationRpcFutureCallBack(groupStatsService
- .getGroupDescription(builder.build()), null, nodeRef, result);
-
- return null;
- }
- };
- addGetAllStatJob(getAllGropConfStat);
+ SettableFuture<TransactionId> result = SettableFuture.create();
+ GetGroupDescriptionInputBuilder builder =
+ new GetGroupDescriptionInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(
+ groupStatsService.getGroupDescription(builder.build()), null,
+ nodeRef, result);
return result;
}
}
}
}
-
private final DataBroker dataBroker;
- private final ExecutorService statRpcMsgManagerExecutor;
private final ExecutorService statDataStoreOperationServ;
private EntityOwnershipService ownershipService;
private StatRpcMsgManager rpcMsgManager;
this.dataBroker = Preconditions.checkNotNull(dataBroker, "DataBroker can not be null!");
ThreadFactory threadFact;
threadFact = new ThreadFactoryBuilder().setNameFormat("odl-stat-rpc-oper-thread-%d").build();
- statRpcMsgManagerExecutor = Executors.newSingleThreadExecutor(threadFact);
threadFact = new ThreadFactoryBuilder().setNameFormat("odl-stat-ds-oper-thread-%d").build();
statDataStoreOperationServ = Executors.newSingleThreadExecutor(threadFact);
txChain = dataBroker.createTransactionChain(this);
portNotifyCommiter = new StatNotifyCommitPort(this, notifService, nodeRegistrator);
queueNotifyCommiter = new StatListenCommitQueue(this, dataBroker, notifService, nodeRegistrator);
- statRpcMsgManagerExecutor.execute(rpcMsgManager);
statDataStoreOperationServ.execute(this);
LOG.info("Statistics Manager started successfully!");
}
}
statCollectors = null;
}
- rpcMsgManager = close(rpcMsgManager);
- statRpcMsgManagerExecutor.shutdown();
+ rpcMsgManager = null;
statDataStoreOperationServ.shutdown();
txChain = close(txChain);
}
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>applications</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>table-miss-enforcer</artifactId>
/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
import java.util.List;
import java.util.concurrent.Callable;
import javax.annotation.Nonnull;
+import org.opendaylight.controller.md.sal.binding.api.ClusteredDataTreeChangeListener;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-public class LLDPPacketPuntEnforcer implements AutoCloseable, DataTreeChangeListener<FlowCapableNode> {
+public class LLDPPacketPuntEnforcer implements AutoCloseable, ClusteredDataTreeChangeListener<FlowCapableNode> {
private static final long STARTUP_LOOP_TICK = 500L;
private static final int STARTUP_LOOP_MAX_RETRIES = 8;
private static final short TABLE_ID = (short) 0;
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-parent</artifactId>
- <version>0.5.3-SNAPSHOT</version>
+ <version>0.5.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>topology-lldp-discovery</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencyManagement>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>liblldp</artifactId>
- <version>0.11.3-SNAPSHOT</version>
+ <version>0.11.5-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowplugin.model</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-parent</artifactId>
- <version>0.5.3-SNAPSHOT</version>
+ <version>0.5.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin.applications</groupId>
<artifactId>topology-manager</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent-lite</artifactId>
- <version>1.7.3-SNAPSHOT</version>
+ <version>1.7.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-artifacts</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>karaf-parent</artifactId>
- <version>1.7.3-SNAPSHOT</version>
+ <version>1.7.5-SNAPSHOT</version>
<relativePath></relativePath>
</parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-karaf</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<packaging>pom</packaging>
<prerequisites>
<maven>3.0</maven>
</prerequisites>
<properties>
- <openflowplugin.version>0.3.3-SNAPSHOT</openflowplugin.version>
+ <openflowplugin.version>0.3.5-SNAPSHOT</openflowplugin.version>
</properties>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<artifactId>drop-test-karaf</artifactId>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<artifactId>drop-test</artifactId>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-parent</artifactId>
- <version>1.7.3-SNAPSHOT</version>
+ <version>1.7.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin</groupId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<artifactId>features-openflowplugin-extension-he</artifactId>
<packaging>jar</packaging>
<properties>
- <openflowplugin.version>0.3.3-SNAPSHOT</openflowplugin.version>
+ <openflowplugin.version>0.3.5-SNAPSHOT</openflowplugin.version>
</properties>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-parent</artifactId>
- <version>1.7.3-SNAPSHOT</version>
+ <version>1.7.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin</groupId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<artifactId>features-openflowplugin-extension</artifactId>
<packaging>jar</packaging>
<properties>
- <openflowplugin.version>0.3.3-SNAPSHOT</openflowplugin.version>
+ <openflowplugin.version>0.3.5-SNAPSHOT</openflowplugin.version>
</properties>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-extension-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>openflowjava-extension-nicira-api</artifactId>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-extension-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>openflowjava-extension-nicira</artifactId>
public abstract class AbstractMatchCodec implements OFSerializer<MatchEntry>, OFDeserializer<MatchEntry> {
- private NxmHeader headerWithMask;
- private NxmHeader headerWithoutMask;
+ protected NxmHeader headerWithMask;
+ protected NxmHeader headerWithoutMask;
protected MatchEntryBuilder deserializeHeader(ByteBuf message) {
MatchEntryBuilder builder = new MatchEntryBuilder();
@Override
public MatchEntry deserialize(ByteBuf message) {
- MatchEntryBuilder matchEntriesBuilder = deserializeHeader(message);
- RegCaseValueBuilder caseBuilder = new RegCaseValueBuilder();
- caseBuilder.setRegValues(new RegValuesBuilder().setValue(message.readUnsignedInt()).build());
- matchEntriesBuilder.setMatchEntryValue(caseBuilder.build());
- return matchEntriesBuilder.build();
+ final MatchEntryBuilder matchEntriesBuilder = deserializeHeader(message);
+ final RegValuesBuilder regValuesBuilder = new RegValuesBuilder();
+ regValuesBuilder.setValue(message.readUnsignedInt());
+
+ if (matchEntriesBuilder.isHasMask()) {
+ regValuesBuilder.setMask(message.readUnsignedInt());
+ }
+
+ return matchEntriesBuilder
+ .setMatchEntryValue(new RegCaseValueBuilder()
+ .setRegValues(regValuesBuilder.build())
+ .build())
+ .build();
}
@Override
public void serialize(MatchEntry input, ByteBuf outBuffer) {
serializeHeader(input, outBuffer);
- RegCaseValue regCase = ((RegCaseValue) input.getMatchEntryValue());
- Long value = regCase.getRegValues().getValue();
- outBuffer.writeInt(value.intValue());
+ final RegCaseValue regCase = ((RegCaseValue) input.getMatchEntryValue());
+ outBuffer.writeInt(regCase.getRegValues().getValue().intValue());
+
+ if (input.isHasMask()) {
+ outBuffer.writeInt(regCase.getRegValues().getMask().intValue());
+ }
+ }
+
+ @Override
+ public NxmHeader getHeaderWithHasMask() {
+ if (headerWithMask == null) {
+ headerWithMask = new NxmHeader(getOxmClassCode(), getNxmFieldCode(), true, getValueLength() * 2);
+ }
+ return headerWithMask;
}
@Override
base ofoxm:match-field;
}
-
grouping ofj-nxm-nx-match-reg-grouping {
container reg-values {
leaf value {
type uint32;
}
+ leaf mask {
+ type uint32;
+ }
}
}
+
grouping ofj-oxm-of-mpls-label-grouping {
container mpls-label-values {
leaf mpls-label {
}
}
}
+
grouping ofj-nxm-nx-match-arp-sha-grouping {
container arp-sha-values {
leaf mac-address {
}
}
}
+
grouping ofj-nxm-nx-match-arp-tha-grouping {
container arp-tha-values {
leaf mac-address {
}
}
}
+
grouping ofj-nxm-of-match-arp-op-grouping {
container arp-op-values {
leaf value {
}
}
}
+
grouping ofj-nxm-of-match-arp-spa-grouping {
container arp-spa-values {
leaf value {
}
}
}
+
grouping ofj-nxm-of-match-arp-tpa-grouping {
container arp-tpa-values {
leaf value {
}
}
}
+
grouping ofj-nxm-of-in-port-type-grouping {
container nxm-of-in-port-values {
leaf value {
}
}
}
+
grouping ofj-nxm-nx-match-tun-ipv4-dst-grouping {
container tun-ipv4-dst-values {
leaf value {
}
}
}
+
grouping ofj-nxm-nx-match-tun-ipv4-src-grouping {
container tun-ipv4-src-values {
leaf value {
}
}
}
+
grouping ofj-nxm-of-match-eth-src-grouping {
container eth-src-values {
leaf mac-address {
}
}
}
+
grouping ofj-nxm-of-match-eth-dst-grouping {
container eth-dst-values {
leaf mac-address {
}
}
}
+
grouping ofj-nxm-of-match-eth-type-grouping {
container eth-type-values {
leaf value {
uses ofj-nxm-nx-match-nshc-grouping;
}
}
+
grouping ofj-nxm-nx-match-nshc-2-grouping {
container nshc-2-values {
uses ofj-nxm-nx-match-nshc-grouping;
}
}
+
grouping ofj-nxm-nx-match-nshc-3-grouping {
container nshc-3-values {
uses ofj-nxm-nx-match-nshc-grouping;
}
}
+
grouping ofj-nxm-nx-match-nshc-4-grouping {
container nshc-4-values {
uses ofj-nxm-nx-match-nshc-grouping;
}
}
+
grouping ofj-nxm-of-match-tcp-src-grouping {
container tcp-src-values {
leaf port {
}
}
}
+
grouping ofj-nxm-of-match-tcp-dst-grouping {
container tcp-dst-values {
leaf port {
}
}
}
- grouping ofj-nxm-of-match-udp-src-grouping {
+
+ grouping ofj-nxm-of-match-udp-src-grouping {
container udp-src-values {
leaf port {
type inet:port-number;
}
}
}
+
grouping ofj-nxm-of-match-udp-dst-grouping {
container udp-dst-values {
leaf port {
}
}
}
+
grouping ofj-nxm-nx-match-ct-state-grouping{
container ct-state-values {
leaf ct-state {
}
}
}
+
grouping ofj-nxm-nx-match-ct-zone-grouping{
container ct-zone-values {
leaf ct-zone {
}
}
}
+
grouping ofj-nxm-of-match-ip-dst-grouping {
container ip-dst-values {
leaf value {
}
}
}
+
grouping ofj-nxm-of-match-ip-src-grouping {
container ip-src-values {
leaf value {
}
}
}
+
grouping ofj-nxm-of-match-icmp-type-grouping {
container icmp-type-values {
leaf value {
}
}
}
+
grouping ofj-nxm-nx-match-encap-eth-type-grouping {
container encap-eth-type-values {
leaf encap-eth-type {
}
}
}
+
grouping ofj-nxm-nx-match-encap-eth-src-grouping {
container encap-eth-src-values {
leaf mac-address {
}
}
}
+
grouping ofj-nxm-nx-match-encap-eth-dst-grouping {
container encap-eth-dst-values {
leaf mac-address {
}
}
}
+
grouping ofj-nxm-nx-match-nsh-mdtype-grouping {
container nsh-mdtype-values {
leaf value {
}
}
}
+
grouping ofj-nxm-nx-match-nsh-np-grouping {
container nsh-np-values {
leaf value {
}
}
}
+
grouping ofj-nxm-nx-match-tun-gpe-np-grouping {
container tun-gpe-np-values {
leaf value {
case tun-gpe-np-case-value {
uses ofj-nxm-nx-match-tun-gpe-np-grouping;
}
-
case eth-type-case-value {
uses ofj-nxm-of-match-eth-type-grouping;
}
case icmp-type-case-value {
uses ofj-nxm-of-match-icmp-type-grouping;
}
-
}
}
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-extension-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
classes = new HashSet<>();
}
+ /**
+ * Get the augmentation classes associated with this key.
+ *
+ * @return the set of augmentation classes
+ */
+ public Set<Class<? extends Augmentation<?>>> getClasses() {
+ return classes;
+ }
+
/**
* @param cls equivalent augmentation class
* @return this for chaining
return Optional.absent();
}
+
}
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-extension-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-extension-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
import org.opendaylight.openflowjava.nx.codec.match.EncapEthSrcCodec;
import org.opendaylight.openflowjava.nx.codec.match.EncapEthDstCodec;
import org.opendaylight.openflowjava.nx.codec.match.NshMdtypeCodec;
-import org.opendaylight.openflowjava.nx.codec.match.NshNpCodec;
import org.opendaylight.openflowjava.nx.codec.match.TunGpeNpCodec;
import org.opendaylight.openflowjava.nx.codec.match.UdpSrcCodec;
import org.opendaylight.openflowjava.nx.codec.match.UdpDstCodec;
private ExtensionConverterRegistrator extensionConverterRegistrator;
private Set<ObjectRegistration<?>> registrations;
- private final static RegConvertor REG_CONVERTOR = new RegConvertor();
- private final static TunIdConvertor TUN_ID_CONVERTOR = new TunIdConvertor();
- private final static ArpOpConvertor ARP_OP_CONVERTOR = new ArpOpConvertor();
- private final static ArpShaConvertor ARP_SHA_CONVERTOR = new ArpShaConvertor();
- private final static ArpSpaConvertor ARP_SPA_CONVERTOR = new ArpSpaConvertor();
- private final static ArpTpaConvertor ARP_TPA_CONVERTOR = new ArpTpaConvertor();
- private final static ArpThaConvertor ARP_THA_CONVERTOR = new ArpThaConvertor();
- private final static NxmInPortConvertor NXM_IN_PORT_CONVERTOR = new NxmInPortConvertor();
- private final static EthDstConvertor ETH_DST_CONVERTOR = new EthDstConvertor();
- private final static EthSrcConvertor ETH_SRC_CONVERTOR = new EthSrcConvertor();
- private final static RegLoadConvertor REG_LOAD_CONVERTOR = new RegLoadConvertor();
- private final static RegMoveConvertor REG_MOVE_CONVERTOR = new RegMoveConvertor();
- private final static OutputRegConvertor OUTPUT_REG_CONVERTOR = new OutputRegConvertor();
- private final static EthTypeConvertor ETH_TYPE_CONVERTOR = new EthTypeConvertor();
- private final static ResubmitConvertor RESUBMIT_CONVERTOR = new ResubmitConvertor();
- private final static FinTimeoutConvertor FIN_TIMEOUT_CONVERTOR = new FinTimeoutConvertor();
- private final static MultipathConvertor MULTIPATH_CONVERTOR = new MultipathConvertor();
- private final static PushNshConvertor PUSH_NSH_CONVERTOR = new PushNshConvertor();
- private final static PopNshConvertor POP_NSH_CONVERTOR = new PopNshConvertor();
- private final static NspConvertor NSP_CONVERTOR = new NspConvertor();
- private final static NsiConvertor NSI_CONVERTOR = new NsiConvertor();
- private final static Nshc1Convertor NSC1_CONVERTOR = new Nshc1Convertor();
- private final static Nshc2Convertor NSC2_CONVERTOR = new Nshc2Convertor();
- private final static Nshc3Convertor NSC3_CONVERTOR = new Nshc3Convertor();
- private final static Nshc4Convertor NSC4_CONVERTOR = new Nshc4Convertor();
- private final static TunIPv4SrcConvertor TUN_IPV4_SRC_CONVERTOR = new TunIPv4SrcConvertor();
- private final static TunIPv4DstConvertor TUN_IPV4_DST_CONVERTOR = new TunIPv4DstConvertor();
- private final static EncapEthTypeConvertor ENCAP_ETH_TYPE_CONVERTOR = new EncapEthTypeConvertor();
- private final static EncapEthSrcConvertor ENCAP_ETH_SRC_CONVERTOR = new EncapEthSrcConvertor();
- private final static EncapEthDstConvertor ENCAP_ETH_DST_CONVERTOR = new EncapEthDstConvertor();
- private final static NshMdtypeConvertor NSH_MDTYPE_CONVERTOR = new NshMdtypeConvertor();
- private final static NshNpConvertor NSH_NP_CONVERTOR = new NshNpConvertor();
- private final static TunGpeNpConvertor TUN_GPE_NP_CONVERTOR = new TunGpeNpConvertor();
- private final static TcpSrcConvertor TCP_SRC_CONVERTOR = new TcpSrcConvertor();
- private final static TcpDstConvertor TCP_DST_CONVERTOR = new TcpDstConvertor();
- private final static UdpSrcConvertor UDP_SRC_CONVERTOR = new UdpSrcConvertor();
- private final static UdpDstConvertor UDP_DST_CONVERTOR = new UdpDstConvertor();
- private final static ConntrackConvertor CONNTRACK_CONVERTOR = new ConntrackConvertor();
- private final static LearnConvertor LEARN_CONVERTOR = new LearnConvertor();
- private final static CtStateConvertor CT_STATE_CONVERTOR = new CtStateConvertor();
- private final static CtZoneConvertor CT_ZONE_CONVERTOR = new CtZoneConvertor();
+ private static final RegConvertor REG_CONVERTOR = new RegConvertor();
+ private static final TunIdConvertor TUN_ID_CONVERTOR = new TunIdConvertor();
+ private static final ArpOpConvertor ARP_OP_CONVERTOR = new ArpOpConvertor();
+ private static final ArpShaConvertor ARP_SHA_CONVERTOR = new ArpShaConvertor();
+ private static final ArpSpaConvertor ARP_SPA_CONVERTOR = new ArpSpaConvertor();
+ private static final ArpTpaConvertor ARP_TPA_CONVERTOR = new ArpTpaConvertor();
+ private static final ArpThaConvertor ARP_THA_CONVERTOR = new ArpThaConvertor();
+ private static final NxmInPortConvertor NXM_IN_PORT_CONVERTOR = new NxmInPortConvertor();
+ private static final EthDstConvertor ETH_DST_CONVERTOR = new EthDstConvertor();
+ private static final EthSrcConvertor ETH_SRC_CONVERTOR = new EthSrcConvertor();
+ private static final RegLoadConvertor REG_LOAD_CONVERTOR = new RegLoadConvertor();
+ private static final RegMoveConvertor REG_MOVE_CONVERTOR = new RegMoveConvertor();
+ private static final OutputRegConvertor OUTPUT_REG_CONVERTOR = new OutputRegConvertor();
+ private static final EthTypeConvertor ETH_TYPE_CONVERTOR = new EthTypeConvertor();
+ private static final ResubmitConvertor RESUBMIT_CONVERTOR = new ResubmitConvertor();
+ private static final FinTimeoutConvertor FIN_TIMEOUT_CONVERTOR = new FinTimeoutConvertor();
+ private static final MultipathConvertor MULTIPATH_CONVERTOR = new MultipathConvertor();
+ private static final PushNshConvertor PUSH_NSH_CONVERTOR = new PushNshConvertor();
+ private static final PopNshConvertor POP_NSH_CONVERTOR = new PopNshConvertor();
+ private static final NspConvertor NSP_CONVERTOR = new NspConvertor();
+ private static final NsiConvertor NSI_CONVERTOR = new NsiConvertor();
+ private static final Nshc1Convertor NSC1_CONVERTOR = new Nshc1Convertor();
+ private static final Nshc2Convertor NSC2_CONVERTOR = new Nshc2Convertor();
+ private static final Nshc3Convertor NSC3_CONVERTOR = new Nshc3Convertor();
+ private static final Nshc4Convertor NSC4_CONVERTOR = new Nshc4Convertor();
+ private static final TunIPv4SrcConvertor TUN_IPV4_SRC_CONVERTOR = new TunIPv4SrcConvertor();
+ private static final TunIPv4DstConvertor TUN_IPV4_DST_CONVERTOR = new TunIPv4DstConvertor();
+ private static final EncapEthTypeConvertor ENCAP_ETH_TYPE_CONVERTOR = new EncapEthTypeConvertor();
+ private static final EncapEthSrcConvertor ENCAP_ETH_SRC_CONVERTOR = new EncapEthSrcConvertor();
+ private static final EncapEthDstConvertor ENCAP_ETH_DST_CONVERTOR = new EncapEthDstConvertor();
+ private static final NshMdtypeConvertor NSH_MDTYPE_CONVERTOR = new NshMdtypeConvertor();
+ private static final NshNpConvertor NSH_NP_CONVERTOR = new NshNpConvertor();
+ private static final TunGpeNpConvertor TUN_GPE_NP_CONVERTOR = new TunGpeNpConvertor();
+ private static final TcpSrcConvertor TCP_SRC_CONVERTOR = new TcpSrcConvertor();
+ private static final TcpDstConvertor TCP_DST_CONVERTOR = new TcpDstConvertor();
+ private static final UdpSrcConvertor UDP_SRC_CONVERTOR = new UdpSrcConvertor();
+ private static final UdpDstConvertor UDP_DST_CONVERTOR = new UdpDstConvertor();
+ private static final ConntrackConvertor CONNTRACK_CONVERTOR = new ConntrackConvertor();
+ private static final LearnConvertor LEARN_CONVERTOR = new LearnConvertor();
+ private static final CtStateConvertor CT_STATE_CONVERTOR = new CtStateConvertor();
+ private static final CtZoneConvertor CT_ZONE_CONVERTOR = new CtZoneConvertor();
@Override
public void close() {
ActionSerializerKey<?> key = new ActionSerializerKey(EncodeConstants.OF13_VERSION_ID, actionCaseType, null);
registrations.add(extensionConverterRegistrator.registerActionConvertor(key, actionConvertor));
}
-
}
package org.opendaylight.openflowplugin.extension.vendor.nicira.convertor.match;
import com.google.common.base.Optional;
+import java.util.Objects;
import org.opendaylight.openflowjava.nx.NiciraMatchCodecs;
import org.opendaylight.openflowplugin.extension.api.ConvertorFromOFJava;
import org.opendaylight.openflowplugin.extension.api.ConvertorToOFJava;
/*
* (non-Javadoc)
- *
+ *
* @see
* org.opendaylight.openflowplugin.extension.api.ConvertorFromOFJava#convert
* (org.opendaylight.yangtools.yang.binding.DataContainer,
.getOxmMatchField());
RegCaseValue regCaseValue = ((RegCaseValue) input.getMatchEntryValue());
nxRegBuilder.setValue(regCaseValue.getRegValues().getValue());
+
+ if (input.isHasMask()) {
+ nxRegBuilder.setMask(regCaseValue.getRegValues().getMask());
+ }
+
return resolveAugmentation(nxRegBuilder.build(), path, resolveRegKey(input.getOxmMatchField()));
}
/*
* (non-Javadoc)
- *
+ *
* @see
* org.opendaylight.openflowplugin.extension.api.ConvertorToOFJava#convert
* (org
throw new CodecPreconditionException(extension);
}
NxmNxReg nxmNxReg = matchGrouping.get().getNxmNxReg();
- RegValuesBuilder regValuesBuilder = new RegValuesBuilder().setValue(nxmNxReg.getValue());
+ RegValuesBuilder regValuesBuilder = new RegValuesBuilder()
+ .setValue(nxmNxReg.getValue())
+ .setMask(nxmNxReg.getMask());
+
RegCaseValueBuilder regCaseValueBuilder = new RegCaseValueBuilder();
regCaseValueBuilder.setRegValues(regValuesBuilder.build());
return MatchUtil.createDefaultMatchEntryBuilder(nxmNxReg.getReg(),
Nxm1Class.class,
- regCaseValueBuilder.build()).build();
+ regCaseValueBuilder.build())
+ .setHasMask(Objects.nonNull(nxmNxReg.getMask()))
+ .build();
}
}
leaf value {
type uint32;
}
+ leaf mask {
+ type uint32;
+ }
}
}
grouping nxm-nx-tun-id-grouping {
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-extension-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>test-extension</artifactId>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-parent</artifactId>
- <version>1.7.3-SNAPSHOT</version>
+ <version>1.7.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin-he</artifactId>
<packaging>jar</packaging>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<properties>
- <yangtools.version>1.0.3-SNAPSHOT</yangtools.version>
- <config.version>0.5.3-SNAPSHOT</config.version>
- <mdsal.version>1.4.3-SNAPSHOT</mdsal.version>
- <openflowjava.version>0.8.3-SNAPSHOT</openflowjava.version>
- <openflowplugin.version>0.3.3-SNAPSHOT</openflowplugin.version>
- <dlux.version>0.4.3-SNAPSHOT</dlux.version>
- <lldp.version>0.11.3-SNAPSHOT</lldp.version>
+ <yangtools.version>1.0.5-SNAPSHOT</yangtools.version>
+ <config.version>0.5.5-SNAPSHOT</config.version>
+ <mdsal.version>1.4.5-SNAPSHOT</mdsal.version>
+ <openflowjava.version>0.8.5-SNAPSHOT</openflowjava.version>
+ <openflowplugin.version>0.3.5-SNAPSHOT</openflowplugin.version>
+ <dlux.version>0.4.5-SNAPSHOT</dlux.version>
+ <lldp.version>0.11.5-SNAPSHOT</lldp.version>
<config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
- <restconf.version>1.4.3-SNAPSHOT</restconf.version>
- <mdsal.model.version>0.9.3-SNAPSHOT</mdsal.model.version>
+ <restconf.version>1.4.5-SNAPSHOT</restconf.version>
+ <mdsal.model.version>0.9.5-SNAPSHOT</mdsal.model.version>
</properties>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>features-parent</artifactId>
- <version>1.7.3-SNAPSHOT</version>
+ <version>1.7.5-SNAPSHOT</version>
<relativePath/>
</parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
<packaging>jar</packaging>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<properties>
- <yangtools.version>1.0.3-SNAPSHOT</yangtools.version>
- <config.version>0.5.3-SNAPSHOT</config.version>
- <mdsal.version>1.4.3-SNAPSHOT</mdsal.version>
- <openflowjava.version>0.8.3-SNAPSHOT</openflowjava.version>
- <openflowplugin.version>0.3.3-SNAPSHOT</openflowplugin.version>
- <dlux.version>0.4.3-SNAPSHOT</dlux.version>
- <lldp.version>0.11.3-SNAPSHOT</lldp.version>
+ <yangtools.version>1.0.5-SNAPSHOT</yangtools.version>
+ <config.version>0.5.5-SNAPSHOT</config.version>
+ <mdsal.version>1.4.5-SNAPSHOT</mdsal.version>
+ <openflowjava.version>0.8.5-SNAPSHOT</openflowjava.version>
+ <openflowplugin.version>0.3.5-SNAPSHOT</openflowplugin.version>
+ <dlux.version>0.4.5-SNAPSHOT</dlux.version>
+ <lldp.version>0.11.5-SNAPSHOT</lldp.version>
<config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
- <restconf.version>1.4.3-SNAPSHOT</restconf.version>
- <mdsal.model.version>0.9.3-SNAPSHOT</mdsal.model.version>
+ <restconf.version>1.4.5-SNAPSHOT</restconf.version>
+ <mdsal.model.version>0.9.5-SNAPSHOT</mdsal.model.version>
</properties>
<dependencyManagement>
<parent>
<groupId>org.opendaylight.openflowplugin.model</groupId>
<artifactId>model-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<artifactId>model-flow-base</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.openflowplugin.model</groupId>
<artifactId>model-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<artifactId>model-flow-service</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.openflowplugin.model</groupId>
<artifactId>model-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
</parent>
<artifactId>model-flow-statistics</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<groupId>org.opendaylight.openflowplugin.model</groupId>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
public static final String APPLICATION_TAG = "OPENFLOW_PLUGIN";
/** RpcError tag - timeout */
public static final String ERROR_TAG_TIMEOUT = "TIMOUT";
+
+ /** Persistent ID of OpenFlowPlugin configuration file */
+ public static final String CONFIG_FILE_ID = "org.opendaylight.openflowplugin";
}
/**
* About to stop services in cluster not master anymore or going down
* @return Future most of services need time to be closed
- * @param connectionInterrupted true if clustering services stopping by device disconnect
*/
- default ListenableFuture<Void> stopClusterServices(boolean connectionInterrupted) {
+ default ListenableFuture<Void> stopClusterServices() {
return Futures.immediateFailedFuture(new RejectedExecutionException("Cannot stop abstract services, check implementation of cluster services"));
}
--- /dev/null
+/*
+ * Copyright (c) 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.api.openflow;
+
+import com.google.common.collect.ImmutableMap;
+import java.util.Map;
+import javax.annotation.Nonnull;
+
+/**
+ * Manages OpenFlowPlugin configuration
+ */
+public interface OpenFlowPluginConfigurationService {
+
+ /**
+ * Enum of property keys. All keys from OpenFlowPlugin configuration file are parsed to this enum.
+ * Each enum value represents one working configuration key in format
+ * ENUM.name().toLowerCase().replace('_', '-'), so for example PropertyType.IS_STATISTICS_POLLING_ON
+ * represents 'is-statistics-polling-on' configuration key.
+ */
+ enum PropertyType {
+ /**
+ * Is statistics polling on property type.
+ */
+ IS_STATISTICS_POLLING_ON,
+ /**
+ * Barrier count limit property type.
+ */
+ BARRIER_COUNT_LIMIT,
+ /**
+ * Barrier interval timeout limit property type.
+ */
+ BARRIER_INTERVAL_TIMEOUT_LIMIT,
+ /**
+ * Echo reply timeout property type.
+ */
+ ECHO_REPLY_TIMEOUT,
+ /**
+ * Enable flow removed notification property type.
+ */
+ ENABLE_FLOW_REMOVED_NOTIFICATION,
+ /**
+ * Skip table features property type.
+ */
+ SKIP_TABLE_FEATURES,
+ /**
+ * Basic timer delay property type.
+ */
+ BASIC_TIMER_DELAY,
+ /**
+ * Maximum timer delay property type.
+ */
+ MAXIMUM_TIMER_DELAY,
+ /**
+ * Switch features mandatory property type.
+ */
+ SWITCH_FEATURES_MANDATORY,
+ /**
+ * Is statistics rpc enabled property type.
+ */
+ @Deprecated
+ IS_STATISTICS_RPC_ENABLED,
+ /**
+ * Rpc requests quota property type.
+ */
+ RPC_REQUESTS_QUOTA,
+ /**
+ * Global notification quota property type.
+ */
+ GLOBAL_NOTIFICATION_QUOTA,
+ /**
+ * Thread pool min threads property type.
+ */
+ THREAD_POOL_MIN_THREADS,
+ /**
+ * Thread pool max threads property type.
+ */
+ THREAD_POOL_MAX_THREADS,
+ /**
+ * Thread pool timeout property type.
+ */
+ THREAD_POOL_TIMEOUT;
+
+ private static final Map<String, PropertyType> KEY_VALUE_MAP;
+
+ /**
+ * Get property type from property key
+ *
+ * @param key the property key
+ * @return the property type
+ */
+ public static PropertyType forValue(final String key) {
+ return KEY_VALUE_MAP.get(key);
+ }
+
+ static {
+ final PropertyType[] values = values();
+ final ImmutableMap.Builder<String, PropertyType> builder = ImmutableMap.builder();
+
+ for (final PropertyType value : values) {
+ builder.put(value.toString(), value);
+ }
+
+ KEY_VALUE_MAP = builder.build();
+ }
+
+ /**
+ * Converts enum name to property key
+ *
+ * @return the property key
+ */
+ @Override
+ public String toString() {
+ return this.name().toLowerCase().replace('_', '-');
+ }
+
+ }
+
+ /**
+ * Parses key-value pairs of properties read from OpenFlowPlugin configuration file and processes them
+ *
+ * @param properties properties
+ */
+ void update(@Nonnull Map<String,Object> properties);
+
+ /**
+ * Parse and process single property key-value pair
+ *
+ * @see org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginConfigurationService.PropertyType
+ * @param key property type
+ * @param value property value
+ */
+ void updateProperty(@Nonnull PropertyType key, @Nonnull Object value);
+
+}
/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
package org.opendaylight.openflowplugin.api.openflow;
-import java.util.Collection;
-import java.util.Map;
import org.opendaylight.controller.md.sal.binding.api.BindingService;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.openflowjava.protocol.spi.connection.SwitchConnectionProvider;
/**
* Plugin services provider
*/
public interface OpenFlowPluginProvider extends AutoCloseable, BindingService {
- /**
- * Method sets openflow java's connection providers.
- */
- void setSwitchConnectionProviders(Collection<SwitchConnectionProvider> switchConnectionProvider);
-
- /**
- * setter
- *
- * @param dataBroker
- */
- void setDataBroker(DataBroker dataBroker);
-
- void setRpcProviderRegistry(RpcProviderRegistry rpcProviderRegistry);
-
- void setNotificationProviderService(NotificationService notificationProviderService);
-
- void setNotificationPublishService(NotificationPublishService notificationPublishService);
-
/**
* Method initializes all DeviceManager, RpcManager and related contexts.
*/
void initialize();
- /**
- * This parameter indicates whether it is mandatory for switch to support OF1.3 features : table, flow, meter,group.
- * If this is set to true and switch doesn't support these features its connection will be denied.
- * @param switchFeaturesMandatory
- */
- void setSwitchFeaturesMandatory(final boolean switchFeaturesMandatory);
-
- boolean isSwitchFeaturesMandatory();
-
- boolean isStatisticsPollingOn();
-
- void setStatisticsPollingOn(final boolean isStatisticsPollingOn);
-
- /**
- * Backward compatibility feature - exposing rpc for statistics polling (result is provided in form of async notification)
- *
- * @param isStatisticsRpcEnabled
- */
- void setIsStatisticsRpcEnabled(boolean isStatisticsRpcEnabled);
-
- void setBarrierCountLimit(int barrierCountLimit);
-
- void setBarrierInterval(long barrierTimeoutLimit);
-
- void setEchoReplyTimeout(long echoReplyTimeout);
-
- void setFlowRemovedNotification(boolean isFlowRemovedNotificationOn);
-
- void update(Map<String,Object> props);
-
- void setClusteringSingletonServicesProvider(ClusterSingletonServiceProvider singletonServicesProvider);
-
- void setSkipTableFeatures(boolean skipTableFeatures);
-
- void setBasicTimerDelay(long basicTimerDelay);
-
- void setMaximumTimerDelay(long maximumTimerDelay);
}
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowjava.protocol.spi.connection.SwitchConnectionProvider;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.openflow.provider.config.rev160510.OpenflowProviderConfig;
+import org.osgi.framework.BundleContext;
/**
* Factory for creating OpenFlowPluginProvider instances.
*/
public interface OpenFlowPluginProviderFactory {
- OpenFlowPluginProvider newInstance(OpenflowProviderConfig providerConfig, DataBroker dataBroker,
- RpcProviderRegistry rpcRegistry, NotificationService notificationService,
+ OpenFlowPluginProvider newInstance(OpenflowProviderConfig providerConfig,
+ DataBroker dataBroker,
+ RpcProviderRegistry rpcRegistry,
+ NotificationService notificationService,
NotificationPublishService notificationPublishService,
EntityOwnershipService entityOwnershipService,
List<SwitchConnectionProvider> switchConnectionProviders,
- ClusterSingletonServiceProvider singletonServiceProvider);
+ ClusterSingletonServiceProvider singletonServiceProvider,
+ BundleContext bundleContext);
}
* @return listenable future from sal role service
*/
ListenableFuture<RpcResult<SetRoleOutput>> makeDeviceSlave();
+
+ void sendNodeAddedNotification();
+
+ void sendNodeRemovedNotification();
+
+ void cleanupDeviceData();
}
package org.opendaylight.openflowplugin.api.openflow.device;
import com.google.common.util.concurrent.CheckedFuture;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.openflowplugin.api.openflow.OFPManager;
import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceConnectedHandler;
OFPManager,
DeviceConnectedHandler,
DeviceDisconnectedHandler,
- TranslatorLibrarian {
+ TranslatorLibrarian,
+ EntityOwnershipListener {
/**
* invoked after all services injected
void setBarrierInterval(long barrierTimeoutLimit);
CheckedFuture<Void, TransactionCommitFailedException> removeDeviceFromOperationalDS(DeviceInfo deviceInfo);
+
+ void setGlobalNotificationQuota(long globalNotificationQuota);
+
+ void setSwitchFeaturesMandatory(boolean switchFeaturesMandatory);
}
package org.opendaylight.openflowplugin.api.openflow.lifecycle;
import javax.annotation.CheckForNull;
+
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListener;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowplugin.api.openflow.OFPContext;
import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-public abstract class AbstractModelDrivenSwitchRegistration extends AbstractObjectRegistration<ModelDrivenSwitch>
- implements ModelDrivenSwitchRegistration {
+public abstract class AbstractModelDrivenSwitchRegistration
+ extends AbstractObjectRegistration<ModelDrivenSwitch>
+ implements ModelDrivenSwitchRegistration {
protected AbstractModelDrivenSwitchRegistration(final ModelDrivenSwitch instance) {
super(instance);
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
/**
- * interface concatenating all md-sal services provided by OF-switch
+ * interface concatenating all md-sal services provided by OF-switch.
*/
public interface ModelDrivenSwitch
extends
Identifiable<InstanceIdentifier<Node>> {
/**
- * @param rpcProviderRegistry
+ * Register.
+ * @param rpcProviderRegistry rpc provider
* @return wrapped list of {service provider + path} registration couples
*/
ModelDrivenSwitchRegistration register(RpcProviderRegistry rpcProviderRegistry);
/**
+ * Getter.
* @return id of encapsulated node (served by this impl)
*/
NodeId getNodeId();
/**
- * returnes the session context associated with this model-driven switch
+ * returns the session context associated with this model-driven switch.
*
* @return session context object
*/
SessionContext getSessionContext();
/**
- * Returns whether this *instance* is entity owner or not
+ * Returns whether this *instance* is entity owner or not.
* @return true if it's entity owner, else false.
*/
boolean isEntityOwner();
/**
- * Set entity ownership satus of this switch in *this* instance
- * @param isOwner
+ * Set entity ownership status of this switch in *this* instance.
+ * @param isOwner is owner
*/
void setEntityOwnership(boolean isOwner);
/**
* Method send port/desc multipart request to the switch to fetch the initial details.
*/
-
- public abstract void requestSwitchDetails();
+ void requestSwitchDetails();
}
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
import org.opendaylight.yangtools.yang.binding.DataObject;
-/**
- * @author mirehak
- */
public interface ConnectionConductor {
- /** distinguished connection states */
- public enum CONDUCTOR_STATE {
- /** initial phase of talking to switch */
+ /** distinguished connection states. */
+ @SuppressWarnings({"checkstyle:abbreviationaswordinname", "checkstyle:typename"})
+ enum CONDUCTOR_STATE {
+ /** initial phase of talking to switch. */
HANDSHAKING,
- /** standard phase - interacting with switch */
+ /** standard phase - interacting with switch. */
WORKING,
- /** connection is idle, waiting for echo reply from switch */
+ /** connection is idle, waiting for echo reply from switch. */
TIMEOUTING,
- /** talking to switch is over - resting in pieces */
+ /** talking to switch is over - resting in pieces. */
RIP
}
- /** supported version ordered by height (highest version is at the beginning) */
- List<Short> versionOrder = Lists.newArrayList((short) 0x04, (short) 0x01);
+ /** supported version ordered by height (highest version is at the beginning). */
+ List<Short> VERSION_ORDER = Lists.newArrayList((short) 0x04, (short) 0x01);
/**
- * initialize wiring around {@link ConnectionAdapter}
+ * initialize wiring around {@link ConnectionAdapter}.
*/
void init();
/**
- * @return the negotiated version
+ * return the negotiated version.
*/
Short getVersion();
/**
- * @return the state of conductor
+ * return the state of conductor.
*/
CONDUCTOR_STATE getConductorState();
/**
- * @param conductorState
+ * Setter.
+ * @param conductorState state
*/
void setConductorState(CONDUCTOR_STATE conductorState);
/**
- * terminates owned connection
+ * terminates owned connection.
* @return future result of disconnect action
*/
Future<Boolean> disconnect();
/**
- * assign corresponding {@link SessionContext} to this conductor (to handle disconnect caused by switch)
- * @param context
+ * assign corresponding {@link SessionContext} to this conductor (to handle disconnect caused by switch).
+ * @param context session context
*/
void setSessionContext(SessionContext context);
/**
- * assign corresponding {@link org.opendaylight.openflowplugin.api.openflow.md.core.SwitchConnectionDistinguisher} to this conductor
- * to handle disconnect caused by switch. This involves auxiliary conductors only.
- * @param auxiliaryKey
+ * assign corresponding {@link org.opendaylight.openflowplugin.api.openflow.md.core.SwitchConnectionDistinguisher}
+ * to this conductor to handle disconnect caused by switch. This involves auxiliary conductors only.
+ * @param auxiliaryKey key
*/
void setConnectionCookie(SwitchConnectionDistinguisher auxiliaryKey);
/**
- * @return the sessionContext
+ * return the sessionContext.
*/
SessionContext getSessionContext();
/**
- * @return the auxiliaryKey (null if this is a primary connection)
+ * return the auxiliaryKey (null if this is a primary connection).
*/
SwitchConnectionDistinguisher getAuxiliaryKey();
/**
- * @return the connectionAdapter
+ * return the connectionAdapter.
*/
ConnectionAdapter getConnectionAdapter();
/**
- * assign global queueKeeper
- * @param queueKeeper
+ * assign global queueKeeper.
+ * @param queueKeeper keeper
*/
void setQueueProcessor(QueueProcessor<OfHeader, DataObject> queueKeeper);
/**
+ * Setter.
* @param errorHandler for internal exception handling
*/
void setErrorHandler(ErrorHandler errorHandler);
/**
- * @param conductorId
+ * Setter.
+ * @param conductorId id
*/
void setId(int conductorId);
import org.opendaylight.openflowplugin.api.openflow.md.core.session.SessionContext;
-/**
- * @author mirehak
- *
- */
public interface ErrorHandler {
/**
- * @param e cause
+ * Exception handler.
+ * @param throwable cause
* @param sessionContext of source
*/
- void handleException(Throwable e, SessionContext sessionContext);
+ void handleException(Throwable throwable, SessionContext sessionContext);
}
import org.opendaylight.openflowplugin.api.openflow.connection.HandshakeContext;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.GetFeaturesOutput;
-/**
- * @author mirehak
- *
- */
public interface HandshakeListener {
/**
+ * Handshake successful.
* @param featureOutput obtained
* @param version negotiated
*/
void onHandshakeFailure();
/**
- * @param handshakeContext
+ * Setter.
+ * @param handshakeContext context
*/
void setHandshakeContext(HandshakeContext handshakeContext);
}
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.HelloMessage;
-/**
- * @author mirehak
- */
public interface HandshakeManager {
/**
- * @return negotiated version
+ * return negotiated version.
*/
Short getVersion();
/**
+ * Setter.
* @param errorHandler the errorHandler to set
*/
void setErrorHandler(ErrorHandler errorHandler);
/**
+ * Setter.
* @param handshakeListener the handshakeListener to set
*/
void setHandshakeListener(HandshakeListener handshakeListener);
/**
- * @param isBitmapNegotiationEnable
+ * should use negotiation bit map.
+ * @param isBitmapNegotiationEnable yes/no
*/
void setUseVersionBitmap(boolean isBitmapNegotiationEnable);
/**
+ * process current handshake step.
* @param receivedHello message from device we need to act upon
- * process current handshake step
*/
void shake(HelloMessage receivedHello);
}
import org.opendaylight.openflowplugin.api.openflow.md.core.session.SessionContext;
/**
- * translates between messages
+ * translates between messages.
* @param <I> source message type (IN)
* @param <O> result message type (OUT)
*/
/**
* This method is called in order to translate message to MD-SAL or from MD-SAL.
- *
- * @param cookie
- * auxiliary connection identifier
- * @param sc
- * The SessionContext which sent the OF message
- * @param msg
- * The OF message
- *
+ * @param cookie auxiliary connection identifier
+ * @param sc The SessionContext which sent the OF message
+ * @param msg The OF message
* @return translated message
*/
O translate(SwitchConnectionDistinguisher cookie, SessionContext sc, I msg);
-/**
+/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
+ *
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
package org.opendaylight.openflowplugin.api.openflow.md.core;
/**
- * provider of wrapped notification enqueue
+ * provider of wrapped notification enqueue.
*/
public interface NotificationEnqueuer {
/**
- * enqueue given notification into standard message processing queue
- *
- * @param notification
+ * enqueue given notification into standard message processing queue.
+ * @param notification notification
*/
void enqueueNotification(NotificationQueueWrapper notification);
-/**
+/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
+ *
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
import org.opendaylight.yangtools.yang.binding.DataContainer;
import org.opendaylight.yangtools.yang.binding.Notification;
-/**
- *
- */
public class NotificationQueueWrapper implements OfHeader {
-
+
private final Notification notification;
private final Short version;
private Long xid = -1L;
-
+
/**
- * @param notification
- * @param version
+ * Notification queue wrapper.
+ * @param notification notification
+ * @param version version
*/
public NotificationQueueWrapper(final Notification notification, final Short version) {
Preconditions.checkArgument(notification != null, "wrapped notification must not be null");
Preconditions.checkArgument(version != null, "message version of wrapped notification must not be null");
- this.notification = notification;
+ this.notification = notification;
this.version = version;
}
}
/**
- * @return the notification
+ * return the notification.
*/
public Notification getNotification() {
return notification;
}
/**
+ * Setter.
* @param xid the xid to set
*/
public void setXid(Long xid) {
package org.opendaylight.openflowplugin.api.openflow.md.core;
-/**
- * @author mirehak
- */
public interface SwitchConnectionDistinguisher {
/**
+ * Getter.
* @return encoded switch session identifier
*/
long getCookie();
*/
package org.opendaylight.openflowplugin.api.openflow.md.core;
-/**
- * @author mirehak
- */
public class TranslatorKey {
- private int version;
- private String messageClass;
+ private final int version;
+ private final String messageClass;
/**
- * @param version
- * @param messageClass
+ * Constructor.
+ * @param version version
+ * @param messageClass message class
*/
public TranslatorKey(int version, String messageClass) {
this.version = version;
} else if (!messageClass.equals(other.messageClass)) {
return false;
}
- if (version != other.version) {
- return false;
- }
- return true;
+ return version == other.version;
}
}
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.GetFeaturesOutput;
/**
- * Common interface for SwitchFeatures builders for different OF versions
- *
- * @author jsebin
- *
+ * Common interface for SwitchFeatures builders for different OF versions.
*/
public interface BuildSwitchFeatures {
/**
- *
- * @param features {@link org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.GetFeaturesOutput}
+ * Getter.
+ * @param features
+ * {@link org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.GetFeaturesOutput}
* @return {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SwitchFeatures}
*/
SwitchFeatures build(GetFeaturesOutput features);
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
+ *
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
import org.opendaylight.yangtools.yang.binding.Notification;
/**
+ * Notification composer.
* @param <N> type of notification
*/
public interface NotificationComposer<N extends Notification> {
-
+
/**
+ * Compose.
* @param xid corresponding OF transaction id
* @return notification instance
*/
/**
* Message Dispatch Service to send the message to switch.
- *
- * @author AnilGujele
- *
*/
public interface IMessageDispatchService {
- public static final String CONNECTION_ERROR_MESSAGE = "Session for the cookie is invalid. Reason: "
- + "the switch has been recently disconnected OR inventory provides outdated information.";
+ String CONNECTION_ERROR_MESSAGE = "Session for the cookie is invalid. Reason: "
+ + "the switch has been recently disconnected OR inventory provides outdated information.";
/**
- * send barrier message to switch
+ * send barrier message to switch.
*
* @param input
* - message
Future<RpcResult<BarrierOutput>> barrier(BarrierInput input, SwitchConnectionDistinguisher cookie);
/**
- * send experimenter message to switch
+ * send experimenter message to switch.
*
* @param input
* - message
Future<RpcResult<java.lang.Void>> experimenter(ExperimenterInput input, SwitchConnectionDistinguisher cookie);
/**
- * send flow modification message to switch
+ * send flow modification message to switch.
*
* @param input
* - message
Future<RpcResult<UpdateFlowOutput>> flowMod(FlowModInput input, SwitchConnectionDistinguisher cookie);
/**
- * send get async message to switch
+ * send get async message to switch.
*
* @param input
* - message
Future<RpcResult<GetAsyncOutput>> getAsync(GetAsyncInput input, SwitchConnectionDistinguisher cookie);
/**
- * send get config message to switch
+ * send get config message to switch.
*
* @param input
* - message
Future<RpcResult<GetConfigOutput>> getConfig(GetConfigInput input, SwitchConnectionDistinguisher cookie);
/**
- * send get features message to switch
+ * send get features message to switch.
*
* @param input
* - message
Future<RpcResult<GetFeaturesOutput>> getFeatures(GetFeaturesInput input, SwitchConnectionDistinguisher cookie);
/**
- * send get queue config message to switch
+ * send get queue config message to switch.
*
* @param input
* - message
SwitchConnectionDistinguisher cookie);
/**
- * send group modification message to switch
+ * send group modification message to switch.
*
* @param input
* - message
Future<RpcResult<UpdateGroupOutput>> groupMod(GroupModInput input, SwitchConnectionDistinguisher cookie);
/**
- * send meter modification message to switch
+ * send meter modification message to switch.
*
* @param input
* - message
Future<RpcResult<UpdateMeterOutput>> meterMod(MeterModInput input, SwitchConnectionDistinguisher cookie);
/**
- * send multipart request message to switch
+ * send multipart request message to switch.
*
* @param input
* - multipart request message
* any connection
* @return - the future
*/
- Future<RpcResult<java.lang.Void>> multipartRequest(MultipartRequestInput input, SwitchConnectionDistinguisher cookie);
+ Future<RpcResult<java.lang.Void>> multipartRequest(
+ MultipartRequestInput input,
+ SwitchConnectionDistinguisher cookie);
/**
- * send packet out message to switch
+ * send packet out message to switch.
*
* @param input
* - message
Future<RpcResult<java.lang.Void>> packetOut(PacketOutInput input, SwitchConnectionDistinguisher cookie);
/**
- * send port modification message to switch
+ * send port modification message to switch.
*
* @param input
* - message
Future<RpcResult<UpdatePortOutput>> portMod(PortModInput input, SwitchConnectionDistinguisher cookie);
/**
- * send role request message to switch
+ * send role request message to switch.
*
* @param input
* - message
Future<RpcResult<RoleRequestOutput>> roleRequest(RoleRequestInput input, SwitchConnectionDistinguisher cookie);
/**
- * send set async message to switch
+ * send set async message to switch.
*
* @param input
* - message
Future<RpcResult<java.lang.Void>> setAsync(SetAsyncInput input, SwitchConnectionDistinguisher cookie);
/**
- * send set config message to switch
+ * send set config message to switch.
*
* @param input
* - message
Future<RpcResult<java.lang.Void>> setConfig(SetConfigInput input, SwitchConnectionDistinguisher cookie);
/**
- * send table modification message to switch
+ * send table modification message to switch.
*
* @param input
* - message
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.GetFeaturesOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.PortGrouping;
-/**
- * @author mirehak
- */
public interface SessionContext {
/**
- * @return primary connection wrapper
+ * return primary connection wrapper.
*/
ConnectionConductor getPrimaryConductor();
/**
- * @return the features of corresponding switch
+ * return the features of corresponding switch.
*/
GetFeaturesOutput getFeatures();
/**
+ * Auxiliary connections.
* @param auxiliaryKey key under which the auxiliary conductor is stored
* @return list of auxiliary connection wrappers
*/
SwitchConnectionDistinguisher auxiliaryKey);
/**
- * @return entries of all auxiliary connections wrapped in conductors in this session
+ * return entries of all auxiliary connections wrapped in conductors in this session.
*/
Set<Entry<SwitchConnectionDistinguisher, ConnectionConductor>> getAuxiliaryConductors();
/**
- * register new auxiliary connection wrapped in {@link ConnectionConductor}
+ * register new auxiliary connection wrapped in {@link ConnectionConductor}.
*
- * @param auxiliaryKey
- * @param conductor
+ * @param auxiliaryKey key
+ * @param conductor connection conductor
*/
void addAuxiliaryConductor(SwitchConnectionDistinguisher auxiliaryKey,
ConnectionConductor conductor);
/**
- * @param connectionCookie
+ * Remove conductor.
+ * @param connectionCookie cookie
* @return removed connectionConductor
*/
ConnectionConductor removeAuxiliaryConductor(
SwitchConnectionDistinguisher connectionCookie);
/**
- * @return true if this session is valid
+ * return true if this session is valid.
*/
boolean isValid();
/**
+ * Setter.
* @param valid the valid to set
*/
void setValid(boolean valid);
/**
- * @return the sessionKey
+ * return the sessionKey.
*/
SwitchSessionKeyOF getSessionKey();
Boolean getPortBandwidth(Long portNumber);
/**
- * Returns True if the port is enabled,
+ * Returns True if the port is enabled.
*
- * @param portNumber
+ * @param portNumber port number
* @return True if the port is enabled
*/
boolean isPortEnabled(long portNumber);
/**
* Returns True if the port is enabled.
*
- * @param port
+ * @param port port
* @return True if the port is enabled
*/
boolean isPortEnabled(PortGrouping port);
// TODO:: add listeners here, manager will set them and conductor use them
/**
- * get message dispatch service to send the message to switch
+ * get message dispatch service to send the message to switch.
*
* @return the message service
*/
IMessageDispatchService getMessageDispatchService();
/**
- * @return the unique xid for this session
+ * return the unique xid for this session.
+ * @return xid
*/
Long getNextXid();
/**
+ * Setter.
* @param registration provider composite registration
*/
void setProviderRegistration(ModelDrivenSwitchRegistration registration);
/**
- * @return provider composite registration
+ * return provider composite registration.
+ * @return ModelDrivenSwitchRegistration
*/
ModelDrivenSwitchRegistration getProviderRegistration();
/**
- * @return seed value for random operations
+ * return seed value for random operations.
+ * @return int
*/
int getSeed();
/**
- * @return (wrapped) notification enqueue service - {@link NotificationQueueWrapper}
+ * return (wrapped) notification enqueue service - {@link NotificationQueueWrapper}.
+ * @return NotificationEnqueuer
*/
NotificationEnqueuer getNotificationEnqueuer();
/**
- * @param roleOnDevice
+ * Setter.
+ * @param roleOnDevice role
*/
void setRoleOnDevice(ControllerRole roleOnDevice);
/**
- * @return actual role
+ * return actual role.
+ * @return role
*/
ControllerRole getRoleOnDevice();
}
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
import java.util.EventListener;
/**
- * listens on session changes
+ * listens on session changes.
*/
public interface SessionListener extends EventListener {
/**
- * fired upon session added
- * @param sessionKey
- * @param context
+ * fired upon session added.
+ * @param sessionKey session key
+ * @param context context
*/
void onSessionAdded(SwitchSessionKeyOF sessionKey, SessionContext context);
/**
- * fired upon session removed
- * @param context
+ * fired upon session removed.
+ * @param context context
*/
void onSessionRemoved(SessionContext context);
+
void setRole(SessionContext context);
}
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
import org.opendaylight.yangtools.yang.binding.DataContainer;
import org.opendaylight.yangtools.yang.binding.DataObject;
-/**
- * @author mirehak
- */
public interface SessionManager extends AutoCloseable {
/**
- * @param sessionKey
+ * primary connection.
+ * @param sessionKey session key
* @return corresponding conductor, holding {@link ConnectionAdapter} to
- * primary connection
*/
- public SessionContext getSessionContext(SwitchSessionKeyOF sessionKey);
+ SessionContext getSessionContext(SwitchSessionKeyOF sessionKey);
/**
- * disconnect all underlying {@link ConnectionAdapter}s and notify listeners
+ * disconnect all underlying {@link ConnectionAdapter}s and notify listeners.
*
- * @param sessionKey
+ * @param sessionKey session key
*/
- public void invalidateSessionContext(SwitchSessionKeyOF sessionKey);
+ void invalidateSessionContext(SwitchSessionKeyOF sessionKey);
/**
- * register session context
+ * register session context.
*
- * @param sessionKey
- * @param context
+ * @param sessionKey session key
+ * @param context context
*/
- public void addSessionContext(SwitchSessionKeyOF sessionKey, SessionContext context);
- public void setRole(SessionContext context);
+ void addSessionContext(SwitchSessionKeyOF sessionKey, SessionContext context);
+
+ void setRole(SessionContext context);
/**
* disconnect particular auxiliary {@link ConnectionAdapter}, identified by
- * sessionKey and connectionCookie
+ * sessionKey and connectionCookie.
*
- * @param sessionKey
- * @param connectionCookie
+ * @param sessionKey session key
+ * @param connectionCookie cookie
*/
- public void invalidateAuxiliary(SwitchSessionKeyOF sessionKey,
+ void invalidateAuxiliary(SwitchSessionKeyOF sessionKey,
SwitchConnectionDistinguisher connectionCookie);
/**
- * @param connectionConductor
+ * Invalidate on disconnect.
+ * @param connectionConductor connection conductor.
*/
- public void invalidateOnDisconnect(ConnectionConductor connectionConductor);
+ void invalidateOnDisconnect(ConnectionConductor connectionConductor);
/**
- * @param translatorMapping
+ * Setter.
+ * @param translatorMapping translators
*/
- public void setTranslatorMapping(Map<TranslatorKey, Collection<IMDMessageTranslator<OfHeader, List<DataObject>>>> translatorMapping);
+ void setTranslatorMapping(
+ Map<TranslatorKey, Collection<IMDMessageTranslator<OfHeader, List<DataObject>>>> translatorMapping);
/**
+ * Getter.
* @return translator mapping
*/
- public Map<TranslatorKey, Collection<IMDMessageTranslator<OfHeader, List<DataObject>>>> getTranslatorMapping();
+ Map<TranslatorKey, Collection<IMDMessageTranslator<OfHeader, List<DataObject>>>> getTranslatorMapping();
/**
- * @param notificationProviderService
+ * Setter.
+ * @param notificationProviderService notification provider
*/
- public void setNotificationProviderService(NotificationProviderService notificationProviderService);
+ void setNotificationProviderService(NotificationProviderService notificationProviderService);
/**
+ * Getter.
* @return notificationServiceProvider
*/
- public DataBroker getDataBroker();
+ DataBroker getDataBroker();
/**
- * @param dataBroker
+ * Setter.
+ * @param dataBroker databroker
*/
- public void setDataBroker(DataBroker dataBroker);
+ void setDataBroker(DataBroker dataBroker);
/**
+ * Getter.
* @return notificationServiceProvider
*/
- public NotificationProviderService getNotificationProviderService();
+ NotificationProviderService getNotificationProviderService();
/**
- * @param listener
+ * Session listener registration.
+ * @param listener listener
* @return registration
*/
- public ListenerRegistration<SessionListener> registerSessionListener(SessionListener listener);
+ ListenerRegistration<SessionListener> registerSessionListener(SessionListener listener);
/**
+ * Getter.
* @return popListener mapping, key=message type; value=collection of listeners
*/
- public Map<Class<? extends DataObject>, Collection<PopListener<DataObject>>> getPopListenerMapping();
+ Map<Class<? extends DataObject>, Collection<PopListener<DataObject>>> getPopListenerMapping();
/**
+ * Setter.
* @param popListenerMapping the popListenerMapping to set
*/
- void setPopListenerMapping(Map<Class<? extends DataObject>, Collection<PopListener<DataObject>>> popListenerMapping);
+ void setPopListenerMapping(
+ Map<Class<? extends DataObject>, Collection<PopListener<DataObject>>> popListenerMapping);
/**
- * @param rpcPoolDelegator
+ * Setter.
+ * @param rpcPoolDelegator rpc pool delegator
*/
void setRpcPool(ListeningExecutorService rpcPoolDelegator);
/**
+ * Getter.
* @return the rpcPool instance
*/
ListeningExecutorService getRpcPool();
/**
- * @param messageSpy
+ * Setter.
+ * @param messageSpy message spy
*/
void setMessageSpy(MessageSpy<DataContainer> messageSpy);
/**
+ * Getter.
* @return the messageSpy
*/
MessageSpy<DataContainer> getMessageSpy();
/**
+ * Getter.
* @return collection of current sessions
*/
Collection<SessionContext> getAllSessions();
import java.math.BigInteger;
-/**
- * @author mirehak
- */
public class SwitchSessionKeyOF {
private BigInteger datapathId;
/**
- * default ctor
+ * default ctor.
*/
public SwitchSessionKeyOF() {
// NOOP
}
-
+
/**
+ * Setter.
* @param datapathId the datapathId to set
*/
public void setDatapathId(BigInteger datapathId) {
this.datapathId = datapathId;
}
-
+
/**
+ * Getter.
* @return the datapathId
*/
public byte[] getId() {
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.OfHeader;
/**
+ * Enqueuer.
* @param <I> type of queue items (IN)
*/
public interface Enqueuer<I> {
/**
+ * item to be enqueued.
* @param queueItem item to be enqueued
*/
void enqueueQueueItem(I queueItem);
/**
- * @param queueItem
+ * for testing and comparing purposes - this strategy blocks netty threads.
+ * @param queueItem item
* @deprecated for testing and comparing purposes - this strategy blocks netty threads
*/
@Deprecated
-/**
+/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
+ *
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
package org.opendaylight.openflowplugin.api.openflow.md.queue;
/**
- * message harvester simple control
+ * message harvester simple control.
*/
public interface HarvesterHandle {
/**
- * wakeup harvester in case it is in phase of starving sleep
+ * wakeup harvester in case it is in phase of starving sleep.
*/
void ping();
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
import java.util.Collection;
/**
+ * MessageSourcePollRegistrator.
* @param <I> message wrapping type (IN)
*
*/
public interface MessageSourcePollRegistrator<I> {
/**
+ * Message source to read from during processing.
* @param messageSource to read from during processing
* @return closeable registration
*/
AutoCloseable registerMessageSource(I messageSource);
/**
+ * Unregister message source.
* @param messageSource to be unregistered
* @return true if successfully unregistered
*/
boolean unregisterMessageSource(I messageSource);
/**
+ * Getter.
* @return collection of registered message sources
*/
Collection<I> getMessageSources();
/**
+ * Getter.
* @return the harvest handle
*/
HarvesterHandle getHarvesterHandle();
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
/**
- * @author mirehak
+ * PopListener.
* @param <T> result type
*
*/
public interface PopListener<T> {
-
- /**
- * @param processedMessage
- */
void onPop(T processedMessage);
-
}
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
import org.opendaylight.openflowplugin.api.openflow.md.core.ConnectionConductor;
/**
+ * Queue item.
* @param <I> input message type (IN)
*/
public interface QueueItem<I> {
/**
+ * Getter.
* @return wrapped message
*/
I getMessage();
/**
+ * Getter.
* @return conductor the message arrived to
*/
ConnectionConductor getConnectionConductor();
/**
+ * Getter.
* @return queue type associated to this item
*/
QueueKeeper.QueueType getQueueType();
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* This processing mechanism based on queue. Processing consists of 2 steps: translate and publish.
* Proposed workflow (might slightly deviate in implementations):
* <ol>
- * <li>messages of input type are pushed in (via {@link QueueKeeper#push(Object, ConnectionConductor, QueueType)} and similar)</li>
+ * <li>messages of input type are pushed in (via {@link QueueKeeper} and similar)</li>
* <li>ticket (executable task) is build upon each pushed message and enqueued</li>
* <li>ticket is translated using appropriate translator</li>
* <li>ticket is dequeued and result is published by appropriate popListener</li>
*/
public interface QueueKeeper<I> extends AutoCloseable {
- /** type of message enqueue */
- public enum QueueType {
- /** ordered processing */
+ /** type of message enqueue. */
+ enum QueueType {
+ /** ordered processing. */
DEFAULT,
- /** unordered processing - bypass fair processing */
- UNORDERED}
+ /** unordered processing - bypass fair processing. */
+ UNORDERED
+ }
/**
- * enqueue message for processing
- * @param message
+ * enqueue message for processing.
+ * @param message message
* @param conductor source of message
* @param queueType - {@link QueueType#DEFAULT} if message order matters, {@link QueueType#UNORDERED} otherwise
*/
void push(I message, ConnectionConductor conductor, QueueType queueType);
/**
- * @return oldest item from queue - if available and remove it from queue
+ * oldest item from queue - if available and remove it from queue.
+ * @return oldest item from queue
*/
QueueItem<I> poll();
/**
+ * Setter.
* @param processingRegistration the processingRegistration to set (in order to provide close method)
*/
void setPollRegistration(AutoCloseable processingRegistration);
-/**
+/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* This processing mechanism based on queue. Processing consists of 2 steps: translate and publish.
* Proposed workflow (might slightly deviate in implementations):
* <ol>
- * <li>messages of input type are pushed in (via {@link QueueKeeper#push(Object, org.opendaylight.openflowplugin.api.openflow.md.core.ConnectionConductor, org.opendaylight.openflowplugin.api.openflow.md.queue.QueueKeeper.QueueType)} and similar)</li>
+ * <li>messages of input type are pushed in (via {@link QueueKeeper} and similar)</li>
* <li>ticket (executable task) is build upon each pushed message and enqueued</li>
* <li>ticket is translated using appropriate translator</li>
* <li>ticket is dequeued and result is published by appropriate popListener</li>
public interface QueueProcessor<I, O> extends MessageSourcePollRegistrator<QueueKeeper<I>>, Enqueuer<QueueItem<I>> {
/**
+ * translators for message processing.
* @param translatorMapping translators for message processing
*/
void setTranslatorMapping(Map<TranslatorKey, Collection<IMDMessageTranslator<I, List<O>>>> translatorMapping);
/**
+ * listeners invoked when processing done.
* @param popListenersMapping listeners invoked when processing done
*/
void setPopListenersMapping(Map<Class<? extends O>, Collection<PopListener<O>>> popListenersMapping);
public interface WaterMarkListener {
/**
- * When HighWaterMark reached and currently not flooded
+ * When HighWaterMark reached and currently not flooded.
*/
void onHighWaterMark();
/**
- * When LowWaterMark reached and currently flooded
+ * When LowWaterMark reached and currently flooded.
*/
void onLowWaterMark();
}
package org.opendaylight.openflowplugin.api.openflow.md.util;
/**
- * @deprecated enum in api is not something what we would like to see in case it is evolving
- * TODO: remove class for lithium release
- *
- * List of Openflow versions supported by the plugin
- * Note: If you add a version here, make sure to update org.opendaylight.openflowplugin.openflow.md.util.OpenflowPortsUtil as well.
- * Created by kramesha on 5/2/14.
+ * List of Openflow versions supported by the plugin.
+ * Note: If you add a version here,
+ * make sure to update org.opendaylight.openflowplugin.openflow.md.util.OpenflowPortsUtil as well.
+ * @deprecated enum in api is not something what we would like to see in case it is evolving.
*/
public enum OpenflowVersion {
}
/**
+ * Getter.
* @return the version
*/
public short getVersion() {
-/**
+/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
+ *
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
* still null result, poll will return null. <br>
* Iterating keeps last position so this polling is supposed to be fairly
* distributed.
- *
+ *
* @param <T> common item type of zipped queues
*/
public class PollableQueuesPriorityZipper<T> {
private Queue<T> prioritizedSource;
private PollableQueuesZipper<T> zipper;
- /**
- * default ctor
- */
public PollableQueuesPriorityZipper() {
zipper = new PollableQueuesZipper<>();
}
/**
- * Add all member queues before first invocation of
- * {@link PollableQueuesPriorityZipper#poll()}
- *
- * @param queue
- * to be added to group
+ * Add all member queues before first invocation of {@link PollableQueuesPriorityZipper#poll()}.
+ * @param queue to be added to group
*/
public void addSource(Queue<T> queue) {
zipper.addSource(queue);
}
/**
+ * Next common product.
* @return next common product of polling member groups
*/
public T poll() {
-/**
+/*
* Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
+ *
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
/**
* Zipper groups together a list of queues and exposes one poll method. Polling iterates through
- * all groups and returns first not-null result of poll method on each queue. If after polling each
- * grouped queue for one time there is still null result, poll will return null.
+ * all groups and returns first not-null result of poll method on each queue. If after polling each
+ * grouped queue for one time there is still null result, poll will return null.
* <br>
* Iterating keeps last position so this polling is supposed to be fairly distributed.
- *
+ *
* @param <T> common item type of zipped queues
*/
public class PollableQueuesZipper<T> {
-
+
private List<Queue<T>> sources;
private Iterator<Queue<T>> cursor;
-
- /**
- * default ctor
- */
+
public PollableQueuesZipper() {
sources = new ArrayList<>();
}
-
+
/**
- * Add all member queues before first invocation of {@link PollableQueuesZipper#poll()}
+ * Add all member queues before first invocation of {@link PollableQueuesZipper#poll()}.
* @param queue to be added to group
*/
public void addSource(Queue<T> queue) {
}
/**
+ * Next common product.
* @return next common product of polling member groups
*/
public T poll() {
if (cursor == null) {
cursor = Iterators.cycle(sources);
}
-
+
Queue<T> queue;
for (int i = 0; i < sources.size(); i++) {
queue = cursor.next();
break;
}
}
-
+
return item;
}
}
--- /dev/null
+/*
+ * Copyright (c) 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.api.openflow.registry;
+
+import java.util.function.Consumer;
+
+public interface CommonDeviceRegistry<KEY> extends AutoCloseable {
+
+ /**
+ * Store KEY in device registry.
+ * @param key device registry key
+ */
+ void store(KEY key);
+
+ /**
+ * Add mark for specified KEY.
+ * @param key device registry key
+ */
+ void addMark(KEY key);
+
+ /**
+ * Process marked keys.
+ */
+ void processMarks();
+
+ /**
+ * Iterate over all keys in device registry.
+ * @param consumer key consumer
+ */
+ void forEach(Consumer<KEY> consumer);
+
+ /**
+ * Get device registry size.
+ * @return device registry size
+ */
+ int size();
+
+ @Override
+ void close();
+
+}
import com.google.common.base.Optional;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
-import java.util.Map;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.opendaylight.openflowplugin.api.openflow.registry.CommonDeviceRegistry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
/**
* Registry for mapping composite-key of flow ({@link FlowRegistryKey}) from device view
* to flow descriptor ({@link FlowDescriptor}) as the identifier of the same flow in data store.
*/
-public interface DeviceFlowRegistry extends AutoCloseable {
+public interface DeviceFlowRegistry extends CommonDeviceRegistry<FlowRegistryKey> {
ListenableFuture<List<Optional<FlowCapableNode>>> fill();
- FlowDescriptor retrieveIdForFlow(FlowRegistryKey flowRegistryKey);
+ void storeDescriptor(@Nonnull FlowRegistryKey flowRegistryKey, @Nonnull FlowDescriptor flowDescriptor);
- void store(FlowRegistryKey flowRegistryKey, FlowDescriptor flowDescriptor);
+ @Nullable
+ FlowDescriptor retrieveDescriptor(@Nonnull FlowRegistryKey flowRegistryKey);
- FlowId storeIfNecessary(FlowRegistryKey flowRegistryKey);
-
- void removeDescriptor(FlowRegistryKey flowRegistryKey);
-
- void update(FlowRegistryKey newFlowRegistryKey,FlowDescriptor flowDescriptor);
-
- Map<FlowRegistryKey, FlowDescriptor> getAllFlowDescriptors();
-
- @Override
- void close();
-}
\ No newline at end of file
+}
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 9.4.2015.
- */
public interface FlowDescriptor {
FlowId getFlowId();
TableKey getTableKey();
+
}
package org.opendaylight.openflowplugin.api.openflow.registry.group;
-import java.util.List;
+import org.opendaylight.openflowplugin.api.openflow.registry.CommonDeviceRegistry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 15.4.2015.
- */
-public interface DeviceGroupRegistry extends AutoCloseable {
-
- void store(GroupId groupId);
-
- void markToBeremoved(GroupId groupId);
-
- void removeMarked();
-
- List<GroupId> getAllGroupIds();
+public interface DeviceGroupRegistry extends CommonDeviceRegistry<GroupId> {
- @Override
- void close();
}
package org.opendaylight.openflowplugin.api.openflow.registry.meter;
-import java.util.List;
+import org.opendaylight.openflowplugin.api.openflow.registry.CommonDeviceRegistry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 15.4.2015.
- */
-public interface DeviceMeterRegistry extends AutoCloseable {
-
- void store(MeterId meterId);
-
- void markToBeremoved(MeterId meterId);
-
- void removeMarked();
-
- List<MeterId> getAllMeterIds();
-
- @Override
- void close();
+public interface DeviceMeterRegistry extends CommonDeviceRegistry<MeterId> {
}
import org.opendaylight.openflowplugin.api.openflow.OFPManager;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceInfo;
-import org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler;
/**
* The RPC Manager will maintain an RPC Context for each online switch. RPC context for device is created when
- * {@link DeviceInitializationPhaseHandler#onDeviceContextLevelUp(DeviceInfo, org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleService)}
+ * {@link org.opendaylight.openflowplugin.api.openflow.device.handlers.DeviceInitializationPhaseHandler#onDeviceContextLevelUp(DeviceInfo, org.opendaylight.openflowplugin.api.openflow.lifecycle.LifecycleService)}
* is called.
*/
public interface RpcManager extends OFPManager {
void setStatisticsRpcEnabled(boolean statisticsRpcEnabled);
+
+ void setRpcRequestQuota(int rpcRequestQuota);
+
}
container openflow-provider-config {
leaf rpc-requests-quota {
- type uint32;
+ description "Quota for maximum number of RPC requests";
+ type non-zero-uint16-type;
default 20000;
}
leaf switch-features-mandatory {
+ description "This parameter indicates whether it is mandatory for
+ switch to support OF1.3 features: table, flow, meter, group. If this
+ is set to true and switch doesn't support these features, its
+ connection will be denied.";
type boolean;
default false;
}
leaf global-notification-quota {
+ description "Global notification quota";
type uint32;
default 64000;
}
leaf is-statistics-polling-on {
+ description "If enabled, periodic statistics gathering will be
+ turned on";
type boolean;
default "true";
}
leaf is-statistics-rpc-enabled {
- description "Deprecated - exposing backward compatible statistics rpcs providing result in form of async notification";
+ status deprecated;
+ description "Expose backward compatible statistics rpcs providing
+ result in form of async notification. This is deprecated, use direct
+ statistics instead.";
type boolean;
default "false";
}
leaf barrier-interval-timeout-limit {
+ description "Barrier timeout";
type non-zero-uint32-type;
default 500;
}
leaf barrier-count-limit {
+ description "Barrier limit";
type non-zero-uint16-type;
default 25600;
}
leaf echo-reply-timeout {
+ description "How long we should wait for echo reply (value is in
+ milliseconds)";
type non-zero-uint32-type;
default 2000;
}
}
leaf thread-pool-timeout {
- description "After how much time (in seconds) of inactivity will be threads in pool terminated";
+ description "After how much time (in seconds) of inactivity will be
+ threads in pool terminated";
type uint32;
default 60;
}
}
leaf skip-table-features {
- description "Ability to skip pulling and storing of large table features. These features are still
- available via rpc but if set to true then maintenance in DS will be omitted";
+ description "Ability to skip pulling and storing of large table
+ features. These features are still available via rpc but if set to
+ true then maintenance in DS will be omitted";
type boolean;
default "true";
}
leaf basic-timer-delay {
- description "initial delay used in polling the statistics, value is in milliseconds";
+ description "Initial delay used in polling the statistics, value is
+ in milliseconds";
type non-zero-uint32-type;
default 3000;
}
leaf maximum-timer-delay {
- description "maximum timer delay is the wait time to collect next statistics
- used in polling the statistics, value is in milliseconds";
+ description "Maximum timer delay is the wait time to collect next
+ statistics used in polling the statistics, value is in
+ milliseconds";
type non-zero-uint32-type;
default 900000;
}
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
-#All config parameters listed herein
-#require to be configured before switch connection, else requires a switch restart
-#All parameters here will be persisted, however need to change it explicitly for
-#each node of the cluster.
-#Flag to turn flow removed notification on/off.
-#Flow removed notification is turned on by default. Default value true
-enable-flow-removed-notification=true
-#Ability to skip pulling and storing of large table features. These features are still
-#available via rpc but if set to true then maintenance in DS will be omitted
-#Turned off by default. Default value true
-skip-table-features=true
-#Flag exposing backward compatible statistics rpcs providing result in form of async
-#notification.Usage deprecated as of Bo upwards.
-#Turned off by default. Default value false
-is-statistics-rpc-enabled=false
-#Timeout interval in milliseconds between each barrier message.
-#Default value is set to 500 milliseconds
-barrier-interval-timeout-limit=500
-#Maximum outbound queue depth
-#Default value is set to 25600
-barrier-count-limit=25600
-#Echo reply timeout specified on the controller side beyond which the connection is deemed dead
-#Default value is set to 2000 milliseconds
-echo-reply-timeout=2000
-#flag to turn on/off statistics polling
-#can be changed on demand
-#Turned on by default. Default value is true
-is-statistics-polling-on=true
-#Default value of basic-timer-delay is 3000
-#basic timer delay is the initial delay used in polling the statistics, value is in milliseconds
-basic-timer-delay=3000
-#Default value of maximum-timer-delay is 900000
-#maximum timer delay is the wait time to collect next statistics
-#used in polling the statistics, value is in milliseconds
-maximum-timer-delay=900000
+# vim:set ft=jproperties:
+################################################################################
+# OpenFlowPlugin configuration file
+#
+# All config parameters listed here require to be configured before switch
+# connection, else requires a switch restart.
+# All parameters here will be persisted, however need to change it explicitly
+# for each node of the cluster.
+################################################################################
+
+#
+# Quota for maximum number of RPC requests
+#
+# rpc-requests-quota=20000
+
+#
+# This parameter indicates whether it is mandatory for switch to support OF1.3
+# features: table, flow, meter, group. If this is set to true and switch doesn't
+# support these features, its connection will be denied.
+#
+# switch-features-mandatory=false
+
+#
+# Global notification quota
+#
+# global-notification-quota=64000
+
+#
+# If enabled, periodic statistics gathering will be turned on
+#
+# is-statistics-polling-on=true
+
+#
+# Expose backward compatible statistics RPCs providing result in form of
+# asynchronous notification. This is deprecated, use direct statistics instead.
+#
+# is-statistics-rpc-enabled=false
+
+#
+# Barrier timeout
+#
+# barrier-interval-timeout-limit=500
+
+#
+# Barrier limit
+#
+# barrier-count-limit=25600
+
+#
+# How long we should wait for echo reply (value is in milliseconds)
+#
+# echo-reply-timeout=2000
+
+#
+# Minimum (starting) number of threads in thread pool
+#
+# thread-pool-min-threads=1
+
+#
+# Maximum number of threads in thread pool
+#
+# thread-pool-max-threads=32000
+
+#
+# After how much time (in seconds) of inactivity will be threads in pool
+# terminated
+#
+# thread-pool-timeout=60
+
+#
+# Turning on flow removed notification
+#
+# enable-flow-removed-notification=true
+
+#
+# Ability to skip pulling and storing of large table features. These features
+# are still available via rpc but if set to true then maintenance in DS will be
+# omitted
+#
+# skip-table-features=true
+
+#
+# Initial delay used in polling the statistics, value is in milliseconds
+#
+# basic-timer-delay=3000
+
+#
+# Maximum timer delay is the wait time to collect next statistics used in
+# polling the statistics, value is in milliseconds
+#
+# maximum-timer-delay=900000
+
+
+
+#############################################################################
+# #
+# Forwarding Rule Manager Application Configuration #
+# #
+#############################################################################
+
+# Disable the default switch reconciliation mechanism
+# disable-reconciliation=false
+
+# Enable stale marking for switch reconciliation. Once user enable this feature
+# forwarding rule manager will keep track of any change to the config data store
+# while the switch is disconnected from controller. Once switch reconnect to the
+# controller it will apply those changes to the switch and do the reconciliation
+# of other configuration as well.
+# NOTE: This option will be effective only if disable-reconciliation=false.
+# stale-marking-enabled=false
+
+# Number of time forwarding rules manager should retry to reconcile any specific
+# configuration.
+# reconciliation-retry-count=5
\ No newline at end of file
<reference id="openflowPluginProviderFactory"
interface="org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginProviderFactory"/>
- <bean id="openflowPluginProvider" factory-ref="openflowPluginProviderFactory" factory-method="newInstance">
- <argument ref="openflowProviderConfig"/>
- <argument ref="dataBroker"/>
- <argument ref="rpcRegistry"/>
- <argument ref="notificationService"/>
- <argument ref="notificationPublishService"/>
- <argument ref="entityOwnershipService"/>
- <argument>
- <list>
- <ref component-id="defaultSwitchConnProvider"/>
- <ref component-id="legacySwitchConnProvider"/>
- </list>
- </argument>
- <cm:managed-properties persistent-id="org.opendaylight.openflowplugin"
- update-strategy="component-managed"
- update-method="update"/>
- <argument ref="clusterSingletonServiceProvider"/>
- </bean>
+ <bean id="openflowPluginProvider" factory-ref="openflowPluginProviderFactory" factory-method="newInstance">
+ <argument ref="openflowProviderConfig"/>
+ <argument ref="dataBroker"/>
+ <argument ref="rpcRegistry"/>
+ <argument ref="notificationService"/>
+ <argument ref="notificationPublishService"/>
+ <argument ref="entityOwnershipService"/>
+ <argument>
+ <list>
+ <ref component-id="defaultSwitchConnProvider"/>
+ <ref component-id="legacySwitchConnProvider"/>
+ </list>
+ </argument>
+ <argument ref="clusterSingletonServiceProvider"/>
+ <argument ref="blueprintBundleContext" />
+ <cm:managed-properties persistent-id="org.opendaylight.openflowplugin"
+ update-strategy="component-managed"
+ update-method="update"/>
+ </bean>
<service ref="openflowPluginProvider" odl:type="openflow-plugin-provider-impl">
<interfaces>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-common-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.compendium</artifactId>
+ </dependency>
</dependencies>
</project>
/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2016, 2017 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
*/
package org.opendaylight.openflowplugin.impl;
+import java.io.IOException;
+import java.util.Enumeration;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.Optional;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
import org.opendaylight.controller.md.sal.binding.api.NotificationService;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowjava.protocol.spi.connection.SwitchConnectionProvider;
+import org.opendaylight.openflowplugin.api.OFConstants;
+import org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginConfigurationService.PropertyType;
import org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginProvider;
import org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginProviderFactory;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.openflow.provider.config.rev160510.OpenflowProviderConfig;
+import org.osgi.framework.BundleContext;
+import org.osgi.service.cm.Configuration;
+import org.osgi.service.cm.ConfigurationAdmin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final Logger LOG = LoggerFactory.getLogger(OpenFlowPluginProviderFactoryImpl.class);
@Override
- public OpenFlowPluginProvider newInstance(OpenflowProviderConfig providerConfig,
- DataBroker dataBroker,
- RpcProviderRegistry rpcRegistry,
- NotificationService notificationService,
- NotificationPublishService notificationPublishService,
- EntityOwnershipService entityOwnershipService,
- List<SwitchConnectionProvider> switchConnectionProviders,
- ClusterSingletonServiceProvider singletonServiceProvider) {
+ public OpenFlowPluginProvider newInstance(final OpenflowProviderConfig providerConfig,
+ final DataBroker dataBroker,
+ final RpcProviderRegistry rpcRegistry,
+ final NotificationService notificationService,
+ final NotificationPublishService notificationPublishService,
+ final EntityOwnershipService entityOwnershipService,
+ final List<SwitchConnectionProvider> switchConnectionProviders,
+ final ClusterSingletonServiceProvider singletonServiceProvider,
+ final BundleContext bundleContext) {
LOG.info("Initializing new OFP southbound.");
- OpenFlowPluginProvider openflowPluginProvider = new OpenFlowPluginProviderImpl(
- providerConfig.getRpcRequestsQuota(),
- providerConfig.getGlobalNotificationQuota(),
- providerConfig.getThreadPoolMinThreads(),
- providerConfig.getThreadPoolMaxThreads().getValue(),
- providerConfig.getThreadPoolTimeout());
+ final OpenFlowPluginProviderImpl openflowPluginProvider = new OpenFlowPluginProviderImpl(
+ switchConnectionProviders,
+ dataBroker,
+ rpcRegistry,
+ notificationService,
+ notificationPublishService,
+ singletonServiceProvider,
+ entityOwnershipService);
- openflowPluginProvider.setSwitchConnectionProviders(switchConnectionProviders);
- openflowPluginProvider.setDataBroker(dataBroker);
- openflowPluginProvider.setRpcProviderRegistry(rpcRegistry);
- openflowPluginProvider.setNotificationProviderService(notificationService);
- openflowPluginProvider.setNotificationPublishService(notificationPublishService);
- openflowPluginProvider.setSwitchFeaturesMandatory(providerConfig.isSwitchFeaturesMandatory());
- openflowPluginProvider.setFlowRemovedNotification(providerConfig.isEnableFlowRemovedNotification());
- openflowPluginProvider.setIsStatisticsRpcEnabled(providerConfig.isIsStatisticsRpcEnabled());
- openflowPluginProvider.setBarrierCountLimit(providerConfig.getBarrierCountLimit().getValue());
- openflowPluginProvider.setBarrierInterval(providerConfig.getBarrierIntervalTimeoutLimit().getValue());
- openflowPluginProvider.setEchoReplyTimeout(providerConfig.getEchoReplyTimeout().getValue());
- openflowPluginProvider.setStatisticsPollingOn(providerConfig.isIsStatisticsPollingOn());
- openflowPluginProvider.setClusteringSingletonServicesProvider(singletonServiceProvider);
- openflowPluginProvider.setSkipTableFeatures(providerConfig.isSkipTableFeatures());
- openflowPluginProvider.setBasicTimerDelay(providerConfig.getBasicTimerDelay().getValue());
- openflowPluginProvider.setMaximumTimerDelay(providerConfig.getMaximumTimerDelay().getValue());
+ LOG.info("Loading configuration from YANG file");
+ openflowPluginProvider.updateProperty(PropertyType.RPC_REQUESTS_QUOTA, providerConfig.getRpcRequestsQuota().getValue());
+ openflowPluginProvider.updateProperty(PropertyType.GLOBAL_NOTIFICATION_QUOTA, providerConfig.getGlobalNotificationQuota());
+ openflowPluginProvider.updateProperty(PropertyType.SWITCH_FEATURES_MANDATORY, providerConfig.isSwitchFeaturesMandatory());
+ openflowPluginProvider.updateProperty(PropertyType.ENABLE_FLOW_REMOVED_NOTIFICATION, providerConfig.isEnableFlowRemovedNotification());
+ openflowPluginProvider.updateProperty(PropertyType.IS_STATISTICS_RPC_ENABLED, providerConfig.isIsStatisticsRpcEnabled());
+ openflowPluginProvider.updateProperty(PropertyType.BARRIER_COUNT_LIMIT, providerConfig.getBarrierCountLimit().getValue());
+ openflowPluginProvider.updateProperty(PropertyType.BARRIER_INTERVAL_TIMEOUT_LIMIT, providerConfig.getBarrierIntervalTimeoutLimit().getValue());
+ openflowPluginProvider.updateProperty(PropertyType.ECHO_REPLY_TIMEOUT, providerConfig.getEchoReplyTimeout().getValue());
+ openflowPluginProvider.updateProperty(PropertyType.IS_STATISTICS_POLLING_ON, providerConfig.isIsStatisticsPollingOn());
+ openflowPluginProvider.updateProperty(PropertyType.SKIP_TABLE_FEATURES, providerConfig.isSkipTableFeatures());
+ openflowPluginProvider.updateProperty(PropertyType.BASIC_TIMER_DELAY, providerConfig.getBasicTimerDelay().getValue());
+ openflowPluginProvider.updateProperty(PropertyType.MAXIMUM_TIMER_DELAY, providerConfig.getMaximumTimerDelay().getValue());
+ openflowPluginProvider.updateProperty(PropertyType.THREAD_POOL_MIN_THREADS, providerConfig.getThreadPoolMinThreads());
+ openflowPluginProvider.updateProperty(PropertyType.THREAD_POOL_MAX_THREADS, providerConfig.getThreadPoolMaxThreads().getValue());
+ openflowPluginProvider.updateProperty(PropertyType.THREAD_POOL_TIMEOUT, providerConfig.getThreadPoolTimeout());
- openflowPluginProvider.initialize();
+ LOG.info("Loading configuration from properties file");
+ Optional.ofNullable(bundleContext.getServiceReference(ConfigurationAdmin.class.getName())).ifPresent(serviceReference -> {
+ final ConfigurationAdmin configurationAdmin = (ConfigurationAdmin) bundleContext.getService(serviceReference);
+
+ try {
+ final Configuration configuration = configurationAdmin.getConfiguration(OFConstants.CONFIG_FILE_ID);
+
+ Optional.ofNullable(configuration.getProperties()).ifPresent(properties -> {
+ final Enumeration<String> keys = properties.keys();
+ final Map<String, Object> mapProperties = new HashMap<>(properties.size());
- LOG.info("Configured values, " +
- "StatisticsPollingOn:{}, " +
- "SwitchFeaturesMandatory:{}, " +
- "BarrierCountLimit:{}, " +
- "BarrierTimeoutLimit:{}, " +
- "EchoReplyTimeout:{}, " +
- "ThreadPoolMinThreads:{}, " +
- "ThreadPoolMaxThreads:{}, " +
- "ThreadPoolTimeout:{}, " +
- "NotificationFlowRemovedOff:{}, " +
- "BasicTimerDelay:{}, "+
- "MaximumTimerDelay:{} ",
- providerConfig.isIsStatisticsPollingOn(),
- providerConfig.isSwitchFeaturesMandatory(),
- providerConfig.getBarrierCountLimit().getValue(),
- providerConfig.getBarrierIntervalTimeoutLimit().getValue(),
- providerConfig.getEchoReplyTimeout().getValue(),
- providerConfig.getThreadPoolMinThreads(),
- providerConfig.getThreadPoolMaxThreads().getValue(),
- providerConfig.getThreadPoolTimeout(),
- providerConfig.isEnableFlowRemovedNotification(),
- providerConfig.getBasicTimerDelay().getValue(),
- providerConfig.getMaximumTimerDelay().getValue());
+ while (keys.hasMoreElements()) {
+ final String key = keys.nextElement();
+ final Object value = properties.get(key);
+ mapProperties.put(key, value);
+ }
+ openflowPluginProvider.update(mapProperties);
+ });
+ } catch (IOException e) {
+ LOG.debug("Failed to load " + OFConstants.CONFIG_FILE_ID + " configuration file", e);
+ }
+ });
+
+ openflowPluginProvider.initialize();
return openflowPluginProvider;
}
}
package org.opendaylight.openflowplugin.impl;
-
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import io.netty.util.HashedWheelTimer;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
import javax.annotation.Nonnull;
import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanRegistrationException;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
import org.opendaylight.controller.md.sal.binding.api.NotificationService;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowjava.protocol.spi.connection.SwitchConnectionProvider;
+import org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginConfigurationService;
import org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginProvider;
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionManager;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceManager;
import org.opendaylight.openflowplugin.openflow.md.core.session.OFSessionUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import io.netty.util.HashedWheelTimer;
-public class OpenFlowPluginProviderImpl implements OpenFlowPluginProvider, OpenFlowPluginExtensionRegistratorProvider {
+public class OpenFlowPluginProviderImpl implements OpenFlowPluginProvider, OpenFlowPluginConfigurationService, OpenFlowPluginExtensionRegistratorProvider {
private static final Logger LOG = LoggerFactory.getLogger(OpenFlowPluginProviderImpl.class);
private static final MessageIntelligenceAgency messageIntelligenceAgency = new MessageIntelligenceAgencyImpl();
private static final int TICKS_PER_WHEEL = 500;
// 0.5 sec.
private static final long TICK_DURATION = 10;
- private static final Integer DEFAULT_BARRIER_COUNT = 25600;
- private static final Long DEFAULT_ECHO_TIMEOUT = 2000L;
- private static final Long DEFAULT_BARRIER_TIMEOUT = 500L;
+ private static final String POOL_NAME = "ofppool";
private final HashedWheelTimer hashedWheelTimer = new HashedWheelTimer(TICK_DURATION, TimeUnit.MILLISECONDS, TICKS_PER_WHEEL);
- private final int rpcRequestsQuota;
- private final long globalNotificationQuota;
private final ConvertorManager convertorManager;
+ private final List<SwitchConnectionProvider> switchConnectionProviders;
+ private final DataBroker dataBroker;
+ private final NotificationPublishService notificationPublishService;
+ private final NotificationService notificationProviderService;
+ private final EntityOwnershipService entityOwnershipService;
+ private int rpcRequestsQuota;
+ private long globalNotificationQuota;
private long barrierInterval;
private int barrierCountLimit;
private long echoReplyTimeout;
private RpcProviderRegistry rpcProviderRegistry;
private StatisticsManager statisticsManager;
private ConnectionManager connectionManager;
- private NotificationService notificationProviderService;
- private NotificationPublishService notificationPublishService;
- private ExtensionConverterManager extensionConverterManager;
- private DataBroker dataBroker;
- private Collection<SwitchConnectionProvider> switchConnectionProviders;
- private boolean switchFeaturesMandatory = false;
- private boolean isStatisticsPollingOn = true;
+ private boolean switchFeaturesMandatory;
+ private boolean isStatisticsPollingOn;
private boolean isStatisticsRpcEnabled;
- private boolean isFlowRemovedNotificationOn = true;
- private boolean skipTableFeatures = true;
+ private boolean isFlowRemovedNotificationOn;
+ private boolean skipTableFeatures;
private long basicTimerDelay;
private long maximumTimerDelay;
-
- private final ThreadPoolExecutor threadPool;
private ClusterSingletonServiceProvider singletonServicesProvider;
+ private ExtensionConverterManager extensionConverterManager;
+ private int threadPoolMinThreads;
+ private int threadPoolMaxThreads;
+ private long threadPoolTimeout;
+ private boolean initialized = false;
+ private ThreadPoolLoggingExecutor threadPool;
- public OpenFlowPluginProviderImpl(final long rpcRequestsQuota,
- final long globalNotificationQuota,
- final int threadPoolMinThreads,
- final int threadPoolMaxThreads,
- final long threadPoolTimeout) {
- Preconditions.checkArgument(rpcRequestsQuota > 0 && rpcRequestsQuota <= Integer.MAX_VALUE, "rpcRequestQuota has to be in range <1,%s>", Integer.MAX_VALUE);
- this.rpcRequestsQuota = (int) rpcRequestsQuota;
- this.globalNotificationQuota = Preconditions.checkNotNull(globalNotificationQuota);
-
- // Creates a thread pool that creates new threads as needed, but will reuse previously
- // constructed threads when they are available.
- // Threads that have not been used for x seconds are terminated and removed from the cache.
- threadPool = new ThreadPoolLoggingExecutor(
- Preconditions.checkNotNull(threadPoolMinThreads),
- Preconditions.checkNotNull(threadPoolMaxThreads),
- Preconditions.checkNotNull(threadPoolTimeout), TimeUnit.SECONDS,
- new SynchronousQueue<>(), "ofppool");
- convertorManager = ConvertorManagerFactory.createDefaultManager();
+ public static MessageIntelligenceAgency getMessageIntelligenceAgency() {
+ return messageIntelligenceAgency;
}
- @Override
- public boolean isStatisticsPollingOn() {
- return isStatisticsPollingOn;
+ public OpenFlowPluginProviderImpl(final List<SwitchConnectionProvider> switchConnectionProviders,
+ final DataBroker dataBroker,
+ final RpcProviderRegistry rpcProviderRegistry,
+ final NotificationService notificationProviderService,
+ final NotificationPublishService notificationPublishService,
+ final ClusterSingletonServiceProvider singletonServiceProvider,
+ final EntityOwnershipService entityOwnershipService) {
+ this.switchConnectionProviders = switchConnectionProviders;
+ this.dataBroker = dataBroker;
+ this.rpcProviderRegistry = rpcProviderRegistry;
+ this.notificationProviderService = notificationProviderService;
+ this.notificationPublishService = notificationPublishService;
+ this.singletonServicesProvider = singletonServiceProvider;
+ this.entityOwnershipService = entityOwnershipService;
+ convertorManager = ConvertorManagerFactory.createDefaultManager();
+ extensionConverterManager = new ExtensionConverterManagerImpl();
}
- @Override
- public void setStatisticsPollingOn(final boolean isStatisticsPollingOn) {
- this.isStatisticsPollingOn = isStatisticsPollingOn;
- }
private void startSwitchConnections() {
final List<ListenableFuture<Boolean>> starterChain = new ArrayList<>(switchConnectionProviders.size());
});
}
- @Override
- public boolean isSwitchFeaturesMandatory() {
- return switchFeaturesMandatory;
- }
-
- @Override
- public void setBarrierCountLimit(final int barrierCountLimit) {
- this.barrierCountLimit = barrierCountLimit;
- }
-
- @Override
- public void setBarrierInterval(final long barrierTimeoutLimit) {
- this.barrierInterval = barrierTimeoutLimit;
- }
-
- @Override
- public void setEchoReplyTimeout(final long echoReplyTimeout) {
- this.echoReplyTimeout = echoReplyTimeout;
- }
-
- @Override
- public void setFlowRemovedNotification(boolean isFlowRemovedNotificationOn) {
- this.isFlowRemovedNotificationOn = this.isFlowRemovedNotificationOn;
- }
-
- @Override
- public void setClusteringSingletonServicesProvider(ClusterSingletonServiceProvider singletonServicesProvider) {
- this.singletonServicesProvider = singletonServicesProvider;
- }
-
- @Override
- public void setSkipTableFeatures(final boolean skipTableFeatures){
- this.skipTableFeatures = skipTableFeatures;
- }
-
- @Override
- public void setBasicTimerDelay(long basicTimerDelay) {
- this.basicTimerDelay = basicTimerDelay;
- }
-
- @Override
- public void setMaximumTimerDelay(long maximumTimerDelay) {
- this.maximumTimerDelay = maximumTimerDelay;
- }
-
- @Override
- public void setSwitchFeaturesMandatory(final boolean switchFeaturesMandatory) {
- this.switchFeaturesMandatory = switchFeaturesMandatory;
- }
-
- public static MessageIntelligenceAgency getMessageIntelligenceAgency() {
- return OpenFlowPluginProviderImpl.messageIntelligenceAgency;
- }
-
- @Override
- public void setSwitchConnectionProviders(final Collection<SwitchConnectionProvider> switchConnectionProviders) {
- this.switchConnectionProviders = switchConnectionProviders;
- }
-
- @Override
- public void setDataBroker(final DataBroker dataBroker) {
- this.dataBroker = dataBroker;
- }
-
- @Override
- public void setRpcProviderRegistry(final RpcProviderRegistry rpcProviderRegistry) {
- this.rpcProviderRegistry = rpcProviderRegistry;
- }
-
@Override
public void initialize() {
Preconditions.checkNotNull(dataBroker, "missing data broker");
Preconditions.checkNotNull(notificationProviderService, "missing notification provider service");
Preconditions.checkNotNull(singletonServicesProvider, "missing singleton services provider");
- extensionConverterManager = new ExtensionConverterManagerImpl();
// TODO: copied from OpenFlowPluginProvider (Helium) misusesing the old way of distributing extension converters
// TODO: rewrite later!
OFSessionUtil.getSessionManager().setExtensionConverterProvider(extensionConverterManager);
- connectionManager = new ConnectionManagerImpl(echoReplyTimeout, threadPool);
+ // Creates a thread pool that creates new threads as needed, but will reuse previously
+ // constructed threads when they are available.
+ // Threads that have not been used for x seconds are terminated and removed from the cache.
+ threadPool = new ThreadPoolLoggingExecutor(
+ Preconditions.checkNotNull(threadPoolMinThreads),
+ Preconditions.checkNotNull(threadPoolMaxThreads),
+ Preconditions.checkNotNull(threadPoolTimeout),
+ TimeUnit.SECONDS, new SynchronousQueue<>(), POOL_NAME);
+
+ connectionManager = new ConnectionManagerImpl(threadPool);
+ connectionManager.setEchoReplyTimeout(echoReplyTimeout);
registerMXBean(messageIntelligenceAgency);
deviceManager = new DeviceManagerImpl(dataBroker,
- globalNotificationQuota,
- switchFeaturesMandatory,
- barrierInterval,
- barrierCountLimit,
getMessageIntelligenceAgency(),
- isFlowRemovedNotificationOn,
singletonServicesProvider,
- notificationPublishService,
+ entityOwnershipService,
hashedWheelTimer,
convertorManager,
- skipTableFeatures);
+ notificationPublishService);
+
+ deviceManager.setGlobalNotificationQuota(globalNotificationQuota);
+ deviceManager.setSwitchFeaturesMandatory(switchFeaturesMandatory);
+ deviceManager.setBarrierInterval(barrierInterval);
+ deviceManager.setBarrierCountLimit(barrierCountLimit);
+ deviceManager.setFlowRemovedNotificationOn(isFlowRemovedNotificationOn);
+ deviceManager.setSkipTableFeatures(skipTableFeatures);
((ExtensionConverterProviderKeeper) deviceManager).setExtensionConverterProvider(extensionConverterManager);
- rpcManager = new RpcManagerImpl(rpcProviderRegistry, rpcRequestsQuota, extensionConverterManager, convertorManager, notificationPublishService);
- statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, isStatisticsPollingOn, hashedWheelTimer,
- convertorManager,basicTimerDelay,maximumTimerDelay);
+ rpcManager = new RpcManagerImpl(rpcProviderRegistry, extensionConverterManager, convertorManager, notificationPublishService);
+ rpcManager.setRpcRequestQuota(rpcRequestsQuota);
+
+ statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, hashedWheelTimer, convertorManager);
+ statisticsManager.setBasicTimerDelay(basicTimerDelay);
+ statisticsManager.setMaximumTimerDelay(maximumTimerDelay);
+ statisticsManager.setIsStatisticsPollingOn(isStatisticsPollingOn);
/* Initialization Phase ordering - OFP Device Context suite */
// CM -> DM -> SM -> RPC -> Role -> DM
deviceManager.initialize();
startSwitchConnections();
+ initialized = true;
}
@Override
- public void update(Map<String,Object> props) {
- LOG.debug("Update managed properties = {}", props.toString());
-
- if(deviceManager != null) {
- if (props.containsKey("notification-flow-removed-off")) {
- deviceManager.setFlowRemovedNotificationOn(Boolean.valueOf(props.get("enable-flow-removed-notification").toString()));
+ public void update(@Nonnull final Map<String, Object> properties) {
+ properties.forEach((key, value) -> {
+ final PropertyType propertyType = PropertyType.forValue(key);
+
+ if (Objects.nonNull(propertyType)) {
+ updateProperty(propertyType, value);
+ } else if (!key.equals("service.pid") && !key.equals("felix.fileinstall.filename")) {
+ LOG.warn("Unsupported configuration property '{}={}'", key, value);
}
- if (props.containsKey("skip-table-features")) {
- deviceManager.setSkipTableFeatures(Boolean.valueOf(props.get("skip-table-features").toString()));
- }
- if (props.containsKey("barrier-count-limit")) {
- try {
- deviceManager.setBarrierCountLimit(Integer.valueOf(props.get("barrier-count-limit").toString()));
- } catch (NumberFormatException ex) {
- deviceManager.setBarrierCountLimit(DEFAULT_BARRIER_COUNT);
- }
- }
- if (props.containsKey("barrier-interval-timeout-limit")){
- try {
- deviceManager.setBarrierInterval(Long.valueOf(props.get("barrier-interval-timeout-limit").toString()));
- } catch (NumberFormatException ex) {
- deviceManager.setBarrierInterval(DEFAULT_BARRIER_TIMEOUT);
- }
+ });
+ }
+
+ private void doPropertyUpdate(final PropertyType propertyType,
+ final boolean modifiable,
+ final Object origValue,
+ final Object newValue,
+ final Consumer<Object> successCallback) {
+ if (initialized) {
+ if (Objects.equals(origValue, newValue)) {
+                LOG.debug("{} config parameter is already set to {}", propertyType, origValue);
+ return;
+ } else if (!modifiable) {
+ LOG.warn("{} update ({} -> {}) is not allowed after controller start", propertyType, origValue, newValue);
+ return;
}
}
- if(rpcManager != null && props.containsKey("is-statistics-rpc-enabled")){
- rpcManager.setStatisticsRpcEnabled(Boolean.valueOf((props.get("is-statistics-rpc-enabled").toString())));
- }
+ successCallback.accept(newValue);
+ LOG.info("{} config parameter is updated ({} -> {})", propertyType, origValue, newValue);
+ }
- if (connectionManager != null && props.containsKey("echo-reply-timeout") ){
- try {
- connectionManager.setEchoReplyTimeout(Long.valueOf(props.get("echo-reply-timeout").toString()));
- }catch (NumberFormatException ex){
- connectionManager.setEchoReplyTimeout(DEFAULT_ECHO_TIMEOUT);
+ @Override
+ public void updateProperty(@Nonnull final PropertyType key, @Nonnull final Object value) {
+ try {
+ final String sValue = value.toString();
+ final Consumer<Object> successCallback;
+ final boolean modifiable;
+ final Object oldValue;
+ final Object newValue;
+
+ switch (key) {
+ case RPC_REQUESTS_QUOTA:
+ successCallback = (result) -> {
+ rpcRequestsQuota = (int) result;
+
+ if (initialized) {
+ rpcManager.setRpcRequestQuota(rpcRequestsQuota);
+ }
+ };
+
+ oldValue = rpcRequestsQuota;
+ newValue = Integer.valueOf(sValue);
+ modifiable = true;
+ break;
+ case SWITCH_FEATURES_MANDATORY:
+ successCallback = (result) -> {
+ switchFeaturesMandatory = (boolean) result;
+
+ if (initialized) {
+ deviceManager.setSwitchFeaturesMandatory(switchFeaturesMandatory);
+ }
+ };
+
+ oldValue = switchFeaturesMandatory;
+ newValue = Boolean.valueOf(sValue);
+ modifiable = true;
+ break;
+ case GLOBAL_NOTIFICATION_QUOTA:
+ successCallback = (result) -> {
+ globalNotificationQuota = (long) result;
+
+ if (initialized) {
+ deviceManager.setGlobalNotificationQuota(globalNotificationQuota);
+ }
+ };
+
+ oldValue = globalNotificationQuota;
+ newValue = Long.valueOf(sValue);
+ modifiable = true;
+ break;
+ case IS_STATISTICS_POLLING_ON:
+ successCallback = (result) -> {
+ isStatisticsPollingOn = (boolean) result;
+
+ if (initialized) {
+ statisticsManager.setIsStatisticsPollingOn(isStatisticsPollingOn);
+ }
+ };
+
+ oldValue = isStatisticsPollingOn;
+ newValue = Boolean.valueOf(sValue);
+ modifiable = true;
+ break;
+ case IS_STATISTICS_RPC_ENABLED:
+ successCallback = (result) -> {
+ isStatisticsRpcEnabled = (boolean) result;
+
+ if (initialized) {
+ rpcManager.setStatisticsRpcEnabled(isStatisticsRpcEnabled);
+ }
+ };
+
+ oldValue = isStatisticsRpcEnabled;
+ newValue = Boolean.valueOf(sValue);
+ modifiable = true;
+ break;
+ case BARRIER_INTERVAL_TIMEOUT_LIMIT:
+ successCallback = (result) -> {
+ barrierInterval = (long) result;
+
+ if (initialized) {
+ deviceManager.setBarrierInterval(barrierInterval);
+ }
+ };
+
+ oldValue = barrierInterval;
+ newValue = Long.valueOf(sValue);
+ modifiable = true;
+ break;
+ case BARRIER_COUNT_LIMIT:
+ successCallback = (result) -> {
+ barrierCountLimit = (int) result;
+
+ if (initialized) {
+ deviceManager.setBarrierCountLimit(barrierCountLimit);
+ }
+ };
+
+ oldValue = barrierCountLimit;
+ newValue = Integer.valueOf(sValue);
+ modifiable = true;
+ break;
+ case ECHO_REPLY_TIMEOUT:
+ successCallback = (result) -> {
+ echoReplyTimeout = (long) result;
+
+ if (initialized) {
+ connectionManager.setEchoReplyTimeout(echoReplyTimeout);
+ }
+ };
+
+ oldValue = echoReplyTimeout;
+ newValue = Long.valueOf(sValue);
+ modifiable = true;
+ break;
+ case THREAD_POOL_MIN_THREADS:
+ successCallback = (result) -> threadPoolMinThreads = (int) result;
+ oldValue = threadPoolMinThreads;
+ newValue = Integer.valueOf(sValue);
+ modifiable = false;
+ break;
+ case THREAD_POOL_MAX_THREADS:
+ successCallback = (result) -> threadPoolMaxThreads = (int) result;
+ oldValue = threadPoolMaxThreads;
+ newValue = Integer.valueOf(sValue);
+ modifiable = false;
+ break;
+ case THREAD_POOL_TIMEOUT:
+ successCallback = (result) -> threadPoolTimeout = (long) result;
+ oldValue = threadPoolTimeout;
+ newValue = Long.valueOf(sValue);
+ modifiable = false;
+ break;
+ case ENABLE_FLOW_REMOVED_NOTIFICATION:
+ successCallback = (result) -> {
+ isFlowRemovedNotificationOn = (boolean) result;
+
+ if (initialized) {
+ deviceManager.setFlowRemovedNotificationOn(isFlowRemovedNotificationOn);
+ }
+ };
+
+ oldValue = isFlowRemovedNotificationOn;
+ newValue = Boolean.valueOf(sValue);
+ modifiable = true;
+ break;
+ case SKIP_TABLE_FEATURES:
+ successCallback = (result) -> {
+ skipTableFeatures = (boolean) result;
+
+ if (initialized) {
+ deviceManager.setSkipTableFeatures(skipTableFeatures);
+ }
+ };
+
+ oldValue = skipTableFeatures;
+ newValue = Boolean.valueOf(sValue);
+ modifiable = true;
+ break;
+ case BASIC_TIMER_DELAY:
+ successCallback = (result) -> {
+ basicTimerDelay = (long) result;
+
+ if (initialized) {
+ statisticsManager.setBasicTimerDelay(basicTimerDelay);
+ }
+ };
+
+ oldValue = basicTimerDelay;
+ newValue = Long.valueOf(sValue);
+ modifiable = true;
+ break;
+ case MAXIMUM_TIMER_DELAY:
+ successCallback = (result) -> {
+ maximumTimerDelay = (long) result;
+
+ if (initialized) {
+ statisticsManager.setMaximumTimerDelay(maximumTimerDelay);
+ }
+ };
+
+ oldValue = maximumTimerDelay;
+ newValue = Long.valueOf(sValue);
+ modifiable = true;
+ break;
+ default:
+ return;
}
- }
-
- if(statisticsManager != null && props.containsKey("is-statistics-polling-on")){
- statisticsManager.setIsStatisticsPollingOn(Boolean.valueOf(props.get("is-statistics-polling-on").toString()));
- }
- if(statisticsManager != null && props.containsKey("basic-timer-delay")){
- statisticsManager.setBasicTimerDelay(Long.valueOf(props.get("basic-timer-delay").toString()));
+ doPropertyUpdate(key, modifiable, oldValue, newValue, successCallback);
+ } catch (final Exception ex) {
+            LOG.warn("Failed to read configuration property '{}={}'", key, value, ex);
}
+ }
- if(statisticsManager != null && props.containsKey("maximum-timer-delay")){
- statisticsManager.setMaximumTimerDelay(Long.valueOf(props.get("maximum-timer-delay").toString()));
- }
+ @Override
+ public ExtensionConverterRegistrator getExtensionConverterRegistrator() {
+ return extensionConverterManager;
}
+ @Override
+ public void close() throws Exception {
+ initialized = false;
+ //TODO: consider wrapping each manager into try-catch
+ deviceManager.close();
+ rpcManager.close();
+ statisticsManager.close();
+
+ // Manually shutdown all remaining running threads in pool
+ threadPool.shutdown();
+ }
private static void registerMXBean(final MessageIntelligenceAgency messageIntelligenceAgency) {
final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
LOG.warn("Error registering MBean {}", e);
}
}
-
- @Override
- public void setNotificationProviderService(final NotificationService notificationProviderService) {
- this.notificationProviderService = notificationProviderService;
- }
-
- @Override
- public void setNotificationPublishService(final NotificationPublishService notificationPublishProviderService) {
- this.notificationPublishService = notificationPublishProviderService;
- }
-
- @Override
- public ExtensionConverterRegistrator getExtensionConverterRegistrator() {
- return extensionConverterManager;
- }
-
- @Override
- public void setIsStatisticsRpcEnabled(final boolean isStatisticsRpcEnabled) {
- this.isStatisticsRpcEnabled = isStatisticsRpcEnabled;
- }
-
- @Override
- public void close() throws Exception {
- //TODO: consider wrapping each manager into try-catch
- deviceManager.close();
- rpcManager.close();
- statisticsManager.close();
-
- // Manually shutdown all remaining running threads in pool
- threadPool.shutdown();
- }
-}
\ No newline at end of file
+}
private long echoReplyTimeout;
private final ThreadPoolExecutor threadPool;
- public ConnectionManagerImpl(long echoReplyTimeout, final ThreadPoolExecutor threadPool) {
+ public ConnectionManagerImpl(final ThreadPoolExecutor threadPool) {
-        this.echoReplyTimeout = echoReplyTimeout;
this.threadPool = threadPool;
}
private HandshakeManager createHandshakeManager(final ConnectionAdapter connectionAdapter,
final HandshakeListener handshakeListener) {
HandshakeManagerImpl handshakeManager = new HandshakeManagerImpl(connectionAdapter,
- ConnectionConductor.versionOrder.get(0),
- ConnectionConductor.versionOrder);
+ ConnectionConductor.VERSION_ORDER.get(0),
+ ConnectionConductor.VERSION_ORDER);
handshakeManager.setUseVersionBitmap(BITMAP_NEGOTIATION_ENABLED);
handshakeManager.setHandshakeListener(handshakeListener);
handshakeManager.setErrorHandler(new ErrorHandlerSimpleImpl());
this.deviceConnectedHandler = deviceConnectedHandler;
}
+ @Override
public void setEchoReplyTimeout(long echoReplyTimeout){
this.echoReplyTimeout = echoReplyTimeout;
}
@Override
public void onSuccess(@Nullable final RpcResult<BarrierOutput> result) {
if (LOG.isDebugEnabled()) {
- LOG.debug("succeeded by getting sweep barrier after post-handshake for device {}", connectionContext.getNodeId().getValue());
+ LOG.debug("succeeded by getting sweep barrier after post-handshake for device {}", connectionContext.getDeviceInfo().getLOGValue());
}
try {
ConnectionStatus connectionStatusResult = deviceConnectedHandler.deviceConnected(connectionContext);
if (!ConnectionStatus.MAY_CONTINUE.equals(connectionStatusResult)) {
connectionContext.closeConnection(true);
}
- SessionStatistics.countEvent(connectionContext.getNodeId().toString(),
+ SessionStatistics.countEvent(connectionContext.getDeviceInfo().getLOGValue(),
SessionStatistics.ConnectionStatus.CONNECTION_CREATED);
} catch (final Exception e) {
- LOG.error("ConnectionContext initial processing failed: ", e);
- SessionStatistics.countEvent(connectionContext.getNodeId().toString(),
+ LOG.error("ConnectionContext initial processing failed for device {}", connectionContext.getDeviceInfo().getLOGValue(), e);
+ SessionStatistics.countEvent(connectionContext.getDeviceInfo().getLOGValue(),
SessionStatistics.ConnectionStatus.CONNECTION_DISCONNECTED_BY_OFP);
connectionContext.closeConnection(true);
}
@Override
public void onFailure(final Throwable t) {
- LOG.error("failed to get sweep barrier after post-handshake for device {}", connectionContext.getNodeId());
+ LOG.error("failed to get sweep barrier after post-handshake for device {}", connectionContext.getDeviceInfo().getLOGValue(), t);
connectionContext.closeConnection(false);
}
};
*/
public class SystemNotificationsListenerImpl implements SystemNotificationsListener {
- private final ConnectionContext connectionContext;
private static final Logger LOG = LoggerFactory.getLogger(SystemNotificationsListenerImpl.class);
+ private static final long ECHO_XID = 0L;
+
+ private final ConnectionContext connectionContext;
@VisibleForTesting
static final long MAX_ECHO_REPLY_TIMEOUT = 2000;
private final long echoReplyTimeout;
connectionContext.changeStateToTimeouting();
EchoInputBuilder builder = new EchoInputBuilder();
builder.setVersion(features.getVersion());
- Xid xid = new Xid(0L);
+ Xid xid = new Xid(ECHO_XID);
builder.setXid(xid.getValue());
Future<RpcResult<EchoOutput>> echoReplyFuture = connectionContext.getConnectionAdapter().echo(builder.build());
try {
RpcResult<EchoOutput> echoReplyValue = echoReplyFuture.get(echoReplyTimeout, TimeUnit.MILLISECONDS);
- if (echoReplyValue.isSuccessful()) {
+ if (echoReplyValue.isSuccessful() && echoReplyValue.getResult().getXid() == ECHO_XID) {
connectionContext.changeStateToWorking();
shouldBeDisconnected = false;
} else {
import com.google.common.base.Preconditions;
import com.google.common.base.Verify;
import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;
-import java.math.BigInteger;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.math.BigInteger;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
public class DeviceContextImpl implements DeviceContext, ExtensionConverterProviderKeeper {
private static final Logger LOG = LoggerFactory.getLogger(DeviceContextImpl.class);
private volatile CONTEXT_STATE state;
private ClusterInitializationPhaseHandler clusterInitializationPhaseHandler;
private final DeviceManager myManager;
+    private boolean isAddNotificationSent = false;
DeviceContextImpl(
@Nonnull final ConnectionContext primaryConnectionContext,
final ItemLifecycleListener itemLifecycleListener = flowLifeCycleKeeper.getItemLifecycleListener();
if (itemLifecycleListener != null) {
//2. create registry key
- final FlowRegistryKey flowRegKey = FlowRegistryKeyFactory.create(flowRemovedNotification);
+ final FlowRegistryKey flowRegKey = FlowRegistryKeyFactory.create(getDeviceInfo().getVersion(), flowRemovedNotification);
//3. lookup flowId
- final FlowDescriptor flowDescriptor = deviceFlowRegistry.retrieveIdForFlow(flowRegKey);
+ final FlowDescriptor flowDescriptor = deviceFlowRegistry.retrieveDescriptor(flowRegKey);
//4. if flowId present:
if (flowDescriptor != null) {
// a) construct flow path
}
}
+ @Override
+ public void sendNodeAddedNotification() {
+ if (!isAddNotificationSent) {
+ isAddNotificationSent = true;
+ NodeUpdatedBuilder builder = new NodeUpdatedBuilder();
+ builder.setId(getDeviceInfo().getNodeId());
+ builder.setNodeRef(new NodeRef(getDeviceInfo().getNodeInstanceIdentifier()));
+ LOG.debug("Publishing node added notification for {}", builder.build());
+ notificationPublishService.offerNotification(builder.build());
+ }
+ }
+
+ @Override
+ public void sendNodeRemovedNotification() {
+ NodeRemovedBuilder builder = new NodeRemovedBuilder();
+ builder.setNodeRef(new NodeRef(getDeviceInfo().getNodeInstanceIdentifier()));
+ LOG.debug("Publishing node removed notification for {}", builder.build());
+ if (notificationPublishService != null) {
+ notificationPublishService.offerNotification(builder.build());
+ }
+ }
+
@Override
public void processPortStatusMessage(final PortStatusMessage portStatus) {
messageSpy.spyMessage(portStatus.getImplementedInterface(), MessageSpy.STATISTIC_GROUP.FROM_SWITCH_PUBLISHED_SUCCESS);
}
@Override
- public ListenableFuture<Void> stopClusterServices(boolean connectionInterrupted) {
+ public ListenableFuture<Void> stopClusterServices() {
final ListenableFuture<Void> deactivateTxManagerFuture = initialized
? transactionChainManager.deactivateTransactionManager()
: Futures.immediateFuture(null);
+ final boolean connectionInterrupted =
+ this.getPrimaryConnectionContext()
+ .getConnectionState()
+ .equals(ConnectionContext.CONNECTION_STATE.RIP);
if (!connectionInterrupted) {
- final ListenableFuture<Void> makeSlaveFuture = Futures.transform(makeDeviceSlave(), new Function<RpcResult<SetRoleOutput>, Void>() {
- @Nullable
- @Override
- public Void apply(@Nullable RpcResult<SetRoleOutput> setRoleOutputRpcResult) {
- return null;
- }
- });
-
- Futures.addCallback(makeSlaveFuture, new FutureCallback<Void>() {
- @Override
- public void onSuccess(@Nullable Void aVoid) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Role SLAVE was successfully propagated on device, node {}", deviceInfo.getLOGValue());
- }
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.warn("Was not able to set role SLAVE to device on node {} ", deviceInfo.getLOGValue());
- LOG.trace("Error occurred on device role setting, probably connection loss: ", throwable);
- }
- });
-
- return Futures.transform(deactivateTxManagerFuture, new AsyncFunction<Void, Void>() {
- @Override
- public ListenableFuture<Void> apply(Void aVoid) throws Exception {
- // Add fallback to remove device from operational DS if setting slave fails
- return Futures.withFallback(makeSlaveFuture, t ->
- myManager.removeDeviceFromOperationalDS(deviceInfo));
- }
- });
- } else {
- return Futures.transform(deactivateTxManagerFuture, new AsyncFunction<Void, Void>() {
- @Override
- public ListenableFuture<Void> apply(Void aVoid) throws Exception {
- return myManager.removeDeviceFromOperationalDS(deviceInfo);
- }
- });
+ LOG.info("This controller instance is now acting as a non-owner for node {}", deviceInfo.getLOGValue());
}
+
+ return deactivateTxManagerFuture;
+ }
+
+ @Override
+ public void cleanupDeviceData() {
+ myManager.removeDeviceFromOperationalDS(deviceInfo);
}
@Override
} else {
this.state = CONTEXT_STATE.TERMINATION;
}
+ sendNodeRemovedNotification();
}
@Override
LOG.debug("Transaction chain manager for node {} created", deviceInfo.getLOGValue());
}
this.transactionChainManager = new TransactionChainManager(dataBroker, deviceInfo);
- this.deviceFlowRegistry = new DeviceFlowRegistryImpl(dataBroker, deviceInfo.getNodeInstanceIdentifier());
+ this.deviceFlowRegistry = new DeviceFlowRegistryImpl(deviceInfo.getVersion(), dataBroker, deviceInfo.getNodeInstanceIdentifier());
this.deviceGroupRegistry = new DeviceGroupRegistryImpl();
this.deviceMeterRegistry = new DeviceMeterRegistryImpl();
this.initialized = true;
if (LOG.isDebugEnabled()) {
LOG.debug("Role MASTER was successfully set on device, node {}", deviceInfo.getLOGValue());
}
+ sendNodeAddedNotification();
}
@Override
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import com.google.common.base.Verify;
import com.google.common.collect.Iterators;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import io.netty.util.HashedWheelTimer;
import io.netty.util.TimerTask;
+import java.math.BigInteger;
import java.util.Collections;
import java.util.Iterator;
import java.util.Objects;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipChange;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListenerRegistration;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowplugin.impl.device.listener.OpenflowProtocolListenerFullImpl;
import org.opendaylight.openflowplugin.impl.lifecycle.LifecycleServiceImpl;
import org.opendaylight.openflowplugin.impl.services.SalRoleServiceImpl;
+import org.opendaylight.openflowplugin.impl.util.DeviceStateUtil;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.role.service.rev150727.SetRoleOutput;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DeviceManagerImpl implements DeviceManager, ExtensionConverterProviderKeeper {
private static final Logger LOG = LoggerFactory.getLogger(DeviceManagerImpl.class);
+ private static final String SERVICE_ENTITY_TYPE = "org.opendaylight.mdsal.ServiceEntityType";
- private final long globalNotificationQuota;
- private final boolean switchFeaturesMandatory;
+ private long globalNotificationQuota;
+ private boolean switchFeaturesMandatory;
+ private final EntityOwnershipListenerRegistration eosListenerRegistration;
private boolean isFlowRemovedNotificationOn;
private boolean skipTableFeatures;
private static final int SPY_RATE = 10;
private DeviceTerminationPhaseHandler deviceTerminPhaseHandler;
private final ConcurrentMap<DeviceInfo, DeviceContext> deviceContexts = new ConcurrentHashMap<>();
+ private final ConcurrentMap<DeviceInfo, DeviceContext> removeddeviceContexts = new ConcurrentHashMap<>();
private final ConcurrentMap<DeviceInfo, LifecycleService> lifecycleServices = new ConcurrentHashMap<>();
private long barrierIntervalNanos;
private ExtensionConverterProvider extensionConverterProvider;
private ScheduledThreadPoolExecutor spyPool;
private final ClusterSingletonServiceProvider singletonServiceProvider;
+ private final EntityOwnershipService entityOwnershipService;
private final NotificationPublishService notificationPublishService;
private final MessageSpy messageSpy;
private final HashedWheelTimer hashedWheelTimer;
public DeviceManagerImpl(@Nonnull final DataBroker dataBroker,
- final long globalNotificationQuota,
- final boolean switchFeaturesMandatory,
- final long barrierInterval,
- final int barrierCountLimit,
final MessageSpy messageSpy,
- final boolean isFlowRemovedNotificationOn,
final ClusterSingletonServiceProvider singletonServiceProvider,
- final NotificationPublishService notificationPublishService,
+ final EntityOwnershipService entityOwnershipService,
final HashedWheelTimer hashedWheelTimer,
final ConvertorExecutor convertorExecutor,
- final boolean skipTableFeatures) {
+ final NotificationPublishService notificationPublishService) {
+
this.dataBroker = dataBroker;
+ this.entityOwnershipService = entityOwnershipService;
+ this.convertorExecutor = convertorExecutor;
+ this.hashedWheelTimer = hashedWheelTimer;
+ this.spyPool = new ScheduledThreadPoolExecutor(1);
+ this.singletonServiceProvider = singletonServiceProvider;
+ this.notificationPublishService = notificationPublishService;
+ this.messageSpy = messageSpy;
+
+ this.eosListenerRegistration = Verify.verifyNotNull(entityOwnershipService.registerListener
+ (SERVICE_ENTITY_TYPE, this));
/* merge empty nodes to oper DS to predict any problems with missing parent for Node */
final WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
LOG.error("Creation of node failed.", e);
throw new IllegalStateException(e);
}
-
- this.switchFeaturesMandatory = switchFeaturesMandatory;
- this.globalNotificationQuota = globalNotificationQuota;
- this.isFlowRemovedNotificationOn = isFlowRemovedNotificationOn;
- this.skipTableFeatures = skipTableFeatures;
- this.convertorExecutor = convertorExecutor;
- this.hashedWheelTimer = hashedWheelTimer;
- this.barrierIntervalNanos = TimeUnit.MILLISECONDS.toNanos(barrierInterval);
- this.barrierCountLimit = barrierCountLimit;
- this.spyPool = new ScheduledThreadPoolExecutor(1);
- this.singletonServiceProvider = singletonServiceProvider;
- this.notificationPublishService = notificationPublishService;
- this.messageSpy = messageSpy;
}
* in {@link org.opendaylight.openflowplugin.impl.connection.org.opendaylight.openflowplugin.impl.connection.HandshakeContextImpl}
* If context already exist we are in state closing process (connection flapping) and we should not propagate connection close
*/
- if (deviceContexts.containsKey(deviceInfo)) {
- DeviceContext deviceContext = deviceContexts.get(deviceInfo);
- LOG.warn("Node {} already connected disconnecting device. Rejecting connection", deviceInfo.getLOGValue());
- if (!deviceContext.getState().equals(OFPContext.CONTEXT_STATE.TERMINATION)) {
+ DeviceContext current = deviceContexts.get(deviceInfo);
+ if (current != null) {
+ LOG.warn("New connection received from the already connected Node {}. Disconnecting both the connection " +
+ "to add the switch back gracefully.", deviceInfo.getLOGValue());
+ if (!current.getState().equals(OFPContext.CONTEXT_STATE.TERMINATION)) {
LOG.warn("Node {} context state not in TERMINATION state.",
connectionContext.getDeviceInfo().getLOGValue());
+ //Lets disconnect the existing connection as well and ask switch to connect fresh.
+ //This will re-add the node properly
+ current.getPrimaryConnectionContext().closeConnection(true);
return ConnectionStatus.ALREADY_CONNECTED;
} else {
+ // Device context already exist with a terminated connection. Reject the connection to receive fresh
+ // connection and properly add the device.
+ LOG.debug("Old connection from {} is terminated but still exist in device context." +
+ "Rejecting new connection and cleaning up the old connection.", deviceInfo.getLOGValue());
+ deviceContexts.remove(deviceInfo);
+ current.getPrimaryConnectionContext().closeConnection(true);
return ConnectionStatus.CLOSING;
}
}
Optional.ofNullable(spyPool).ifPresent(ScheduledThreadPoolExecutor::shutdownNow);
spyPool = null;
+ if (Objects.nonNull(eosListenerRegistration)) {
+ try {
+ LOG.debug("Closing entity ownership listener");
+ eosListenerRegistration.close();
+ } catch (Exception e) {
+ LOG.debug("Failed to close entity ownership listener registration with exception",e);
+ }
+ }
+
}
@Override
return;
}
+ if (connectionContext.getFeatures() != null
+ && connectionContext.getFeatures().getAuxiliaryId() != null
+ && connectionContext.getFeatures().getAuxiliaryId() > 0
+ && deviceCtx.getAuxiliaryConnectionContexts(new BigInteger(String.valueOf(
+ connectionContext.getFeatures().getAuxiliaryId().intValue()))) != null) {
+
+ LOG.debug("Node {} disconnected, but not primary connection.", connectionContext.getDeviceInfo().getLOGValue());
+ // Connection is not PrimaryConnection so try to remove from Auxiliary Connections
+ deviceCtx.removeAuxiliaryConnectionContext(connectionContext);
+ // If this is not primary connection, we should not continue disabling everything
+ return;
+ }
+
if (deviceCtx.getState().equals(OFPContext.CONTEXT_STATE.TERMINATION)) {
LOG.info("Device context for node {} is already is termination state, waiting for close all context", deviceInfo.getLOGValue());
return;
deviceCtx.close();
- if (!connectionContext.equals(deviceCtx.getPrimaryConnectionContext())) {
- LOG.debug("Node {} disconnected, but not primary connection.", connectionContext.getDeviceInfo().getLOGValue());
- // Connection is not PrimaryConnection so try to remove from Auxiliary Connections
- deviceCtx.removeAuxiliaryConnectionContext(connectionContext);
- }
-
// TODO: Auxiliary connections supported ?
// Device is disconnected and so we need to close TxManager
final ListenableFuture<Void> future = deviceCtx.shuttingDownDataStoreTransactions();
@Override
public CheckedFuture<Void, TransactionCommitFailedException> removeDeviceFromOperationalDS(final DeviceInfo deviceInfo) {
+ return removeDeviceFromOperationalDS(deviceInfo.getNodeInstanceIdentifier(), deviceInfo.getLOGValue());
+ }
+
+ @Override
+ public void setGlobalNotificationQuota(final long globalNotificationQuota) {
+ this.globalNotificationQuota = globalNotificationQuota;
+ }
+
+ @Override
+ public void setSwitchFeaturesMandatory(final boolean switchFeaturesMandatory) {
+ this.switchFeaturesMandatory = switchFeaturesMandatory;
+ }
+
+ private CheckedFuture<Void, TransactionCommitFailedException> removeDeviceFromOperationalDS(
+ final KeyedInstanceIdentifier<Node, NodeKey> nodeIid, final String nodeName) {
+ Preconditions.checkNotNull(nodeIid, "Node IID must not be null");
+
final WriteTransaction delWtx = dataBroker.newWriteOnlyTransaction();
- delWtx.delete(LogicalDatastoreType.OPERATIONAL, deviceInfo.getNodeInstanceIdentifier());
+ delWtx.delete(LogicalDatastoreType.OPERATIONAL, nodeIid);
final CheckedFuture<Void, TransactionCommitFailedException> delFuture = delWtx.submit();
Futures.addCallback(delFuture, new FutureCallback<Void>() {
@Override
public void onSuccess(final Void result) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Delete Node {} was successful", deviceInfo.getLOGValue());
+ LOG.debug("Delete Node {} was successful", nodeName);
}
}
@Override
public void onFailure(@Nonnull final Throwable t) {
- LOG.warn("Delete node {} failed with exception {}", deviceInfo.getLOGValue(), t);
+ LOG.warn("Delete node {} failed with exception {}", nodeName, t);
}
});
return delFuture;
}
-
private void addCallbackToDeviceInitializeToSlave(final DeviceInfo deviceInfo, final DeviceContext deviceContext, final LifecycleService lifecycleService) {
Futures.addCallback(deviceContext.makeDeviceSlave(), new FutureCallback<RpcResult<SetRoleOutput>>() {
@Override
if (LOG.isDebugEnabled()) {
LOG.debug("Role SLAVE was successfully propagated on device, node {}", deviceInfo.getLOGValue());
}
+ deviceContext.sendNodeAddedNotification();
}
@Override
}
public void onDeviceRemoved(DeviceInfo deviceInfo) {
- deviceContexts.remove(deviceInfo);
+ DeviceContext deviceContext = deviceContexts.remove(deviceInfo);
+ removeddeviceContexts.putIfAbsent(deviceInfo, deviceContext);
LOG.debug("Device context removed for node {}", deviceInfo.getLOGValue());
lifecycleServices.remove(deviceInfo);
LOG.debug("Lifecycle service removed for node {}", deviceInfo.getLOGValue());
}
+
+ @Override
+ public void ownershipChanged(EntityOwnershipChange entityOwnershipChange) {
+ if (!entityOwnershipChange.hasOwner()) {
+ final YangInstanceIdentifier yii = entityOwnershipChange.getEntity().getId();
+ final YangInstanceIdentifier.NodeIdentifierWithPredicates niiwp =
+ (YangInstanceIdentifier.NodeIdentifierWithPredicates) yii.getLastPathArgument();
+ String entityName = niiwp.getKeyValues().values().iterator().next().toString();
+ LOG.info("Entity ownership changed for device : {} : {}", entityName, entityOwnershipChange);
+
+ if (entityName != null ){
+ if (!removeddeviceContexts.isEmpty()) {
+ for (DeviceInfo device : removeddeviceContexts.keySet()) {
+ if (device.getNodeId().getValue().equals(entityName)) {
+ LOG.info("Cleaning up operational data of the node : {}", entityName);
+ // No owner present for the entity, clean up the data and remove it from
+ // removed context.
+ removeddeviceContexts.remove(device).cleanupDeviceData();
+ return;
+ }
+ }
+ }
+ removeDeviceFromOperationalDS(DeviceStateUtil.createNodeInstanceIdentifier(new NodeId(entityName)),
+ entityName);
+ }
+ }
+ }
}
@GuardedBy("txLock")
private ListenableFuture<Void> lastSubmittedFuture;
- private boolean initCommit;
+ private volatile boolean initCommit;
@GuardedBy("txLock")
private TransactionChainManagerStatus transactionChainManagerStatus = TransactionChainManagerStatus.SLEEPING;
@Nonnull final DeviceInfo deviceInfo) {
this.dataBroker = dataBroker;
this.nodeId = deviceInfo.getNodeInstanceIdentifier().getKey().getId().getValue();
- this.transactionChainManagerStatus = TransactionChainManagerStatus.SLEEPING;
this.lastSubmittedFuture = Futures.immediateFuture(null);
}
LOG.debug("activateTransactionManager for node {} transaction submit is set to {}", this.nodeId, submitIsEnabled);
}
synchronized (txLock) {
- if (TransactionChainManagerStatus.SLEEPING.equals(transactionChainManagerStatus)) {
+ if (TransactionChainManagerStatus.SLEEPING == transactionChainManagerStatus) {
Preconditions.checkState(txChainFactory == null, "TxChainFactory survive last close.");
Preconditions.checkState(wTx == null, "We have some unexpected WriteTransaction.");
this.transactionChainManagerStatus = TransactionChainManagerStatus.WORKING;
}
final ListenableFuture<Void> future;
synchronized (txLock) {
- if (TransactionChainManagerStatus.WORKING.equals(transactionChainManagerStatus)) {
+ if (TransactionChainManagerStatus.WORKING == transactionChainManagerStatus) {
transactionChainManagerStatus = TransactionChainManagerStatus.SLEEPING;
future = txChainShuttingDown();
Preconditions.checkState(wTx == null, "We have some unexpected WriteTransaction.");
}
return true;
}
- Preconditions.checkState(TransactionChainManagerStatus.WORKING.equals(transactionChainManagerStatus),
+ Preconditions.checkState(TransactionChainManagerStatus.WORKING == transactionChainManagerStatus,
"we have here Uncompleted Transaction for node {} and we are not MASTER", this.nodeId);
final CheckedFuture<Void, TransactionCommitFailedException> submitFuture = wTx.submit();
+ lastSubmittedFuture = submitFuture;
+ wTx = null;
+
Futures.addCallback(submitFuture, new FutureCallback<Void>() {
@Override
public void onSuccess(final Void result) {
- if (initCommit) {
- initCommit = false;
- }
+ initCommit = false;
}
@Override
}
}
if (initCommit) {
- wTx = null;
Optional.ofNullable(lifecycleService).ifPresent(LifecycleService::closeConnection);
}
}
});
- lastSubmittedFuture = submitFuture;
- wTx = null;
}
return true;
}
<T extends DataObject> void addDeleteOperationTotTxChain(final LogicalDatastoreType store,
final InstanceIdentifier<T> path){
- final WriteTransaction writeTx = getTransactionSafely();
- if (Objects.nonNull(writeTx)) {
- writeTx.delete(store, path);
- } else {
- if (LOG.isDebugEnabled()) {
+ synchronized (txLock) {
+ ensureTransaction();
+ if (wTx == null) {
LOG.debug("WriteTx is null for node {}. Delete {} was not realized.", this.nodeId, path);
+ throw new TransactionChainClosedException(CANNOT_WRITE_INTO_TRANSACTION);
}
- throw new TransactionChainClosedException(CANNOT_WRITE_INTO_TRANSACTION);
+
+ wTx.delete(store, path);
}
}
final InstanceIdentifier<T> path,
final T data,
final boolean createParents){
- final WriteTransaction writeTx = getTransactionSafely();
- if (Objects.nonNull(writeTx)) {
- writeTx.put(store, path, data, createParents);
- } else {
- if (LOG.isDebugEnabled()) {
+ synchronized (txLock) {
+ ensureTransaction();
+ if (wTx == null) {
LOG.debug("WriteTx is null for node {}. Write data for {} was not realized.", this.nodeId, path);
+ throw new TransactionChainClosedException(CANNOT_WRITE_INTO_TRANSACTION);
}
- throw new TransactionChainClosedException(CANNOT_WRITE_INTO_TRANSACTION);
+
+ wTx.put(store, path, data, createParents);
}
}
@Override
public void onTransactionChainFailed(final TransactionChain<?, ?> chain,
final AsyncTransaction<?, ?> transaction, final Throwable cause) {
- if (transactionChainManagerStatus.equals(TransactionChainManagerStatus.WORKING)) {
- LOG.warn("Transaction chain failed, recreating chain due to ", cause);
- recreateTxChain();
+ synchronized (txLock) {
+ if (TransactionChainManagerStatus.WORKING == transactionChainManagerStatus) {
+ LOG.warn("Transaction chain failed, recreating chain due to ", cause);
+ createTxChain();
+ wTx = null;
+ }
}
}
// NOOP
}
- private void recreateTxChain() {
- synchronized (txLock) {
- createTxChain();
- wTx = null;
- }
- }
-
+ @GuardedBy("txLock")
@Nullable
- private WriteTransaction getTransactionSafely() {
- synchronized (txLock) {
- if (wTx == null && TransactionChainManagerStatus.WORKING.equals(transactionChainManagerStatus)) {
- Optional.ofNullable(txChainFactory).ifPresent(bindingTransactionChain -> wTx = txChainFactory.newWriteOnlyTransaction());
- }
- }
- return wTx;
+ private void ensureTransaction() {
+ if (wTx == null && TransactionChainManagerStatus.WORKING == transactionChainManagerStatus
+ && txChainFactory != null) {
+ wTx = txChainFactory.newWriteOnlyTransaction();
+ }
}
@VisibleForTesting
if (LOG.isDebugEnabled()) {
LOG.debug("TxManager is going SHUTTING_DOWN for node {}", this.nodeId);
}
- ListenableFuture<Void> future;
synchronized (txLock) {
this.transactionChainManagerStatus = TransactionChainManagerStatus.SHUTTING_DOWN;
- future = txChainShuttingDown();
+ return txChainShuttingDown();
}
- return future;
}
@GuardedBy("txLock")
import java.util.List;
import java.util.Objects;
import javax.annotation.Nullable;
+
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
import org.slf4j.LoggerFactory;
public class LifecycleServiceImpl implements LifecycleService {
-
private static final Logger LOG = LoggerFactory.getLogger(LifecycleServiceImpl.class);
+
+ private final List<DeviceRemovedHandler> deviceRemovedHandlers = new ArrayList<>();
+ private volatile CONTEXT_STATE state = CONTEXT_STATE.INITIALIZATION;
private DeviceContext deviceContext;
private RpcContext rpcContext;
private StatisticsContext statContext;
private ClusterSingletonServiceRegistration registration;
private ClusterInitializationPhaseHandler clusterInitializationPhaseHandler;
- private final List<DeviceRemovedHandler> deviceRemovedHandlers = new ArrayList<>();
- private volatile CONTEXT_STATE state = CONTEXT_STATE.INITIALIZATION;
@Override
@Override
public ListenableFuture<Void> closeServiceInstance() {
- final boolean connectionInterrupted =
- this.deviceContext
- .getPrimaryConnectionContext()
- .getConnectionState()
- .equals(ConnectionContext.CONNECTION_STATE.RIP);
// Chain all jobs that will stop our services
final List<ListenableFuture<Void>> futureList = new ArrayList<>();
- futureList.add(statContext.stopClusterServices(connectionInterrupted));
- futureList.add(rpcContext.stopClusterServices(connectionInterrupted));
- futureList.add(deviceContext.stopClusterServices(connectionInterrupted));
+ futureList.add(statContext.stopClusterServices());
+ futureList.add(rpcContext.stopClusterServices());
+ futureList.add(deviceContext.stopClusterServices());
return Futures.transform(Futures.successfulAsList(futureList), new Function<List<Void>, Void>() {
@Nullable
// If we are still registered and we are not already closing, then close the registration
if (Objects.nonNull(registration)) {
try {
- LOG.debug("Closing clustering MASTER services for node {}", getDeviceInfo().getLOGValue());
+ LOG.debug("Closing clustering singleton services for node {}", getDeviceInfo().getLOGValue());
registration.close();
} catch (Exception e) {
- LOG.debug("Failed to close clustering MASTER services for node {} with exception: ",
+ LOG.debug("Failed to close clustering singleton services for node {} with exception: ",
getDeviceInfo().getLOGValue(), e);
}
}
@Override
public void registerService(final ClusterSingletonServiceProvider singletonServiceProvider) {
- LOG.debug("Registered clustering MASTER services for node {}", getDeviceInfo().getLOGValue());
+ LOG.debug("Registered clustering singleton services for node {}", getDeviceInfo().getLOGValue());
// lifecycle service -> device context -> statistics context -> rpc context -> role context -> lifecycle service
this.clusterInitializationPhaseHandler = deviceContext;
// Register cluster singleton service
try {
this.registration = Verify.verifyNotNull(singletonServiceProvider.registerClusterSingletonService(this));
- LOG.info("Registered clustering MASTER services for node {}", getDeviceInfo().getLOGValue());
+ LOG.info("Registered clustering singleton services for node {}", getDeviceInfo().getLOGValue());
} catch (Exception e) {
LOG.warn("Failed to register cluster singleton service for node {}, with exception: {}", getDeviceInfo(), e);
closeConnection();
import com.google.common.util.concurrent.ListenableFuture;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
+import javax.annotation.Nonnull;
+import javax.annotation.concurrent.ThreadSafe;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+@ThreadSafe
public class DeviceFlowRegistryImpl implements DeviceFlowRegistry {
private static final Logger LOG = LoggerFactory.getLogger(DeviceFlowRegistryImpl.class);
private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
private final DataBroker dataBroker;
private final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier;
private final List<ListenableFuture<List<Optional<FlowCapableNode>>>> lastFillFutures = new ArrayList<>();
// Action applied to each flow read back from the data store during fill().
private final Consumer<Flow> flowConsumer;

/**
 * @param version            OpenFlow protocol version, used to normalize flow matches
 * @param dataBroker         broker used to read flows back from the data store
 * @param instanceIdentifier node this registry belongs to
 */
public DeviceFlowRegistryImpl(final short version, final DataBroker dataBroker, final KeyedInstanceIdentifier<Node, NodeKey> instanceIdentifier) {
    this.dataBroker = dataBroker;
    this.instanceIdentifier = instanceIdentifier;

    // Register only flows not seen yet: the key is derived from the normalized
    // flow, so duplicate reads collapse onto a single registry entry.
    flowConsumer = flow -> {
        final FlowRegistryKey key = FlowRegistryKeyFactory.create(version, flow);
        if (!flowRegistry.containsKey(key)) {
            storeDescriptor(key, FlowDescriptorFactory.create(flow.getTableId(), flow.getId()));
        }
    };
}
@Override
public ListenableFuture<List<Optional<FlowCapableNode>>> fill() {
- LOG.debug("Filling flow registry with flows for node: {}", instanceIdentifier.getKey().getId().getValue());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Filling flow registry with flows for node: {}", instanceIdentifier.getKey().getId().getValue());
+ }
// Prepare path for read transaction
// TODO: Read only Tables, and not entire FlowCapableNode (fix Yang model)
final InstanceIdentifier<FlowCapableNode> path = instanceIdentifier.augmentation(FlowCapableNode.class);
// First, try to fill registry with flows from DS/Configuration
- CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> configFuture = fillFromDatastore(LogicalDatastoreType.CONFIGURATION, path);
+ final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> configFuture = fillFromDatastore(LogicalDatastoreType.CONFIGURATION, path);
// Now, try to fill registry with flows from DS/Operational
// in case of cluster fail over, when clients are not using DS/Configuration
// for adding flows, but only RPCs
- CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> operationalFuture = fillFromDatastore(LogicalDatastoreType.OPERATIONAL, path);
+ final CheckedFuture<Optional<FlowCapableNode>, ReadFailedException> operationalFuture = fillFromDatastore(LogicalDatastoreType.OPERATIONAL, path);
// And at last, chain and return futures created above.
// Also, cache this future, so call to DeviceFlowRegistry.close() will be able
}
@Override
- public FlowDescriptor retrieveIdForFlow(final FlowRegistryKey flowRegistryKey) {
- LOG.trace("Retrieving flow descriptor for flow hash : {}", flowRegistryKey.hashCode());
- FlowDescriptor flowDescriptor = flowRegistry.get(flowRegistryKey);
- // Get FlowDescriptor from flow registry
- if(flowDescriptor == null){
- if (LOG.isTraceEnabled()) {
- LOG.trace("Failed to retrieve flow descriptor for flow hash : {}, trying with custom equals method", flowRegistryKey.hashCode());
- }
- for(Map.Entry<FlowRegistryKey, FlowDescriptor> fd : flowRegistry.entrySet()) {
- if (fd.getKey().equals(flowRegistryKey)) {
- flowDescriptor = fd.getValue();
- break;
- }
- }
+ public FlowDescriptor retrieveDescriptor(@Nonnull final FlowRegistryKey flowRegistryKey) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Retrieving flow descriptor for flow hash : {}", flowRegistryKey.hashCode());
}
- return flowDescriptor;
+
+ return flowRegistry.get(flowRegistryKey);
}
@Override
- public void store(final FlowRegistryKey flowRegistryKey, final FlowDescriptor flowDescriptor) {
+ public void storeDescriptor(@Nonnull final FlowRegistryKey flowRegistryKey,
+ @Nonnull final FlowDescriptor flowDescriptor) {
try {
- LOG.trace("Storing flowDescriptor with table ID : {} and flow ID : {} for flow hash : {}",
- flowDescriptor.getTableKey().getId(), flowDescriptor.getFlowId().getValue(), flowRegistryKey.hashCode());
- flowRegistry.put(flowRegistryKey, flowDescriptor);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Storing flowDescriptor with table ID : {} and flow ID : {} for flow hash : {}",
+ flowDescriptor.getTableKey().getId(), flowDescriptor.getFlowId().getValue(), flowRegistryKey.hashCode());
+ }
+
+ flowRegistry.put(flowRegistryKey, flowDescriptor);
} catch (IllegalArgumentException ex) {
- LOG.warn("Flow with flowId {} already exists in table {}", flowDescriptor.getFlowId().getValue(),
- flowDescriptor.getTableKey().getId());
- final FlowId newFlowId = createAlienFlowId(flowDescriptor.getTableKey().getId());
- final FlowDescriptor newFlowDescriptor = FlowDescriptorFactory.
- create(flowDescriptor.getTableKey().getId(), newFlowId);
- flowRegistry.put(flowRegistryKey, newFlowDescriptor);
+ if (LOG.isWarnEnabled()) {
+ LOG.warn("Flow with flow ID {} already exists in table {}, generating alien flow ID", flowDescriptor.getFlowId().getValue(),
+ flowDescriptor.getTableKey().getId());
+ }
+
+ // We are trying to store new flow to flow registry, but we already have different flow with same flow ID
+ // stored in registry, so we need to create alien ID for this new flow here.
+ flowRegistry.put(
+ flowRegistryKey,
+ FlowDescriptorFactory.create(
+ flowDescriptor.getTableKey().getId(),
+ createAlienFlowId(flowDescriptor.getTableKey().getId())));
}
}
@Override
- public void update(final FlowRegistryKey newFlowRegistryKey, final FlowDescriptor flowDescriptor) {
- LOG.trace("Updating the entry with hash: {}", newFlowRegistryKey.hashCode());
- flowRegistry.forcePut(newFlowRegistryKey, flowDescriptor);
+ public void store(final FlowRegistryKey flowRegistryKey) {
+ if (Objects.isNull(retrieveDescriptor(flowRegistryKey))) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Flow descriptor for flow hash : {} not found, generating alien flow ID", flowRegistryKey.hashCode());
+ }
+
+ // We do not found flow in flow registry, that means it do not have any ID already assigned, so we need
+ // to generate new alien flow ID here.
+ storeDescriptor(
+ flowRegistryKey,
+ FlowDescriptorFactory.create(
+ flowRegistryKey.getTableId(),
+ createAlienFlowId(flowRegistryKey.getTableId())));
+ }
}
@Override
- public FlowId storeIfNecessary(final FlowRegistryKey flowRegistryKey) {
- LOG.trace("Trying to retrieve flow ID for flow hash : {}", flowRegistryKey.hashCode());
-
- // First, try to get FlowDescriptor from flow registry
- FlowDescriptor flowDescriptor = retrieveIdForFlow(flowRegistryKey);
-
- // We was not able to retrieve FlowDescriptor, so we will at least try to generate it
- if (flowDescriptor == null) {
- LOG.trace("Flow descriptor for flow hash : {} not found, generating alien flow ID", flowRegistryKey.hashCode());
- final short tableId = flowRegistryKey.getTableId();
- final FlowId alienFlowId = createAlienFlowId(tableId);
- flowDescriptor = FlowDescriptorFactory.create(tableId, alienFlowId);
-
- // Finally we got flowDescriptor, so now we will store it to registry,
- // so next time we won't need to generate it again
- store(flowRegistryKey, flowDescriptor);
+ public void addMark(final FlowRegistryKey flowRegistryKey) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Removing flow descriptor for flow hash : {}", flowRegistryKey.hashCode());
}
- return flowDescriptor.getFlowId();
+ flowRegistry.remove(flowRegistryKey);
}
@Override
- public void removeDescriptor(final FlowRegistryKey flowRegistryKey) {
- LOG.trace("Removing flow descriptor for flow hash : {}", flowRegistryKey.hashCode());
- flowRegistry.remove(flowRegistryKey);
+ public void processMarks() {
+ // Do nothing
}
@Override
- public Map<FlowRegistryKey, FlowDescriptor> getAllFlowDescriptors() {
- return Collections.unmodifiableMap(flowRegistry);
+ public void forEach(final Consumer<FlowRegistryKey> consumer) {
+ synchronized (flowRegistry) {
+ flowRegistry.keySet().forEach(consumer);
+ }
+ }
+
+ @Override
+ public int size() {
+ return flowRegistry.size();
}
@Override
public void close() {
final Iterator<ListenableFuture<List<Optional<FlowCapableNode>>>> iterator = lastFillFutures.iterator();
- while(iterator.hasNext()) {
+ // We need to force interrupt and clear all running futures that are trying to read flow IDs from data store
+ while (iterator.hasNext()) {
final ListenableFuture<List<Optional<FlowCapableNode>>> next = iterator.next();
boolean success = next.cancel(true);
LOG.trace("Cancelling filling flow registry with flows job {} with result: {}", next, success);
@VisibleForTesting
static FlowId createAlienFlowId(final short tableId) {
final String alienId = ALIEN_SYSTEM_FLOW_ID + tableId + '-' + UNACCOUNTED_FLOWS_COUNTER.incrementAndGet();
+ LOG.debug("Created alien flow id {} for table id {}", alienId, tableId);
return new FlowId(alienId);
}
-}
\ No newline at end of file
+
+ @VisibleForTesting
+ Map<FlowRegistryKey, FlowDescriptor> getAllFlowDescriptors() {
+ return flowRegistry;
+ }
+}
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
+import javax.annotation.Nonnull;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowDescriptor;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
* Created by Martin Bobak <mbobak@cisco.com> on 9.4.2015.
*/
public class FlowDescriptorFactory {
+
private FlowDescriptorFactory() {
// Hide implicit constructor
}
- public static FlowDescriptor create(final short tableId, final FlowId fLowId) {
- final TableKey tableKey = new TableKey(tableId);
- return new FlowDescriptorDto(tableKey, fLowId);
+ @Nonnull
+ public static FlowDescriptor create(final short tableId, @Nonnull final FlowId flowId) {
+ return new FlowDescriptorDto(
+ new TableKey(tableId),
+ Preconditions.checkNotNull(flowId));
}
private static final class FlowDescriptorDto implements FlowDescriptor {
private final FlowId flowId;
private final TableKey tableKey;
// Arguments are validated by the factory method; both are expected non-null here.
private FlowDescriptorDto(@Nonnull final TableKey tableKey, @Nonnull final FlowId flowId) {
    this.tableKey = tableKey;
    this.flowId = flowId;
}
return tableKey;
}
}
+
}
\ No newline at end of file
import com.google.common.base.MoreObjects;
import com.google.common.base.Preconditions;
import java.math.BigInteger;
+import javax.annotation.Nonnull;
import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowRegistryKey;
-import org.opendaylight.openflowplugin.impl.util.MatchComparatorFactory;
+import org.opendaylight.openflowplugin.impl.util.MatchNormalizationUtil;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 8.4.2015.
- */
public class FlowRegistryKeyFactory {
private FlowRegistryKeyFactory() {
// Hide implicit constructor
}
- public static FlowRegistryKey create(final Flow flow) {
- return new FlowRegistryKeyDto(flow);
+ @Nonnull
+ public static FlowRegistryKey create(final short version, @Nonnull final Flow flow) {
+ //TODO: mandatory flow input values (or default values) should be specified via yang model
+ final short tableId = Preconditions.checkNotNull(flow.getTableId(), "flow tableId must not be null");
+ final int priority = MoreObjects.firstNonNull(flow.getPriority(), OFConstants.DEFAULT_FLOW_PRIORITY);
+ final BigInteger cookie = MoreObjects.firstNonNull(flow.getCookie(), OFConstants.DEFAULT_FLOW_COOKIE).getValue();
+ final Match match = MatchNormalizationUtil.normalizeMatch(MoreObjects.firstNonNull(flow.getMatch(), OFConstants.EMPTY_MATCH), version);
+ return new FlowRegistryKeyDto(tableId, priority, cookie, match);
}
private static final class FlowRegistryKeyDto implements FlowRegistryKey {
private final BigInteger cookie;
private final Match match;
// All normalization/defaulting happens in the factory; this just captures state.
private FlowRegistryKeyDto(final short tableId,
                           final int priority,
                           @Nonnull final BigInteger cookie,
                           @Nonnull final Match match) {
    this.match = match;
    this.cookie = cookie;
    this.priority = priority;
    this.tableId = tableId;
}
@Override
return getPriority() == that.getPriority() &&
getTableId() == that.getTableId() &&
getCookie().equals(that.getCookie()) &&
- MatchComparatorFactory.createMatch().areObjectsEqual(getMatch(), that.getMatch());
+ getMatch().equals(that.getMatch());
}
@Override
return match;
}
}
+
}
package org.opendaylight.openflowplugin.impl.registry.group;
+import com.google.common.annotations.VisibleForTesting;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
+import java.util.function.Consumer;
+import javax.annotation.concurrent.ThreadSafe;
import org.opendaylight.openflowplugin.api.openflow.registry.group.DeviceGroupRegistry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 15.4.2015.
- */
+@ThreadSafe
public class DeviceGroupRegistryImpl implements DeviceGroupRegistry {
- private final List<GroupId> groupIdList = new ArrayList<>();
- private final List<GroupId> marks = new ArrayList<>();
+ private final List<GroupId> groupIds = Collections.synchronizedList(new ArrayList<>());
+ private final List<GroupId> marks = Collections.synchronizedList(new ArrayList<>());
@Override
public void store(final GroupId groupId) {
- groupIdList.add(groupId);
+ groupIds.add(groupId);
}
@Override
- public void markToBeremoved(final GroupId groupId) {
+ public void addMark(final GroupId groupId) {
marks.add(groupId);
}
@Override
- public void removeMarked() {
- synchronized (groupIdList) {
- groupIdList.removeAll(marks);
- }
+ public void processMarks() {
+ groupIds.removeAll(marks);
marks.clear();
}
@Override
- public List<GroupId> getAllGroupIds() {
- return groupIdList;
+ public void forEach(final Consumer<GroupId> consumer) {
+ synchronized (groupIds) {
+ groupIds.forEach(consumer);
+ }
+ }
+
+ @Override
+ public int size() {
+ return groupIds.size();
}
@Override
public void close() {
- synchronized (groupIdList) {
- groupIdList.clear();
- }
- synchronized (marks) {
- marks.clear();
- }
+ groupIds.clear();
+ marks.clear();
+ }
+
+ @VisibleForTesting
+ List<GroupId> getAllGroupIds() {
+ return groupIds;
}
}
package org.opendaylight.openflowplugin.impl.registry.meter;
+import com.google.common.annotations.VisibleForTesting;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
+import java.util.function.Consumer;
+import javax.annotation.concurrent.ThreadSafe;
import org.opendaylight.openflowplugin.api.openflow.registry.meter.DeviceMeterRegistry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
-/**
- * Created by Martin Bobak <mbobak@cisco.com> on 15.4.2015.
- */
+@ThreadSafe
public class DeviceMeterRegistryImpl implements DeviceMeterRegistry {
- private final List<MeterId> meterIds = new ArrayList<>();
- private final List<MeterId> marks = new ArrayList<>();
+ private final List<MeterId> meterIds = Collections.synchronizedList(new ArrayList<>());
+ private final List<MeterId> marks = Collections.synchronizedList(new ArrayList<>());
@Override
public void store(final MeterId meterId) {
}
@Override
- public void markToBeremoved(final MeterId meterId) {
+ public void addMark(final MeterId meterId) {
marks.add(meterId);
}
@Override
- public void removeMarked() {
+ public void processMarks() {
+ meterIds.removeAll(marks);
+ marks.clear();
+ }
+
+ @Override
+ public void forEach(final Consumer<MeterId> consumer) {
synchronized (meterIds) {
- meterIds.removeAll(marks);
- }
- synchronized (marks) {
- marks.clear();
+ meterIds.forEach(consumer);
}
}
@Override
- public List<MeterId> getAllMeterIds() {
- return meterIds;
+ public int size() {
+ return meterIds.size();
}
@Override
public void close() {
- synchronized (meterIds) {
- meterIds.clear();
- }
- synchronized (marks) {
- marks.clear();
- }
+ meterIds.clear();
+ marks.clear();
+ }
+
+ @VisibleForTesting
+ List<MeterId> getAllMeterIds() {
+ return meterIds;
}
}
}
} else {
try {
- stopClusterServices(true).get();
+ stopClusterServices().get();
} catch (Exception e) {
LOG.debug("Failed to close RpcContext for node {} with exception: ", getDeviceInfo().getLOGValue(), e);
}
}
@Override
- public ListenableFuture<Void> stopClusterServices(boolean connectionInterrupted) {
+ public ListenableFuture<Void> stopClusterServices() {
if (CONTEXT_STATE.TERMINATION.equals(getState())) {
return Futures.immediateCancelledFuture();
}
private final RpcProviderRegistry rpcProviderRegistry;
private DeviceInitializationPhaseHandler deviceInitPhaseHandler;
private DeviceTerminationPhaseHandler deviceTerminationPhaseHandler;
- private final int maxRequestsQuota;
+ private int maxRequestsQuota;
private final ConcurrentMap<DeviceInfo, RpcContext> contexts = new ConcurrentHashMap<>();
private boolean isStatisticsRpcEnabled;
private final ExtensionConverterProvider extensionConverterProvider;
public RpcManagerImpl(
final RpcProviderRegistry rpcProviderRegistry,
- final int quotaValue,
final ExtensionConverterProvider extensionConverterProvider,
final ConvertorExecutor convertorExecutor,
final NotificationPublishService notificationPublishService) {
this.rpcProviderRegistry = rpcProviderRegistry;
- maxRequestsQuota = quotaValue;
this.extensionConverterProvider = extensionConverterProvider;
this.convertorExecutor = convertorExecutor;
this.notificationPublishService = notificationPublishService;
convertorExecutor,
notificationPublishService);
+ // Clean up any old context present
+ RpcContext staleContext = contexts.remove(deviceInfo);
+ if (staleContext != null){
+ LOG.warn("Previous rpc context for node {} was not closed, closing the context.", deviceInfo);
+ staleContext.close();
+ }
+
Verify.verify(contexts.putIfAbsent(deviceInfo, rpcContext) == null, "RpcCtx still not closed for node {}", deviceInfo.getNodeId());
lifecycleService.setRpcContext(rpcContext);
lifecycleService.registerDeviceRemovedHandler(this);
isStatisticsRpcEnabled = statisticsRpcEnabled;
}
+ @Override
+ public void setRpcRequestQuota(final int rpcRequestQuota) {
+ this.maxRequestsQuota = rpcRequestQuota;
+ }
+
@Override
public void onDeviceRemoved(DeviceInfo deviceInfo) {
contexts.remove(deviceInfo);
StatisticsGatheringUtils.writeFlowStatistics(allMultipartData, deviceInfo, registry, txFacade);
if (!multipartReply.getFlags().isOFPMPFREQMORE()) {
endCollecting();
+ registry.processMarks();
}
}
}
@Override
public Future<RpcResult<AddFlowOutput>> addFlow(final AddFlowInput input) {
- final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(input);
+ final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(deviceContext.getDeviceInfo().getVersion(), input);
final ListenableFuture<RpcResult<AddFlowOutput>> future =
flowAdd.processFlowModInputBuilders(flowAdd.toFlowModInputs(input));
Futures.addCallback(future, new AddFlowCallback(input, flowRegistryKey));
if (Objects.nonNull(input.getFlowRef())) {
final FlowId flowId = input.getFlowRef().getValue().firstKeyOf(Flow.class, FlowKey.class).getId();
flowDescriptor = FlowDescriptorFactory.create(input.getTableId(), flowId);
- deviceContext.getDeviceFlowRegistry().store(flowRegistryKey, flowDescriptor);
+ deviceContext.getDeviceFlowRegistry().storeDescriptor(flowRegistryKey, flowDescriptor);
} else {
- final FlowId flowId = deviceContext.getDeviceFlowRegistry().storeIfNecessary(flowRegistryKey);
- flowDescriptor = FlowDescriptorFactory.create(input.getTableId(), flowId);
+ deviceContext.getDeviceFlowRegistry().store(flowRegistryKey);
+ flowDescriptor = deviceContext.getDeviceFlowRegistry().retrieveDescriptor(flowRegistryKey);
}
if (LOG.isDebugEnabled()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Flow remove finished without error for flow={}", input);
}
- FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(input);
- deviceContext.getDeviceFlowRegistry().removeDescriptor(flowRegistryKey);
+ FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(deviceContext.getDeviceInfo().getVersion(), input);
+ deviceContext.getDeviceFlowRegistry().addMark(flowRegistryKey);
if (itemLifecycleListener != null) {
- final FlowDescriptor flowDescriptor =
- deviceContext.getDeviceFlowRegistry().retrieveIdForFlow(flowRegistryKey);
+ final FlowDescriptor flowDescriptor = deviceContext.getDeviceFlowRegistry().retrieveDescriptor(flowRegistryKey);
+
if (flowDescriptor != null) {
KeyedInstanceIdentifier<Flow, FlowKey> flowPath = createFlowPath(flowDescriptor,
deviceContext.getDeviceInfo().getNodeInstanceIdentifier());
final UpdatedFlow updated = input.getUpdatedFlow();
final OriginalFlow original = input.getOriginalFlow();
- final FlowRegistryKey origFlowRegistryKey = FlowRegistryKeyFactory.create(original);
- final FlowRegistryKey updatedFlowRegistryKey = FlowRegistryKeyFactory.create(updated);
- final FlowDescriptor origFlowDescriptor = deviceFlowRegistry.retrieveIdForFlow(origFlowRegistryKey);
+ final FlowRegistryKey origFlowRegistryKey = FlowRegistryKeyFactory.create(deviceContext.getDeviceInfo().getVersion(), original);
+ final FlowRegistryKey updatedFlowRegistryKey = FlowRegistryKeyFactory.create(deviceContext.getDeviceInfo().getVersion(), updated);
+ final FlowDescriptor origFlowDescriptor = deviceFlowRegistry.retrieveDescriptor(origFlowRegistryKey);
final boolean isUpdate = Objects.nonNull(origFlowDescriptor);
- final FlowId fLowId = Objects.nonNull(input.getFlowRef())
- ? input.getFlowRef().getValue().firstKeyOf(Flow.class).getId()
- : isUpdate ? origFlowDescriptor.getFlowId() : deviceFlowRegistry.storeIfNecessary(updatedFlowRegistryKey);
- final FlowDescriptor updatedFlowDescriptor = FlowDescriptorFactory.create(updated.getTableId(), fLowId);
+ final FlowDescriptor updatedFlowDescriptor;
+
+ if (Objects.nonNull(input.getFlowRef())) {
+ updatedFlowDescriptor = FlowDescriptorFactory.create(updated.getTableId(), input.getFlowRef().getValue().firstKeyOf(Flow.class).getId());
+ } else {
+ if (isUpdate) {
+ updatedFlowDescriptor = origFlowDescriptor;
+ } else {
+ deviceFlowRegistry.store(updatedFlowRegistryKey);
+ updatedFlowDescriptor = deviceFlowRegistry.retrieveDescriptor(updatedFlowRegistryKey);
+ }
+ }
+
if (isUpdate) {
- deviceFlowRegistry.removeDescriptor(origFlowRegistryKey);
- deviceFlowRegistry.store(updatedFlowRegistryKey, updatedFlowDescriptor);
+ deviceFlowRegistry.addMark(origFlowRegistryKey);
+ deviceFlowRegistry.storeDescriptor(updatedFlowRegistryKey, updatedFlowDescriptor);
}
if (itemLifecycleListener != null) {
LOG.warn("Service call for updating flow={} failed, reason: {}", input, throwable);
}
}
-}
\ No newline at end of file
+}
if (LOG.isDebugEnabled()) {
LOG.debug("Group remove with id={} finished without error", input.getGroupId().getValue());
}
- removeGroup.getDeviceRegistry().getDeviceGroupRegistry().markToBeremoved(input.getGroupId());
+ removeGroup.getDeviceRegistry().getDeviceGroupRegistry().addMark(input.getGroupId());
removeIfNecessaryFromDS(input.getGroupId());
} else {
if (LOG.isDebugEnabled()) {
@Override
public Future<RpcResult<RemoveMeterOutput>> removeMeter(final RemoveMeterInput input) {
- removeMeter.getDeviceRegistry().getDeviceMeterRegistry().markToBeremoved(input.getMeterId());
final ListenableFuture<RpcResult<RemoveMeterOutput>> resultFuture = removeMeter.handleServiceCall(input);
Futures.addCallback(resultFuture, new FutureCallback<RpcResult<RemoveMeterOutput>>() {
@Override
if (LOG.isDebugEnabled()) {
LOG.debug("Meter remove with id={} finished without error", input.getMeterId());
}
+ removeMeter.getDeviceRegistry().getDeviceMeterRegistry().addMark(input.getMeterId());
removeIfNecessaryFromDS(input.getMeterId());
} else {
if (LOG.isDebugEnabled()) {
private static KeyedInstanceIdentifier<org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter, MeterKey> createMeterPath(final MeterId meterId, final KeyedInstanceIdentifier<Node, NodeKey> nodePath) {
return nodePath.augmentation(FlowCapableNode.class).child(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter.class, new MeterKey(meterId));
}
-}
\ No newline at end of file
+}
}
} else {
try {
- stopClusterServices(true).get();
+ stopClusterServices().get();
} catch (Exception e) {
LOG.debug("Failed to close StatisticsContext for node {} with exception: ", getDeviceInfo().getLOGValue(), e);
}
case RIP:
final String errMsg = String.format("Device connection doesn't exist anymore. Primary connection status : %s",
deviceContext.getPrimaryConnectionContext().getConnectionState());
- resultingFuture = Futures.immediateFailedFuture(new Throwable(errMsg));
+ resultingFuture = Futures.immediateFailedFuture(new ConnectionException(errMsg));
break;
default:
resultingFuture = Futures.immediateCheckedFuture(Boolean.TRUE);
}
@Override
- public ListenableFuture<Void> stopClusterServices(boolean connectionInterrupted) {
+ public ListenableFuture<Void> stopClusterServices() {
if (CONTEXT_STATE.TERMINATION.equals(getState())) {
return Futures.immediateCancelledFuture();
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Objects;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceRegistry;
import org.opendaylight.openflowplugin.api.openflow.device.TxFacade;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.DeviceFlowRegistry;
+import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowDescriptor;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowRegistryKey;
import org.opendaylight.openflowplugin.api.openflow.registry.group.DeviceGroupRegistry;
import org.opendaylight.openflowplugin.api.openflow.registry.meter.DeviceMeterRegistry;
writeFlowStatistics(data, deviceInfo, flowRegistry, txFacade);
txFacade.submitTransaction();
EventsTimeCounter.markEnd(eventIdentifier);
+ flowRegistry.processMarks();
return Boolean.TRUE;
});
}
refineFlowStatisticsAugmentation(flowStat).build());
final short tableId = flowStat.getTableId();
- final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(flowBuilder.build());
- final FlowId flowId = registry.storeIfNecessary(flowRegistryKey);
-
- final FlowKey flowKey = new FlowKey(flowId);
- flowBuilder.setKey(flowKey);
- final TableKey tableKey = new TableKey(tableId);
- final InstanceIdentifier<Flow> flowIdent
- = fNodeIdent.child(Table.class, tableKey).child(Flow.class, flowKey);
- txFacade.writeToTransaction(LogicalDatastoreType.OPERATIONAL, flowIdent, flowBuilder.build());
+ final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), flowBuilder.build());
+ registry.store(flowRegistryKey);
+ final FlowDescriptor flowDescriptor = registry.retrieveDescriptor(flowRegistryKey);
+
+ if (Objects.nonNull(flowDescriptor)) {
+ final FlowId flowId = flowDescriptor.getFlowId();
+
+ final FlowKey flowKey = new FlowKey(flowId);
+ flowBuilder.setKey(flowKey);
+ final TableKey tableKey = new TableKey(tableId);
+ final InstanceIdentifier<Flow> flowIdent
+ = fNodeIdent.child(Table.class, tableKey).child(Flow.class, flowKey);
+ txFacade.writeToTransaction(LogicalDatastoreType.OPERATIONAL, flowIdent, flowBuilder.build());
+ }
}
}
} catch (TransactionChainClosedException e) {
final DeviceMeterRegistry meterRegistry,
final InstanceIdentifier<FlowCapableNode> flowNodeIdent,
final TxFacade txFacade) throws TransactionChainClosedException {
- for (final MeterId meterId : meterRegistry.getAllMeterIds()) {
+ meterRegistry.forEach(meterId -> {
final InstanceIdentifier<Meter> meterIdent = flowNodeIdent.child(Meter.class, new MeterKey(meterId));
txFacade.addDeleteToTxChain(LogicalDatastoreType.OPERATIONAL, meterIdent);
- }
- meterRegistry.removeMarked();
+ });
+ meterRegistry.processMarks();
}
private static void processGroupDescStats(
final TxFacade txFacade,
final InstanceIdentifier<FlowCapableNode> flowNodeIdent,
final DeviceGroupRegistry groupRegistry) throws TransactionChainClosedException {
- for (final GroupId groupId : groupRegistry.getAllGroupIds()) {
+ groupRegistry.forEach(groupId -> {
final InstanceIdentifier<Group> groupIdent = flowNodeIdent.child(Group.class, new GroupKey(groupId));
txFacade.addDeleteToTxChain(LogicalDatastoreType.OPERATIONAL, groupIdent);
- }
- groupRegistry.removeMarked();
+ });
+ groupRegistry.processMarks();
}
private static void processGroupStatistics(
private final ConcurrentMap<DeviceInfo, StatisticsContext> contexts = new ConcurrentHashMap<>();
- private static long basicTimerDelay;
- private static long currentTimerDelay;
- private static long maximumTimerDelay; //wait time for next statistics
+ private long basicTimerDelay;
+ private long currentTimerDelay;
+ private long maximumTimerDelay; //wait time for next statistics
private StatisticsWorkMode workMode = StatisticsWorkMode.COLLECTALL;
private final Semaphore workModeGuard = new Semaphore(1, true);
}
public StatisticsManagerImpl(final RpcProviderRegistry rpcProviderRegistry,
- final boolean isStatisticsPollingOn,
final HashedWheelTimer hashedWheelTimer,
- final ConvertorExecutor convertorExecutor,
- final long basicTimerDelay,
- final long maximumTimerDelay) {
+ final ConvertorExecutor convertorExecutor) {
Preconditions.checkArgument(rpcProviderRegistry != null);
this.converterExecutor = convertorExecutor;
this.controlServiceRegistration = Preconditions.checkNotNull(
rpcProviderRegistry.addRpcImplementation(StatisticsManagerControlService.class, this)
);
- this.isStatisticsPollingOn = isStatisticsPollingOn;
- this.basicTimerDelay = basicTimerDelay;
- this.currentTimerDelay = basicTimerDelay;
- this.maximumTimerDelay = maximumTimerDelay;
this.hashedWheelTimer = hashedWheelTimer;
}
converterExecutor,
this);
+ // Clean up stale context if present
+ StatisticsContext staleContext = contexts.remove(deviceInfo);
+ if (staleContext != null){
+ LOG.warn("Previous statistics context for node {} was not closed, closing the context.", deviceInfo);
+ staleContext.close();
+ }
+
Verify.verify(
contexts.putIfAbsent(deviceInfo, statisticsContext) == null,
"StatisticsCtx still not closed for Node {}", deviceInfo.getLOGValue()
);
-
lifecycleService.setStatContext(statisticsContext);
lifecycleService.registerDeviceRemovedHandler(this);
deviceInitPhaseHandler.onDeviceContextLevelUp(deviceInfo, lifecycleService);
}
@VisibleForTesting
- static long getCurrentTimerDelay() {
+ long getCurrentTimerDelay() {
return currentTimerDelay;
}
@Override
public void setBasicTimerDelay(final long basicTimerDelay) {
this.basicTimerDelay = basicTimerDelay;
+ this.currentTimerDelay = basicTimerDelay;
}
@Override
package org.opendaylight.openflowplugin.impl.statistics.services.direct;
-import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.Futures;
*/
public abstract class AbstractDirectStatisticsService<I extends StoreStatsGrouping, O> extends AbstractMultipartService<I> {
- private final Function<RpcResult<List<MultipartReply>>, RpcResult<O>> resultTransformFunction =
- new Function<RpcResult<List<MultipartReply>>, RpcResult<O>>() {
- @Nullable
- @Override
- public RpcResult<O> apply(@Nullable RpcResult<List<MultipartReply>> input) {
- Preconditions.checkNotNull(input);
- final O reply = buildReply(input.getResult(), input.isSuccessful());
- return RpcResultBuilder.success(reply).build();
- }
- };
-
private final AsyncFunction<RpcResult<O>, RpcResult<O>> resultStoreFunction =
new AsyncFunction<RpcResult<O>, RpcResult<O>>() {
@Nullable
*/
public Future<RpcResult<O>> handleAndReply(final I input) {
final ListenableFuture<RpcResult<List<MultipartReply>>> rpcReply = handleServiceCall(input);
- ListenableFuture<RpcResult<O>> rpcResult = Futures.transform(rpcReply, resultTransformFunction);
+ ListenableFuture<RpcResult<O>> rpcResult = Futures.transform(rpcReply, this::transformResult);
if (Boolean.TRUE.equals(input.isStoreStats())) {
rpcResult = Futures.transform(rpcResult, resultStoreFunction);
return rpcResult;
}
+ private RpcResult<O> transformResult(final RpcResult<List<MultipartReply>> input) {
+ return Preconditions.checkNotNull(input).isSuccessful()
+ ? RpcResultBuilder.success(buildReply(input.getResult(), input.isSuccessful())).build()
+ : RpcResultBuilder.<O>failed().withRpcErrors(input.getErrors()).build();
+ }
+
@Override
protected OfHeader buildRequest(Xid xid, I input) throws ServiceException {
return RequestInputUtils.createMultipartHeader(multipartType, xid.getValue(), getVersion())
import org.opendaylight.openflowplugin.impl.registry.flow.FlowRegistryKeyFactory;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.ConvertorExecutor;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.data.FlowStatsResponseConvertorData;
-import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.data.VersionDatapathIdConvertorData;
import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.match.MatchReactor;
import org.opendaylight.yang.gen.v1.urn.opendaylight.direct.statistics.rev160511.GetFlowStatisticsInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.direct.statistics.rev160511.GetFlowStatisticsOutput;
final FlowBuilder flowBuilder = new FlowBuilder(flowStatistics)
.addAugmentation(FlowStatisticsData.class, flowStatisticsDataBld.build());
- final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(flowBuilder.build());
- return getDeviceRegistry().getDeviceFlowRegistry().storeIfNecessary(flowRegistryKey);
+ final FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(getVersion(), flowBuilder.build());
+ getDeviceRegistry().getDeviceFlowRegistry().store(flowRegistryKey);
+ return getDeviceRegistry().getDeviceFlowRegistry().retrieveDescriptor(flowRegistryKey).getFlowId();
}
}
--- /dev/null
+/*
+ * Copyright (c) 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Objects;
+import javax.annotation.Nullable;
+import org.opendaylight.openflowplugin.api.openflow.md.util.OpenflowVersion;
+import org.opendaylight.openflowplugin.openflow.md.core.sal.convertor.common.IpConversionUtil;
+import org.opendaylight.openflowplugin.openflow.md.util.InventoryDataServiceUtil;
+import org.opendaylight.openflowplugin.openflow.md.util.OpenflowPortsUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.IetfInetUtil;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv4Address;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv4Prefix;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv6Address;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv6Prefix;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.DottedQuad;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.MacAddress;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.opendaylight.ipv6.arbitrary.bitmask.fields.rev160224.Ipv6ArbitraryMask;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class used for converting OpenFlow port numbers, Ipv4 and Ipv6 addresses to normalized format.
+ */
+public class AddressNormalizationUtil {
+ private static final Logger LOG = LoggerFactory.getLogger(AddressNormalizationUtil.class);
+
+ private static final String NO_ETH_MASK = "ff:ff:ff:ff:ff:ff";
+ private static final String PREFIX_SEPARATOR = "/";
+
+ /**
+ * Extract port number from URI and convert it to OpenFlow specific textual representation.
+ *
+ * @param port the OpenFlow port
+     * @param protocolVersion the OpenFlow protocol version
+ * @return normalized uri
+ */
+ @Nullable
+ public static Uri normalizeProtocolAgnosticPort(@Nullable final Uri port, final short protocolVersion) {
+ if (Objects.isNull(port)) {
+ return null;
+ }
+
+ return OpenflowPortsUtil.getProtocolAgnosticPortUri(protocolVersion, InventoryDataServiceUtil
+ .portNumberfromNodeConnectorId(OpenflowVersion.get(protocolVersion), port.getValue()));
+ }
+
+ /**
+ * Normalize Ipv6 address with prefix mask (ex. 1234:5678:9ABC::/76) and apply prefix mask to Ipv6 address.
+ *
+ * @param ipv6Prefix the Ipv6 prefix
+ * @return normalized Ipv6 prefix
+ */
+ @Nullable
+ public static Ipv6Prefix normalizeIpv6Prefix(@Nullable final Ipv6Prefix ipv6Prefix) {
+ if (Objects.isNull(ipv6Prefix)) {
+ return null;
+ }
+
+ final byte[] address = IetfInetUtil.INSTANCE.ipv6AddressBytes(IpConversionUtil.extractIpv6Address(ipv6Prefix));
+ final byte[] mask = IpConversionUtil.convertIpv6PrefixToByteArray(IpConversionUtil.extractIpv6Prefix(ipv6Prefix));
+ return normalizeIpv6Address(address, mask);
+ }
+
+ /**
+ * Normalize Ipv6 address and arbitrary mask and apply arbitrary mask to Ipv6 address.
+ *
+     * @param ipv6Address the Ipv6 address
+     * @param ipv4Mask    the Ipv6 arbitrary mask
+ * @return normalized Ipv6 prefix
+ */
+ @Nullable
+ public static Ipv6Prefix normalizeIpv6Arbitrary(@Nullable final Ipv6Address ipv6Address, @Nullable final Ipv6ArbitraryMask ipv4Mask) {
+ if (Objects.isNull(ipv6Address)) {
+ return null;
+ }
+
+ final byte[] address = IetfInetUtil.INSTANCE.ipv6AddressBytes(ipv6Address);
+ final byte[] mask = IpConversionUtil.convertIpv6ArbitraryMaskToByteArray(ipv4Mask);
+ return normalizeIpv6Address(address, mask);
+ }
+
+ /**
+     * Normalize Ipv6 address without mask.
+ *
+ * @param ipv6Address the Ipv6 address
+ * @return normalized Ipv6 address
+ */
+ @Nullable
+ public static Ipv6Address normalizeIpv6AddressWithoutMask(@Nullable final Ipv6Address ipv6Address) {
+ final Ipv6Prefix ipv6Prefix = normalizeIpv6Arbitrary(ipv6Address, null);
+ return Objects.nonNull(ipv6Prefix)
+ ? new Ipv6Address(ipv6Prefix.getValue().split(PREFIX_SEPARATOR)[0])
+ : null;
+ }
+
+ /**
+ * Normalize Ipv4 address with prefix mask (ex. 192.168.0.1/24) and apply prefix mask to Ipv4 address.
+ *
+ * @param ipv4Prefix the Ipv4 prefix
+ * @return normalized Ipv4 prefix
+ */
+ @Nullable
+ public static Ipv4Prefix normalizeIpv4Prefix(@Nullable final Ipv4Prefix ipv4Prefix) {
+ if (Objects.isNull(ipv4Prefix)) {
+ return null;
+ }
+
+ final byte[] address = IetfInetUtil.INSTANCE.ipv4AddressBytes(IpConversionUtil.extractIpv4Address(ipv4Prefix));
+ final byte[] mask = IpConversionUtil.convertArbitraryMaskToByteArray(IpConversionUtil.extractIpv4AddressMask(ipv4Prefix));
+ return normalizeIpv4Address(address, mask);
+ }
+
+ /**
+ * Normalize Ipv4 address and arbitrary mask and apply arbitrary mask to Ipv4 address.
+ *
+ * @param ipv4Address the Ipv4 address
+ * @param ipv4Mask the Ipv4 mask
+ * @return normalized Ipv4 prefix
+ */
+ @Nullable
+ public static Ipv4Prefix normalizeIpv4Arbitrary(@Nullable final Ipv4Address ipv4Address, @Nullable final DottedQuad ipv4Mask) {
+ if (Objects.isNull(ipv4Address)) {
+ return null;
+ }
+
+ final byte[] address = IetfInetUtil.INSTANCE.ipv4AddressBytes(ipv4Address);
+ final byte[] mask = IpConversionUtil.convertArbitraryMaskToByteArray(ipv4Mask);
+ return normalizeIpv4Address(address, mask);
+ }
+
+ /**
+ * Normalize Ipv4 address and arbitrary mask in byte array format and apply arbitrary mask to Ipv4 address.
+ *
+ * @param address Ipv4 address byte array
+ * @param mask Ipv4 mask byte array
+ * @return normalized Ipv4 prefix
+ */
+ @Nullable
+ public static Ipv4Prefix normalizeIpv4Address(@Nullable final byte[] address, @Nullable final byte[] mask) {
+ final String addressPrefix = normalizeInetAddressWithMask(normalizeIpAddress(address, mask), mask);
+
+ if (Objects.isNull(addressPrefix)) {
+ return null;
+ }
+
+ return new Ipv4Prefix(addressPrefix);
+ }
+
+
+ /**
+ * Normalize Ipv6 address and arbitrary mask in byte array format and apply arbitrary mask to Ipv6 address.
+ *
+ * @param address Ipv6 address byte array
+ * @param mask Ipv6 mask byte array
+ * @return normalized Ipv6 prefix
+ */
+ @Nullable
+ public static Ipv6Prefix normalizeIpv6Address(@Nullable final byte[] address, @Nullable final byte[] mask) {
+ final String addressPrefix = normalizeInetAddressWithMask(normalizeIpAddress(address, mask), mask);
+
+ if (Objects.isNull(addressPrefix)) {
+ return null;
+ }
+
+ return new Ipv6Prefix(addressPrefix);
+ }
+
+ /**
+ * Normalize generic IP address and arbitrary mask in byte array format and apply arbitrary mask to IP address.
+ *
+ * @param address address byte array
+ * @param mask mask byte array
+ * @return normalized Inet address
+ */
+ @Nullable
+ public static InetAddress normalizeIpAddress(@Nullable final byte[] address, @Nullable final byte[] mask) {
+ if (Objects.isNull(address)) {
+ return null;
+ }
+
+ final byte[] result = new byte[address.length];
+
+ for (int i = 0; i < address.length; i++) {
+ result[i] = Objects.nonNull(mask) ?
+ (byte) (address[i] & mask[i]) :
+ address[i];
+ }
+
+ try {
+ return InetAddress.getByAddress(result);
+ } catch (UnknownHostException e) {
+ LOG.warn("Failed to recognize the host while normalizing IP address from bytes ", e);
+ return null;
+ }
+ }
+
+ /**
+     * Convert arbitrary mask to prefix mask and append it to the textual representation of an Inet address.
+ *
+ * @param address the address
+ * @param mask the mask
+ * @return the string
+ */
+ @Nullable
+ public static String normalizeInetAddressWithMask(@Nullable final InetAddress address, @Nullable final byte[] mask) {
+ if (Objects.isNull(address)) {
+ return null;
+ }
+
+ return address.getHostAddress() +
+ (Objects.nonNull(mask)
+ ? PREFIX_SEPARATOR + String.valueOf(IpConversionUtil.countBits(mask))
+ : "");
+ }
+
+ /**
+     * Convert a MAC address to its lowercase format.
+ *
+ * @param macAddress the MAC address
+ * @return normalized MAC address
+ */
+ @Nullable
+ public static MacAddress normalizeMacAddress(@Nullable final MacAddress macAddress) {
+ if (Objects.isNull(macAddress)) {
+ return null;
+ }
+
+ return new MacAddress(macAddress.getValue().toLowerCase());
+ }
+
+ /**
+     * Convert a MAC address mask to its lowercase format; if it is the full-F mask, return null.
+ *
+ * @param macAddress the MAC address
+ * @return normalized MAC address
+ */
+ @Nullable
+ public static MacAddress normalizeMacAddressMask(@Nullable final MacAddress macAddress) {
+ final MacAddress normalizedMacAddress = normalizeMacAddress(macAddress);
+
+ if (Objects.isNull(normalizedMacAddress)) {
+ return null;
+ }
+
+ if (NO_ETH_MASK.equals(normalizedMacAddress.getValue())) {
+ return null;
+ }
+
+ return normalizedMacAddress;
+ }
+
+}
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueue;
import org.opendaylight.openflowplugin.api.ConnectionException;
final Capabilities capabilities = connectionContext.getFeatures().getCapabilities();
LOG.debug("Setting capabilities for device {}", deviceInfo.getNodeId());
DeviceStateUtil.setDeviceStateBasedOnV13Capabilities(deviceState, capabilities);
- createDeviceFeaturesForOF13(deviceContext, switchFeaturesMandatory, convertorExecutor).get();
+ try {
+ // Collect device feature
+ if (createDeviceFeaturesForOF13(
+ deviceContext, switchFeaturesMandatory, convertorExecutor).get(30,TimeUnit.SECONDS) == null){
+ LOG.warn("Device features are empty for node {}, returning an unexpected exception.", deviceInfo
+ .getLOGValue());
+ throw new ExecutionException(new Exception("Device features were not retrieved."));
+ }
+ }catch (TimeoutException e){
+ LOG.warn("Timeout occurred while retrieving features for node {}.", deviceInfo.getLOGValue());
+ throw new ExecutionException(new TimeoutException("Device features were not retrieved in time"));
+ }
} else {
throw new ExecutionException(new ConnectionException("Unsupported version " + version));
}
+++ /dev/null
-/**
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.openflowplugin.impl.util;
-
-/**
- * 4B base + 4B mask wrapper
- */
-public class IntegerIpAddress {
-
- int ip;
- int mask;
-
- public IntegerIpAddress(final int ip, final int mask) {
- this.ip = ip;
- this.mask = mask;
- }
-
- public int getIp() {
- return ip;
- }
-
- public int getMask() {
- return mask;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 IBM Corporation and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.openflowplugin.impl.util;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
-
-import java.util.ArrayList;
-import java.util.Collection;
-
-/**
- * Provides comparator for comparing according to various {@link Match} attributes
- *
- */
-public final class MatchComparatorFactory {
-
- private MatchComparatorFactory() {
- // NOOP
- }
-
- private static final Collection<SimpleComparator<Match>> MATCH_COMPARATORS = new ArrayList<>();
- static {
- MATCH_COMPARATORS.add(MatchComparatorFactory.createEthernet());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createIcmpv4());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createInPhyPort());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createInPort());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createIp());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createL3());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createL4());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createProtocolMatchFields());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createMetadata());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createNull());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createTunnel());
- MATCH_COMPARATORS.add(MatchComparatorFactory.createVlan());
- }
-
- public static SimpleComparator<Match> createNull() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by whole object
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- return (statsMatch == null) == (storedMatch == null);
- }
- };
- }
-
- public static SimpleComparator<Match> createVlan() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by VLAN
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getVlanMatch() == null) {
- if (statsMatch.getVlanMatch() != null) {
- return false;
- }
- } else if (!storedMatch.getVlanMatch().equals(statsMatch.getVlanMatch())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createTunnel() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by tunnel
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getTunnel() == null) {
- if (statsMatch.getTunnel() != null) {
- return false;
- }
- } else if (!storedMatch.getTunnel().equals(statsMatch.getTunnel())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createProtocolMatchFields() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by protocol fields
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getProtocolMatchFields() == null) {
- if (statsMatch.getProtocolMatchFields() != null) {
- return false;
- }
- } else if (!storedMatch.getProtocolMatchFields().equals(statsMatch.getProtocolMatchFields())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createMetadata() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by metadata
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getMetadata() == null) {
- if (statsMatch.getMetadata() != null) {
- return false;
- }
- } else if (!storedMatch.getMetadata().equals(statsMatch.getMetadata())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createL4() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by layer4
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getLayer4Match() == null) {
- if (statsMatch.getLayer4Match() != null) {
- return false;
- }
- } else if (!storedMatch.getLayer4Match().equals(statsMatch.getLayer4Match())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createL3() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by layer3
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getLayer3Match() == null) {
- if (statsMatch.getLayer3Match() != null) {
- return false;
- }
- } else if (!MatchComparatorHelper.layer3MatchEquals(statsMatch.getLayer3Match(), storedMatch.getLayer3Match())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createIp() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by Ip
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getIpMatch() == null) {
- if (statsMatch.getIpMatch() != null) {
- return false;
- }
- } else if (!storedMatch.getIpMatch().equals(statsMatch.getIpMatch())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createInPort() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by InPort
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getInPort() == null) {
- if (statsMatch.getInPort() != null) {
- return false;
- }
- } else if (!storedMatch.getInPort().equals(statsMatch.getInPort())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createInPhyPort() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by InPhyPort
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getInPhyPort() == null) {
- if (statsMatch.getInPhyPort() != null) {
- return false;
- }
- } else if (!storedMatch.getInPhyPort().equals(statsMatch.getInPhyPort())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createEthernet() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by Ethernet
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getEthernetMatch() == null) {
- if (statsMatch.getEthernetMatch() != null) {
- return false;
- }
- } else if (!MatchComparatorHelper.ethernetMatchEquals(statsMatch.getEthernetMatch(), storedMatch.getEthernetMatch())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createIcmpv4() {
- return new SimpleComparator<Match>() {
- /**
- * Comparation by Icmpv4
- */
- @Override
- public boolean areObjectsEqual(Match statsMatch, Match storedMatch) {
- if (storedMatch == null) {
- return false;
- }
- if (storedMatch.getIcmpv4Match() == null) {
- if (statsMatch.getIcmpv4Match() != null) {
- return false;
- }
- } else if (!storedMatch.getIcmpv4Match().equals(statsMatch.getIcmpv4Match())) {
- return false;
- }
- return true;
- }
- };
- }
-
- public static SimpleComparator<Match> createMatch() {
- return new SimpleComparator<Match>() {
- /**
- * Compares flows by whole match
- */
- @Override
- public boolean areObjectsEqual(final Match statsFlow, final Match storedFlow) {
- if (statsFlow == null) {
- if (storedFlow != null) {
- return false;
- }
- } else if (!compareMatches(statsFlow, storedFlow)) {
- return false;
- }
- return true;
- }
- };
- }
-
-
- /**
- * Explicit equals method to compare the 'match' for flows stored in the data-stores and flow fetched from the switch.
- * Flow installation process has three steps
- * 1) Store flow in config data store
- * 2) and send it to plugin for installation
- * 3) Flow gets installed in switch
- *
- * The flow user wants to install and what finally gets installed in switch can be slightly different.
- * E.g, If user installs flow with src/dst ip=10.0.0.1/24, when it get installed in the switch
- * src/dst ip will be changes to 10.0.0.0/24 because of netmask of 24. When statistics manager fetch
- * stats it gets 10.0.0.0/24 rather then 10.0.0.1/24. Custom match takes care of by using masked ip
- * while comparing two ip addresses.
- *
- * Sometimes when user don't provide few values that is required by flow installation request, like
- * priority,hard timeout, idle timeout, cookies etc, plugin usages default values before sending
- * request to the switch. So when statistics manager gets flow statistics, it gets the default value.
- * But the flow stored in config data store don't have those defaults value. I included those checks
- * in the customer flow/match equal function.
- *
- *
- * @param statsMatch
- * @param storedMatch
- * @return
- */
- private static boolean compareMatches(final Match statsMatch, final Match storedMatch) {
- if (statsMatch == storedMatch) {
- return true;
- }
-
- for (SimpleComparator<Match> matchComp : MATCH_COMPARATORS) {
- if (!matchComp.areObjectsEqual(statsMatch, storedMatch)) {
- return false;
- }
- }
- return true;
- }
-}
+++ /dev/null
-/**
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.openflowplugin.impl.util;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.net.InetAddresses;
-import com.google.common.primitives.UnsignedBytes;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv4Address;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv4Prefix;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv6Address;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv6Prefix;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.DottedQuad;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.MacAddress;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.MacAddressFilter;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatch;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.Layer3Match;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.ArpMatch;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4Match;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4MatchArbitraryBitMask;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv6Match;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv6MatchArbitraryBitMask;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.opendaylight.ipv6.arbitrary.bitmask.fields.rev160224.Ipv6ArbitraryMask;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.math.BigInteger;
-import java.net.Inet4Address;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.Arrays;
-
-/**
- * @author joe
- * @author sai.marapareddy@gmail.com
- *
- */
-public class MatchComparatorHelper {
-
- private static final Logger LOG = LoggerFactory.getLogger(MatchComparatorHelper.class);
- private static final int DEFAULT_SUBNET = 32;
- private static final int IPV4_MASK_LENGTH = 32;
- private static final int SHIFT_OCTET_1 = 24;
- private static final int SHIFT_OCTET_2 = 16;
- private static final int SHIFT_OCTET_3 = 8;
- private static final int SHIFT_OCTET_4 = 0;
- private static final int POSITION_OCTET_1 = 0;
- private static final int POSITION_OCTET_2 = 1;
- private static final int POSITION_OCTET_3 = 2;
- private static final int POSITION_OCTET_4 = 3;
- private static final String DEFAULT_ARBITRARY_BIT_MASK = "255.255.255.255";
- private static final String DEFAULT_IPV6_ARBITRARY_BIT_MASK = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff";
- private static final String PREFIX_SEPARATOR = "/";
- private static final int IPV4_ADDRESS_LENGTH = 32;
- private static final int IPV6_ADDRESS_LENGTH = 128;
- private static final int BYTE_SIZE = 8;
-
- /*
- * Custom EthernetMatch is required because mac address string provided by user in EthernetMatch can be in any case
- * (upper or lower or mix). Ethernet Match which controller receives from switch is always an upper case string.
- * Default EthernetMatch equals doesn't use equalsIgnoreCase() and hence it fails. E.g User provided mac address
- * string in flow match is aa:bb:cc:dd:ee:ff and when controller fetch statistic data, openflow driver library
- * returns AA:BB:CC:DD:EE:FF and default eqauls fails here.
- */
- @VisibleForTesting
- static boolean ethernetMatchEquals(final EthernetMatch statsEthernetMatch, final EthernetMatch storedEthernetMatch) {
- boolean verdict = true;
- final Boolean checkNullValues = checkNullValues(statsEthernetMatch, storedEthernetMatch);
- if (checkNullValues != null) {
- verdict = checkNullValues;
- } else {
- verdict = ethernetMatchFieldsEquals(statsEthernetMatch.getEthernetSource(),
- storedEthernetMatch.getEthernetSource());
- if (verdict) {
- verdict = ethernetMatchFieldsEquals(statsEthernetMatch.getEthernetDestination(),
- storedEthernetMatch.getEthernetDestination());
- }
- if (verdict) {
- if (statsEthernetMatch.getEthernetType() == null) {
- if (storedEthernetMatch.getEthernetType() != null) {
- verdict = false;
- }
- } else {
- verdict = statsEthernetMatch.getEthernetType().equals(storedEthernetMatch.getEthernetType());
- }
- }
- }
- return verdict;
- }
-
- static boolean ethernetMatchFieldsEquals(final MacAddressFilter statsEthernetMatchFields,
- final MacAddressFilter storedEthernetMatchFields) {
- boolean verdict = true;
- final Boolean checkNullValues = checkNullValues(statsEthernetMatchFields, storedEthernetMatchFields);
- if (checkNullValues != null) {
- verdict = checkNullValues;
- } else {
- verdict = macAddressEquals(statsEthernetMatchFields.getAddress(), storedEthernetMatchFields.getAddress());
- if (verdict) {
- verdict = macAddressEquals(statsEthernetMatchFields.getMask(), storedEthernetMatchFields.getMask());
- }
- }
- return verdict;
- }
-
- static boolean macAddressEquals(final MacAddress statsMacAddress, final MacAddress storedMacAddress) {
- boolean verdict = true;
- final Boolean checkNullValues = checkNullValues(statsMacAddress, storedMacAddress);
- if (checkNullValues != null) {
- verdict = checkNullValues;
- } else {
- verdict = statsMacAddress.getValue().equalsIgnoreCase(storedMacAddress.getValue());
- }
- return verdict;
- }
-
- @VisibleForTesting
- static boolean layer3MatchEquals(final Layer3Match statsLayer3Match, final Layer3Match storedLayer3Match) {
- boolean verdict = true;
- if (statsLayer3Match instanceof Ipv4Match && storedLayer3Match instanceof Ipv4Match) {
- final Ipv4Match statsIpv4Match = (Ipv4Match) statsLayer3Match;
- final Ipv4Match storedIpv4Match = (Ipv4Match) storedLayer3Match;
- verdict = MatchComparatorHelper.compareIpv4PrefixNullSafe(storedIpv4Match.getIpv4Destination(),
- statsIpv4Match.getIpv4Destination());
- if (verdict) {
- verdict = MatchComparatorHelper.compareIpv4PrefixNullSafe(statsIpv4Match.getIpv4Source(),
- storedIpv4Match.getIpv4Source());
- }
- } else if (statsLayer3Match instanceof Ipv6Match && storedLayer3Match instanceof Ipv6Match) {
- final Ipv6Match statsIpv6Match = (Ipv6Match) statsLayer3Match;
- final Ipv6Match storedIpv6Match = (Ipv6Match) storedLayer3Match;
- verdict = MatchComparatorHelper.compareIpv6PrefixNullSafe(storedIpv6Match.getIpv6Destination(),
- statsIpv6Match.getIpv6Destination());
- if (verdict) {
- verdict = MatchComparatorHelper.compareIpv6PrefixNullSafe(statsIpv6Match.getIpv6Source(),
- storedIpv6Match.getIpv6Source());
- }
- } else if (statsLayer3Match instanceof Ipv4MatchArbitraryBitMask && storedLayer3Match instanceof Ipv4MatchArbitraryBitMask) {
- // At this moment storedIpv4MatchArbitraryBitMask & statsIpv4MatchArbitraryBitMask will always have non null arbitrary masks.
- // In case of no / null arbitrary mask, statsLayer3Match will be an instance of Ipv4Match.
- // Eg:- stats -> 1.0.1.0/255.0.255.0 stored -> 1.1.1.0/255.0.255.0
- final Ipv4MatchArbitraryBitMask statsIpv4MatchArbitraryBitMask= (Ipv4MatchArbitraryBitMask) statsLayer3Match;
- final Ipv4MatchArbitraryBitMask storedIpv4MatchArbitraryBitMask = (Ipv4MatchArbitraryBitMask) storedLayer3Match;
- if ((storedIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask() != null |
- storedIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask() != null)) {
- if (storedIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask() != null) {
- String storedDstIpAddress = normalizeIpv4Address(storedIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask(),
- storedIpv4MatchArbitraryBitMask.getIpv4DestinationArbitraryBitmask());
- String statsDstIpAddress = normalizeIpv4Address(statsIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask(),
- statsIpv4MatchArbitraryBitMask.getIpv4DestinationArbitraryBitmask());
- if (MatchComparatorHelper.compareStringNullSafe(storedIpv4MatchArbitraryBitMask.getIpv4DestinationArbitraryBitmask().getValue(),
- statsIpv4MatchArbitraryBitMask.getIpv4DestinationArbitraryBitmask().getValue())) {
- verdict = MatchComparatorHelper.compareStringNullSafe(storedDstIpAddress,
- statsDstIpAddress);
- } else {
- verdict = false;
- return verdict;
- }
- }
- if (storedIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask() != null) {
- String storedSrcIpAddress = normalizeIpv4Address(storedIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask()
- ,storedIpv4MatchArbitraryBitMask.getIpv4SourceArbitraryBitmask());
- String statsSrcIpAddress = normalizeIpv4Address(statsIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask()
- ,statsIpv4MatchArbitraryBitMask.getIpv4SourceArbitraryBitmask());
- if (MatchComparatorHelper.compareStringNullSafe(storedIpv4MatchArbitraryBitMask.getIpv4SourceArbitraryBitmask().getValue(),
- statsIpv4MatchArbitraryBitMask.getIpv4SourceArbitraryBitmask().getValue())) {
- verdict = MatchComparatorHelper.compareStringNullSafe(storedSrcIpAddress,
- statsSrcIpAddress);
- } else {
- verdict = false;
- }
- }
- } else {
- final Boolean nullCheckOut = checkNullValues(storedLayer3Match, statsLayer3Match);
- if (nullCheckOut != null) {
- verdict = nullCheckOut;
- } else {
- verdict = storedLayer3Match.equals(statsLayer3Match);
- }
- }
- } else if (statsLayer3Match instanceof Ipv4Match && storedLayer3Match instanceof Ipv4MatchArbitraryBitMask) {
- // Here stored netmask is an instance of Ipv4MatchArbitraryBitMask, when it is pushed in to switch
- // it automatically converts it in to cidr format in case of certain subnet masks ( consecutive ones or zeroes)
- // Eg:- stats src/dest -> 1.1.1.0/24 stored src/dest -> 1.1.1.0/255.255.255.0
- final Ipv4Match statsIpv4Match = (Ipv4Match) statsLayer3Match;
- final Ipv4MatchArbitraryBitMask storedIpv4MatchArbitraryBitMask = (Ipv4MatchArbitraryBitMask) storedLayer3Match;
- if (storedIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask() != null) {
- Ipv4Prefix ipv4PrefixDestination;
- if (storedIpv4MatchArbitraryBitMask.getIpv4DestinationArbitraryBitmask() != null) {
- byte[] destByteMask = convertArbitraryMaskToByteArray(storedIpv4MatchArbitraryBitMask.getIpv4DestinationArbitraryBitmask());
- ipv4PrefixDestination = createPrefix(storedIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask(), destByteMask);
- } else {
- ipv4PrefixDestination = createPrefix(storedIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask());
- }
- verdict = MatchComparatorHelper.compareIpv4PrefixNullSafe(ipv4PrefixDestination, statsIpv4Match.getIpv4Destination());
- if (verdict == false) {
- return verdict;
- }
- }
- if (storedIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask() != null) {
- Ipv4Prefix ipv4PrefixSource;
- if (storedIpv4MatchArbitraryBitMask.getIpv4SourceArbitraryBitmask() != null) {
- byte[] srcByteMask = convertArbitraryMaskToByteArray(storedIpv4MatchArbitraryBitMask.getIpv4SourceArbitraryBitmask());
- ipv4PrefixSource = createPrefix(storedIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask(), srcByteMask);
- } else {
- ipv4PrefixSource = createPrefix(storedIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask());
- }
- verdict = MatchComparatorHelper.compareIpv4PrefixNullSafe(ipv4PrefixSource, statsIpv4Match.getIpv4Source());
- }
- } else if (statsLayer3Match instanceof Ipv4MatchArbitraryBitMask && storedLayer3Match instanceof Ipv4Match) {
- // Here stored netmask is an instance of Ipv4MatchArbitraryBitMask, when it is pushed in to switch
- // it automatically converts it in to cidr format in case of certain subnet masks ( consecutive ones or zeroes)
- // Eg:- stats src/dest -> 1.1.1.0/24 stored src/dest -> 1.1.1.0/255.255.255.0
- final Ipv4Match storedIpv4Match = (Ipv4Match) storedLayer3Match;
- final Ipv4MatchArbitraryBitMask statsIpv4MatchArbitraryBitMask = (Ipv4MatchArbitraryBitMask) statsLayer3Match;
- if (statsIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask() != null) {
- Ipv4Prefix ipv4PrefixDestination;
- if (statsIpv4MatchArbitraryBitMask.getIpv4DestinationArbitraryBitmask() != null) {
- byte[] destByteMask = convertArbitraryMaskToByteArray(statsIpv4MatchArbitraryBitMask.getIpv4DestinationArbitraryBitmask());
- ipv4PrefixDestination = createPrefix(statsIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask(), destByteMask);
- } else {
- ipv4PrefixDestination = createPrefix(statsIpv4MatchArbitraryBitMask.getIpv4DestinationAddressNoMask());
- }
- verdict = MatchComparatorHelper.compareIpv4PrefixNullSafe(ipv4PrefixDestination, storedIpv4Match.getIpv4Destination());
- if (verdict == false) {
- return verdict;
- }
- }
- if (statsIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask() != null) {
- Ipv4Prefix ipv4PrefixSource;
- if (statsIpv4MatchArbitraryBitMask.getIpv4SourceArbitraryBitmask() != null) {
- byte[] srcByteMask = convertArbitraryMaskToByteArray(statsIpv4MatchArbitraryBitMask.getIpv4SourceArbitraryBitmask());
- ipv4PrefixSource = createPrefix(statsIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask(), srcByteMask);
- } else {
- ipv4PrefixSource = createPrefix(statsIpv4MatchArbitraryBitMask.getIpv4SourceAddressNoMask());
- }
- verdict = MatchComparatorHelper.compareIpv4PrefixNullSafe(ipv4PrefixSource, storedIpv4Match.getIpv4Source());
- }
- } else if (statsLayer3Match instanceof Ipv6MatchArbitraryBitMask && storedLayer3Match instanceof Ipv6MatchArbitraryBitMask) {
- // At this moment storedIpv6MatchArbitraryBitMask & statsIpv6MatchArbitraryBitMask will always have non null arbitrary masks.
- // In case of no / null arbitrary mask, statsLayer3Match will be an instance of Ipv6Match.
- // Eg:- stats src/dest -> 2001:2001:2001:2001:2001:2001:2001:2001/FFFF:FFFF:FFFF:FFFF:0000:FFFF:FFFF:FFF0
- // stored src/dest -> 2001:2001:2001:2001:2001:2001:2001:2001/FFFF:FFFF:FFFF:FFFF:0000:FFFF:FFFF:FFF0
- final Ipv6MatchArbitraryBitMask statsIpv6MatchArbitraryBitMask= (Ipv6MatchArbitraryBitMask) statsLayer3Match;
- final Ipv6MatchArbitraryBitMask storedIpv6MatchArbitraryBitMask = (Ipv6MatchArbitraryBitMask) storedLayer3Match;
- if ((storedIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask() != null |
- storedIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask() != null)) {
- if (storedIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask() != null) {
- String storedDstIpAddress = normalizeIpv6Address(storedIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask(),
- storedIpv6MatchArbitraryBitMask.getIpv6DestinationArbitraryBitmask());
- String statsDstIpAddress = normalizeIpv6Address(statsIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask(),
- statsIpv6MatchArbitraryBitMask.getIpv6DestinationArbitraryBitmask());
- String storedDstMask = extractIpv6CanonicalForm(storedIpv6MatchArbitraryBitMask.
- getIpv6DestinationArbitraryBitmask().getValue()).getHostAddress();
- String statsDstMask = extractIpv6CanonicalForm(statsIpv6MatchArbitraryBitMask.
- getIpv6DestinationArbitraryBitmask().getValue()).getHostAddress();
- if (MatchComparatorHelper.compareStringNullSafe(storedDstMask,statsDstMask)) {
- verdict = MatchComparatorHelper.compareStringNullSafe(storedDstIpAddress,
- statsDstIpAddress);
- } else {
- verdict = false;
- return verdict;
- }
- }
- if (storedIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask() != null) {
- String storedSrcIpAddress = normalizeIpv6Address(storedIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask()
- ,storedIpv6MatchArbitraryBitMask.getIpv6SourceArbitraryBitmask());
- String statsSrcIpAddress = normalizeIpv6Address(statsIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask()
- ,statsIpv6MatchArbitraryBitMask.getIpv6SourceArbitraryBitmask());
- String storedSrcMask = extractIpv6CanonicalForm(storedIpv6MatchArbitraryBitMask.
- getIpv6SourceArbitraryBitmask().getValue()).getHostAddress();
- String statsSrcMask = extractIpv6CanonicalForm(statsIpv6MatchArbitraryBitMask.
- getIpv6SourceArbitraryBitmask().getValue()).getHostAddress();
- if (MatchComparatorHelper.compareStringNullSafe(storedSrcMask, statsSrcMask)) {
- verdict = MatchComparatorHelper.compareStringNullSafe(storedSrcIpAddress,
- statsSrcIpAddress);
- } else {
- verdict = false;
- }
- }
- } else {
- final Boolean nullCheckOut = checkNullValues(storedLayer3Match, statsLayer3Match);
- if (nullCheckOut != null) {
- verdict = nullCheckOut;
- } else {
- verdict = storedLayer3Match.equals(statsLayer3Match);
- }
- }
- } else if (statsLayer3Match instanceof Ipv6Match && storedLayer3Match instanceof Ipv6MatchArbitraryBitMask) {
- // Here stored netmask is an instance of Ipv6MatchArbitraryBitMask, when it is pushed in to switch
- // it automatically converts it in to cidr format in case of certain subnet masks ( consecutive ones or zeroes)
- // Eg:- stats src/dest -> 2001:2001:2001:2001:2001:2001:2001:2001/124
- // stored src/dest -> 2001:2001:2001:2001:2001:2001:2001:2001/FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFF0
- final Ipv6Match statsIpv6Match = (Ipv6Match) statsLayer3Match;
- final Ipv6MatchArbitraryBitMask storedIpv6MatchArbitraryBitMask = (Ipv6MatchArbitraryBitMask) storedLayer3Match;
- if (storedIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask() != null) {
- Ipv6Prefix ipv6PrefixDestination;
- if (storedIpv6MatchArbitraryBitMask.getIpv6DestinationArbitraryBitmask() != null) {
- byte[] destByteMask = convertIpv6ArbitraryMaskToByteArray(storedIpv6MatchArbitraryBitMask.getIpv6DestinationArbitraryBitmask());
- ipv6PrefixDestination = createPrefix(storedIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask(), destByteMask);
- } else {
- ipv6PrefixDestination = createPrefix(storedIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask());
- }
- verdict = MatchComparatorHelper.compareIpv6PrefixNullSafe(ipv6PrefixDestination, statsIpv6Match.getIpv6Destination());
- if (verdict == false) {
- return verdict;
- }
- }
- if (storedIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask() != null) {
- Ipv6Prefix ipv6PrefixSource;
- if (storedIpv6MatchArbitraryBitMask.getIpv6SourceArbitraryBitmask() != null) {
- byte[] srcByteMask = convertIpv6ArbitraryMaskToByteArray(storedIpv6MatchArbitraryBitMask.getIpv6SourceArbitraryBitmask());
- ipv6PrefixSource = createPrefix(storedIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask(), srcByteMask);
- } else {
- ipv6PrefixSource = createPrefix(storedIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask());
- }
- verdict = MatchComparatorHelper.compareIpv6PrefixNullSafe(ipv6PrefixSource, statsIpv6Match.getIpv6Source());
- }
- } else if (statsLayer3Match instanceof Ipv6MatchArbitraryBitMask && storedLayer3Match instanceof Ipv6Match) {
- // Here stored netmask is an instance of Ipv6MatchArbitraryBitMask, when it is pushed in to switch
- // it automatically converts it in to cidr format in case of certain subnet masks ( consecutive ones or zeroes)
- // Eg:- stats src/dest -> 2001:2001:2001:2001:2001:2001:2001:2001/124
- // stored src/dest -> 2001:2001:2001:2001:2001:2001:2001:2001/FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFF0
- final Ipv6Match storedIpv6Match = (Ipv6Match) storedLayer3Match;
- final Ipv6MatchArbitraryBitMask statsIpv6MatchArbitraryBitMask = (Ipv6MatchArbitraryBitMask) statsLayer3Match;
- if (statsIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask() != null) {
- Ipv6Prefix ipv6PrefixDestination;
- if (statsIpv6MatchArbitraryBitMask.getIpv6DestinationArbitraryBitmask() != null) {
- byte[] destByteMask = convertIpv6ArbitraryMaskToByteArray(statsIpv6MatchArbitraryBitMask.getIpv6DestinationArbitraryBitmask());
- ipv6PrefixDestination = createPrefix(statsIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask(), destByteMask);
- } else {
- ipv6PrefixDestination = createPrefix(statsIpv6MatchArbitraryBitMask.getIpv6DestinationAddressNoMask());
- }
- verdict = MatchComparatorHelper.compareIpv6PrefixNullSafe(ipv6PrefixDestination, storedIpv6Match.getIpv6Destination());
- if (verdict == false) {
- return verdict;
- }
- }
- if (statsIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask() != null) {
- Ipv6Prefix ipv6PrefixSource;
- if (statsIpv6MatchArbitraryBitMask.getIpv6SourceArbitraryBitmask() != null) {
- byte[] srcByteMask = convertIpv6ArbitraryMaskToByteArray(statsIpv6MatchArbitraryBitMask.getIpv6SourceArbitraryBitmask());
- ipv6PrefixSource = createPrefix(statsIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask(), srcByteMask);
- } else {
- ipv6PrefixSource = createPrefix(statsIpv6MatchArbitraryBitMask.getIpv6SourceAddressNoMask());
- }
- verdict = MatchComparatorHelper.compareIpv6PrefixNullSafe(ipv6PrefixSource, storedIpv6Match.getIpv6Source());
- }
- } else if (statsLayer3Match instanceof ArpMatch && storedLayer3Match instanceof ArpMatch) {
- verdict = arpMatchEquals((ArpMatch)statsLayer3Match, (ArpMatch)storedLayer3Match);
- } else {
- final Boolean nullCheckOut = checkNullValues(storedLayer3Match, statsLayer3Match);
- if (nullCheckOut != null) {
- verdict = nullCheckOut;
- } else {
- verdict = storedLayer3Match.equals(statsLayer3Match);
- }
- }
- return verdict;
- }
-
- static boolean arpMatchEquals(final ArpMatch statsArpMatch, final ArpMatch storedArpMatch) {
-
- Integer statsOp = statsArpMatch.getArpOp();
- Integer storedOp = storedArpMatch.getArpOp();
-
- Boolean nullCheck = checkNullValues(statsOp, storedOp);
- if (nullCheck != null) {
- if (nullCheck == false) {
- return false;
- }
- } else if (!statsOp.equals(storedOp)) {
- return false;
- }
-
- Ipv4Prefix statsIp = statsArpMatch.getArpSourceTransportAddress();
- Ipv4Prefix storedIp = storedArpMatch.getArpSourceTransportAddress();
- if (!compareIpv4PrefixNullSafe(statsIp, storedIp)) {
- return false;
- }
-
- statsIp = statsArpMatch.getArpTargetTransportAddress();
- storedIp = storedArpMatch.getArpTargetTransportAddress();
- if (!compareIpv4PrefixNullSafe(statsIp, storedIp)) {
- return false;
- }
-
- MacAddressFilter statsMac = statsArpMatch.getArpSourceHardwareAddress();
- MacAddressFilter storedMac = storedArpMatch.getArpSourceHardwareAddress();
- if (!ethernetMatchFieldsEquals(statsMac, storedMac)) {
- return false;
- }
-
- statsMac = statsArpMatch.getArpTargetHardwareAddress();
- storedMac = storedArpMatch.getArpTargetHardwareAddress();
- if (!ethernetMatchFieldsEquals(statsMac, storedMac)) {
- return false;
- }
-
- return true;
- }
-
-
- /**
- * TODO: why don't we use the default Ipv4Prefix.equals()?
- *
- * @param statsIpAddress
- * @param storedIpAddress
- * @return true if IPv4prefixes equals
- */
- static boolean IpAddressEquals(final Ipv4Prefix statsIpAddress, final Ipv4Prefix storedIpAddress) {
- final IntegerIpAddress statsIpAddressInt = MatchComparatorHelper.strIpToIntIp(statsIpAddress.getValue());
- final IntegerIpAddress storedIpAddressInt = MatchComparatorHelper.strIpToIntIp(storedIpAddress.getValue());
-
- if (ipAndMaskBasedMatch(statsIpAddressInt, storedIpAddressInt)) {
- return true;
- }
- if (ipBasedMatch(statsIpAddressInt, storedIpAddressInt)) {
- return true;
- }
- return false;
- }
-
- static boolean ipAndMaskBasedMatch(final IntegerIpAddress statsIpAddressInt,
- final IntegerIpAddress storedIpAddressInt) {
- return ((statsIpAddressInt.getIp() & statsIpAddressInt.getMask()) == (storedIpAddressInt.getIp() & storedIpAddressInt
- .getMask()));
- }
-
- static boolean ipBasedMatch(final IntegerIpAddress statsIpAddressInt, final IntegerIpAddress storedIpAddressInt) {
- return (statsIpAddressInt.getIp() == storedIpAddressInt.getIp());
- }
-
-
- private static boolean IpAddressEquals(Ipv6Prefix statsIpv6, Ipv6Prefix storedIpv6) {
- final String[] statsIpMask = statsIpv6.getValue().split("/");
- final String[] storedIpMask = storedIpv6.getValue().split("/");
- if (! (statsIpMask.length > 1 && storedIpMask.length > 1 && statsIpMask[1].equals(storedIpMask[1]))){
- return false;
- }
-
- final int prefix = Integer.parseInt(statsIpMask[1]);
- final int byteIndex = prefix/BYTE_SIZE;
- final int lastByteBits = BYTE_SIZE - (prefix % BYTE_SIZE);
- final InetAddress statsIp = InetAddresses.forString(statsIpMask[0]);
- final InetAddress storedIp = InetAddresses.forString(storedIpMask[0]);
- byte[] statsIpArr = Arrays.copyOfRange(statsIp.getAddress(),0,byteIndex+1);
- byte[] storedIpArr = Arrays.copyOfRange(storedIp.getAddress(),0,byteIndex+1);
- statsIpArr[byteIndex] = (byte) (statsIpArr[byteIndex] & (0XFF << lastByteBits));
- storedIpArr[byteIndex] = (byte) (storedIpArr[byteIndex] & (0XFF << lastByteBits));
- if(Arrays.equals(statsIpArr,storedIpArr)) {
- return true;
- }
- return false;
- }
-
- static Boolean checkNullValues(final Object v1, final Object v2) {
- Boolean verdict = null;
- if (v1 == null && v2 != null) {
- verdict = Boolean.FALSE;
- } else if (v1 != null && v2 == null) {
- verdict = Boolean.FALSE;
- } else if (v1 == null && v2 == null) {
- verdict = Boolean.TRUE;
- }
- return verdict;
- }
-
- static boolean compareIpv4PrefixNullSafe(final Ipv4Prefix statsIpv4, final Ipv4Prefix storedIpv4) {
- boolean verdict = true;
- final Boolean checkDestNullValuesOut = checkNullValues(storedIpv4, statsIpv4);
- if (checkDestNullValuesOut != null) {
- verdict = checkDestNullValuesOut;
- } else if (!IpAddressEquals(statsIpv4, storedIpv4)) {
- verdict = false;
- }
- return verdict;
- }
-
- static boolean compareStringNullSafe(final String stringA, final String stringB) {
- boolean verdict = true;
- final Boolean checkDestNullValuesOut = checkNullValues(stringA,stringB);
- if (checkDestNullValuesOut != null) {
- verdict = checkDestNullValuesOut;
- } else if (!stringA.equals(stringB)) {
- verdict = false;
- }
- return verdict;
- }
-
- private static boolean compareIpv6PrefixNullSafe(Ipv6Prefix statsIpv6, Ipv6Prefix storedIpv6) {
- boolean verdict = true;
- final Boolean checkDestNullValuesOut = checkNullValues(statsIpv6, storedIpv6);
- if (checkDestNullValuesOut != null) {
- verdict = checkDestNullValuesOut;
- } else if (!IpAddressEquals(statsIpv6, storedIpv6)) {
- verdict = false;
- }
- return verdict;
- }
-
- /**
- * Method return integer version of ip address. Converted int will be mask if mask specified
- */
- static IntegerIpAddress strIpToIntIp(final String ipAddresss) {
-
- final String[] parts = ipAddresss.split("/");
- final String ip = parts[0];
- int prefix;
-
- if (parts.length < 2) {
- prefix = DEFAULT_SUBNET;
- } else {
- prefix = Integer.parseInt(parts[1]);
- if (prefix < 0 || prefix > IPV4_MASK_LENGTH) {
- final StringBuilder stringBuilder = new StringBuilder(
- "Valid values for mask are from range 0 - 32. Value ");
- stringBuilder.append(prefix);
- stringBuilder.append(" is invalid.");
- throw new IllegalStateException(stringBuilder.toString());
- }
- }
-
- IntegerIpAddress integerIpAddress = null;
-
- final Inet4Address addr = ((Inet4Address) InetAddresses.forString(ip));
- final byte[] addrBytes = addr.getAddress();
- // FIXME: what is meaning of anding with 0xFF? Probably could be removed.
- final int ipInt = ((addrBytes[POSITION_OCTET_1] & 0xFF) << SHIFT_OCTET_1)
- | ((addrBytes[POSITION_OCTET_2] & 0xFF) << SHIFT_OCTET_2)
- | ((addrBytes[POSITION_OCTET_3] & 0xFF) << SHIFT_OCTET_3)
- | ((addrBytes[POSITION_OCTET_4] & 0xFF) << SHIFT_OCTET_4);
-
- // FIXME: Is this valid?
- final int mask = 0xffffffff << DEFAULT_SUBNET - prefix;
-
- integerIpAddress = new IntegerIpAddress(ipInt, mask);
-
- return integerIpAddress;
- }
-
- static boolean isArbitraryBitMask(byte[] byteMask) {
- if (byteMask == null) {
- return false;
- } else {
- ArrayList<Integer> integerMaskArrayList = new ArrayList<Integer>();
- String maskInBits;
- // converting byte array to bits
- maskInBits = new BigInteger(1, byteMask).toString(2);
- ArrayList<String> stringMaskArrayList = new ArrayList<String>(Arrays.asList(maskInBits.split("(?!^)")));
- for(String string:stringMaskArrayList){
- integerMaskArrayList.add(Integer.parseInt(string));
- }
- return checkArbitraryBitMask(integerMaskArrayList);
- }
- }
-
- static boolean checkArbitraryBitMask(ArrayList<Integer> arrayList) {
- if (arrayList.size()>0 && arrayList.size()< IPV4_MASK_LENGTH ) {
- // checks 0*1* case - Leading zeros in arrayList are truncated
- return true;
- } else {
- //checks 1*0*1 case
- for(int i=0; i<arrayList.size()-1;i++) {
- if(arrayList.get(i) ==0 && arrayList.get(i+1) == 1) {
- return true;
- }
- }
- }
- return false;
- }
-
- static final byte[] convertArbitraryMaskToByteArray(DottedQuad mask) {
- String maskValue;
- if (mask.getValue() != null) {
- maskValue = mask.getValue();
- } else {
- maskValue = DEFAULT_ARBITRARY_BIT_MASK;
- }
- InetAddress maskInIpFormat = null;
- try {
- maskInIpFormat = InetAddress.getByName(maskValue);
- } catch (UnknownHostException e) {
- LOG.error("Failed to recognize the host while converting mask ", e);
- }
- byte[] bytes = maskInIpFormat.getAddress();
- return bytes;
- }
-
- private static final byte[] convertIpv6ArbitraryMaskToByteArray ( final Ipv6ArbitraryMask mask) {
- String maskValue;
- if (mask.getValue() != null) {
- maskValue = mask.getValue();
- } else {
- maskValue = DEFAULT_IPV6_ARBITRARY_BIT_MASK;
- }
- InetAddress maskInIpv6Format = null;
- try {
- maskInIpv6Format = InetAddress.getByName(maskValue);
- } catch (UnknownHostException e) {
- LOG.error("Failed to convert string mask value to ipv6 format ", e);
- }
- return maskInIpv6Format.getAddress();
- }
-
- static String normalizeIpv4Address(Ipv4Address ipAddress, DottedQuad netMask) {
- String actualIpAddress="";
- String[] netMaskParts = netMask.getValue().split("\\.");
- String[] ipAddressParts = ipAddress.getValue().split("\\.");
-
- for (int i=0; i<ipAddressParts.length;i++) {
- int integerFormatIpAddress=Integer.parseInt(ipAddressParts[i]);
- int integerFormatNetMask=Integer.parseInt(netMaskParts[i]);
- int ipAddressPart=(integerFormatIpAddress) & (integerFormatNetMask);
- actualIpAddress += ipAddressPart;
- if (i != ipAddressParts.length -1 ) {
- actualIpAddress = actualIpAddress+".";
- }
- }
- return actualIpAddress;
- }
-
- private static String normalizeIpv6Address(final Ipv6Address ipAddress, final Ipv6ArbitraryMask netMask) {
- byte[] ipAddressParts = convertIpv6ToBytes(ipAddress.getValue());
- byte[] netMaskParts = convertIpv6ToBytes(netMask.getValue());
- byte[] actualIpv6Bytes = new byte[16];
-
- for (int i=0; i<ipAddressParts.length;i++) {
- byte ipAddressPart= (byte) (ipAddressParts[i] & netMaskParts[i]);
- actualIpv6Bytes[i] = ipAddressPart;
- }
- InetAddress ipv6Address = null;
- try {
- ipv6Address = InetAddress.getByAddress(actualIpv6Bytes);
- } catch (UnknownHostException e) {
- LOG.error("Failed to recognize the host while normalizing IPv6 address from bytes ", e);
- }
- return ipv6Address.getHostAddress();
- }
-
- private static byte[] convertIpv6ToBytes(final String ipv6Address) {
- return extractIpv6CanonicalForm(ipv6Address).getAddress();
- }
-
- private static InetAddress extractIpv6CanonicalForm(final String ipv6Address) {
- InetAddress address = null;
- try {
- address = InetAddress.getByName(ipv6Address);
- } catch (UnknownHostException e) {
- LOG.error("Failed to recognize the host while converting IPv6 to bytes ", e);
- }
- return address;
- }
-
- static Ipv4Prefix createPrefix(final Ipv4Address ipv4Address, final byte [] bytemask){
- return createPrefix(ipv4Address, String.valueOf(countBits(bytemask)));
- }
-
- private static Ipv6Prefix createPrefix(final Ipv6Address ipv6Address, final byte [] bytemask) {
- return createPrefix(ipv6Address, String.valueOf(countBits(bytemask)));
- }
-
- private static Ipv6Prefix createPrefix(final Ipv6Address ipv6Address, final String mask) {
- if (mask != null && !mask.isEmpty()) {
- return new Ipv6Prefix(ipv6Address.getValue() + PREFIX_SEPARATOR + mask);
- } else {
- return new Ipv6Prefix(ipv6Address.getValue() + PREFIX_SEPARATOR + IPV6_ADDRESS_LENGTH);
- }
- }
-
- private static Ipv6Prefix createPrefix(final Ipv6Address ipv6Address) {
- return new Ipv6Prefix(ipv6Address.getValue() + PREFIX_SEPARATOR + IPV6_ADDRESS_LENGTH);
- }
-
- static int countBits(final byte[] mask) {
- int netmask = 0;
- for (byte b : mask) {
- netmask += Integer.bitCount(UnsignedBytes.toInt(b));
- }
- return netmask;
- }
-
- static Ipv4Prefix createPrefix(final Ipv4Address ipv4Address){
- return new Ipv4Prefix(ipv4Address.getValue() + PREFIX_SEPARATOR + IPV4_ADDRESS_LENGTH);
- }
-
- static Ipv4Prefix createPrefix(final Ipv4Address ipv4Address, final String mask){
- /*
- * Ipv4Address has already validated the address part of the prefix,
- * It is mandated to comply to the same regexp as the address
- * There is absolutely no point rerunning additional checks vs this
- * Note - there is no canonical form check here!!!
- */
- if (null != mask && !mask.isEmpty()) {
- return new Ipv4Prefix(ipv4Address.getValue() + PREFIX_SEPARATOR + mask);
- } else {
- return new Ipv4Prefix(ipv4Address.getValue() + PREFIX_SEPARATOR + IPV4_ADDRESS_LENGTH);
- }
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import static org.opendaylight.openflowplugin.impl.util.AddressNormalizationUtil.normalizeIpv4Arbitrary;
+import static org.opendaylight.openflowplugin.impl.util.AddressNormalizationUtil.normalizeIpv4Prefix;
+import static org.opendaylight.openflowplugin.impl.util.AddressNormalizationUtil.normalizeIpv6AddressWithoutMask;
+import static org.opendaylight.openflowplugin.impl.util.AddressNormalizationUtil.normalizeIpv6Arbitrary;
+import static org.opendaylight.openflowplugin.impl.util.AddressNormalizationUtil.normalizeIpv6Prefix;
+import static org.opendaylight.openflowplugin.impl.util.AddressNormalizationUtil.normalizeMacAddress;
+import static org.opendaylight.openflowplugin.impl.util.AddressNormalizationUtil.normalizeMacAddressMask;
+import static org.opendaylight.openflowplugin.impl.util.AddressNormalizationUtil.normalizeProtocolAgnosticPort;
+
+import com.google.common.collect.ImmutableMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.annotation.Nonnull;
+import org.opendaylight.openflowplugin.api.OFConstants;
+import org.opendaylight.openflowplugin.extension.api.GroupingLooseResolver;
+import org.opendaylight.openflowplugin.openflow.md.core.extension.ExtensionResolvers;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.arp.match.fields.ArpSourceHardwareAddressBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.arp.match.fields.ArpTargetHardwareAddressBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.ethernet.match.fields.EthernetDestinationBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.ethernet.match.fields.EthernetSourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.ArpMatch;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.ArpMatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4MatchArbitraryBitMask;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv4MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv6Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv6MatchArbitraryBitMask;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.Ipv6MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.TunnelIpv4Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.layer._3.match.TunnelIpv4MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.general.rev140714.GeneralAugMatchNotifUpdateFlowStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.general.rev140714.GeneralAugMatchNotifUpdateFlowStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.openflowplugin.extension.general.rev140714.GeneralExtensionListGrouping;
+import org.opendaylight.yangtools.yang.binding.Augmentation;
+
+/**
+ * Utility class for match normalization
+ */
+public final class MatchNormalizationUtil {
+ // Cache normalizers for common OpenFlow versions
+ private static final Map<Short, Set<Function<MatchBuilder, MatchBuilder>>> NORMALIZERS = ImmutableMap
+ .<Short, Set<Function<MatchBuilder, MatchBuilder>>>builder()
+ .put(OFConstants.OFP_VERSION_1_0, createNormalizers(OFConstants.OFP_VERSION_1_0).collect(Collectors.toSet()))
+ .put(OFConstants.OFP_VERSION_1_3, createNormalizers(OFConstants.OFP_VERSION_1_3).collect(Collectors.toSet()))
+ .build();
+
+ private MatchNormalizationUtil() {
+ throw new RuntimeException("Creating instance of util classes is prohibited");
+ }
+
+ /**
+ * Normalize match.
+ *
+ * @param match the OpenFlow match
+ * @param version the OpenFlow version
+ * @return normalized OpenFlow match
+ */
+ @Nonnull
+ public static Match normalizeMatch(@Nonnull final Match match, final short version) {
+ final MatchBuilder matchBuilder = new MatchBuilder(match);
+
+ Optional.ofNullable(NORMALIZERS.get(version))
+ .orElseGet(() -> createNormalizers(version).collect(Collectors.toSet()))
+ .forEach(normalizer -> normalizer.apply(matchBuilder));
+
+ return matchBuilder.build();
+ }
+
+ @Nonnull
+ private static Stream<Function<MatchBuilder, MatchBuilder>> createNormalizers(final short version) {
+ return Stream.of(
+ MatchNormalizationUtil::normalizeExtensionMatch,
+ MatchNormalizationUtil::normalizeEthernetMatch,
+ MatchNormalizationUtil::normalizeArpMatch,
+ MatchNormalizationUtil::normalizeTunnelIpv4Match,
+ MatchNormalizationUtil::normalizeIpv4Match,
+ MatchNormalizationUtil::normalizeIpv4MatchArbitraryBitMask,
+ MatchNormalizationUtil::normalizeIpv6Match,
+ MatchNormalizationUtil::normalizeIpv6MatchArbitraryBitMask,
+ (match) -> normalizeInPortMatch(match, version),
+ (match) -> normalizeInPhyPortMatch(match, version));
+ }
+
+ @Nonnull
+ @SuppressWarnings("unchecked")
+ private static MatchBuilder normalizeExtensionMatch(@Nonnull final MatchBuilder match) {
+ final GroupingLooseResolver<GeneralExtensionListGrouping> matchExtensionResolver =
+ ExtensionResolvers.getMatchExtensionResolver();
+
+ return matchExtensionResolver
+ .getExtension(match.build())
+ .transform(statExt -> Optional.ofNullable(statExt.getExtensionList())
+ .map(extensionLists -> {
+ matchExtensionResolver.getClasses().forEach(aClass -> match
+ .removeAugmentation((Class<? extends Augmentation<Match>>) aClass));
+
+ return match.addAugmentation(
+ GeneralAugMatchNotifUpdateFlowStats.class,
+ new GeneralAugMatchNotifUpdateFlowStatsBuilder()
+ .setExtensionList(extensionLists)
+ .build());
+ })
+ .orElse(match))
+ .or(match);
+ }
+
+ @Nonnull
+ private static MatchBuilder normalizeInPortMatch(@Nonnull final MatchBuilder match, final short version) {
+ return Optional
+ .ofNullable(match.getInPort())
+ .flatMap(inPort -> Optional.ofNullable(normalizeProtocolAgnosticPort(inPort, version)))
+ .map(inPortUri -> match.setInPort(new NodeConnectorId(inPortUri)))
+ .orElse(match);
+ }
+
+ @Nonnull
+ private static MatchBuilder normalizeInPhyPortMatch(@Nonnull final MatchBuilder match, final short version) {
+ return Optional
+ .ofNullable(match.getInPhyPort())
+ .flatMap(inPhyPort -> Optional.ofNullable(normalizeProtocolAgnosticPort(inPhyPort, version)))
+ .map(inPhyPortUri -> match.setInPhyPort(new NodeConnectorId(inPhyPortUri)))
+ .orElse(match);
+ }
+
+ @Nonnull
+ private static MatchBuilder normalizeArpMatch(@Nonnull final MatchBuilder match) {
+ return Optional
+ .ofNullable(match.getLayer3Match())
+ .filter(ArpMatch.class::isInstance)
+ .map(ArpMatch.class::cast)
+ .map(arp -> match.setLayer3Match(new ArpMatchBuilder(arp)
+ .setArpSourceHardwareAddress(Optional
+ .ofNullable(arp.getArpSourceHardwareAddress())
+ .map(arpSource -> new ArpSourceHardwareAddressBuilder(arpSource)
+ .setAddress(normalizeMacAddress(arpSource.getAddress()))
+ .setMask(normalizeMacAddress(arpSource.getMask()))
+ .build())
+ .orElse(arp.getArpSourceHardwareAddress()))
+ .setArpTargetHardwareAddress(Optional
+ .ofNullable(arp.getArpTargetHardwareAddress())
+ .map(arpTarget -> new ArpTargetHardwareAddressBuilder(arpTarget)
+ .setAddress(normalizeMacAddress(arpTarget.getAddress()))
+ .setMask(normalizeMacAddress(arpTarget.getMask()))
+ .build())
+ .orElse(arp.getArpTargetHardwareAddress()))
+ .setArpSourceTransportAddress(normalizeIpv4Prefix(arp.getArpSourceTransportAddress()))
+ .setArpTargetTransportAddress(normalizeIpv4Prefix(arp.getArpTargetTransportAddress()))
+ .build())
+ )
+ .orElse(match);
+ }
+
+
+ @Nonnull
+ private static MatchBuilder normalizeTunnelIpv4Match(@Nonnull final MatchBuilder match) {
+ return Optional
+ .ofNullable(match.getLayer3Match())
+ .filter(TunnelIpv4Match.class::isInstance)
+ .map(TunnelIpv4Match.class::cast)
+ .map(tunnelIpv4 -> match.setLayer3Match(new TunnelIpv4MatchBuilder(tunnelIpv4)
+ .setTunnelIpv4Source(normalizeIpv4Prefix(tunnelIpv4.getTunnelIpv4Source()))
+ .setTunnelIpv4Destination(normalizeIpv4Prefix(tunnelIpv4.getTunnelIpv4Destination()))
+ .build()))
+ .orElse(match);
+ }
+
+ @Nonnull
+ private static MatchBuilder normalizeIpv4Match(@Nonnull final MatchBuilder match) {
+ return Optional
+ .ofNullable(match.getLayer3Match())
+ .filter(Ipv4Match.class::isInstance)
+ .map(Ipv4Match.class::cast)
+ .map(ipv4 -> match.setLayer3Match(new Ipv4MatchBuilder(ipv4)
+ .setIpv4Source(normalizeIpv4Prefix(ipv4.getIpv4Source()))
+ .setIpv4Destination(normalizeIpv4Prefix(ipv4.getIpv4Destination()))
+ .build()))
+ .orElse(match);
+ }
+
+ @Nonnull
+ private static MatchBuilder normalizeIpv4MatchArbitraryBitMask(@Nonnull final MatchBuilder match) {
+ return Optional
+ .ofNullable(match.getLayer3Match())
+ .filter(Ipv4MatchArbitraryBitMask.class::isInstance)
+ .map(Ipv4MatchArbitraryBitMask.class::cast)
+ .map(ipv4arbitrary -> match.setLayer3Match(new Ipv4MatchBuilder()
+ .setIpv4Source(normalizeIpv4Arbitrary(
+ ipv4arbitrary.getIpv4SourceAddressNoMask(),
+ ipv4arbitrary.getIpv4SourceArbitraryBitmask()))
+ .setIpv4Destination(normalizeIpv4Arbitrary(
+ ipv4arbitrary.getIpv4DestinationAddressNoMask(),
+ ipv4arbitrary.getIpv4DestinationArbitraryBitmask()))
+ .build()))
+ .orElse(match);
+ }
+
+
+ @Nonnull
+ private static MatchBuilder normalizeIpv6Match(@Nonnull final MatchBuilder match) {
+ return Optional
+ .ofNullable(match.getLayer3Match())
+ .filter(Ipv6Match.class::isInstance)
+ .map(Ipv6Match.class::cast)
+ .map(ipv6 -> match.setLayer3Match(new Ipv6MatchBuilder(ipv6)
+ .setIpv6NdSll(normalizeMacAddress(ipv6.getIpv6NdSll()))
+ .setIpv6NdTll(normalizeMacAddress(ipv6.getIpv6NdTll()))
+ .setIpv6NdTarget(normalizeIpv6AddressWithoutMask(ipv6.getIpv6NdTarget()))
+ .setIpv6Source(normalizeIpv6Prefix(ipv6.getIpv6Source()))
+ .setIpv6Destination(normalizeIpv6Prefix(ipv6.getIpv6Destination()))
+ .build()))
+ .orElse(match);
+ }
+
+
+ @Nonnull
+ private static MatchBuilder normalizeIpv6MatchArbitraryBitMask(@Nonnull final MatchBuilder match) {
+ return Optional
+ .ofNullable(match.getLayer3Match())
+ .filter(Ipv6MatchArbitraryBitMask.class::isInstance)
+ .map(Ipv6MatchArbitraryBitMask.class::cast)
+ .map(ipv6Arbitrary -> match.setLayer3Match(new Ipv6MatchBuilder()
+ .setIpv6Source(normalizeIpv6Arbitrary(
+ ipv6Arbitrary.getIpv6SourceAddressNoMask(),
+ ipv6Arbitrary.getIpv6SourceArbitraryBitmask()))
+ .setIpv6Destination(normalizeIpv6Arbitrary(
+ ipv6Arbitrary.getIpv6DestinationAddressNoMask(),
+ ipv6Arbitrary.getIpv6DestinationArbitraryBitmask()))
+ .build()))
+ .orElse(match);
+ }
+
+ @Nonnull
+ private static MatchBuilder normalizeEthernetMatch(@Nonnull final MatchBuilder match) {
+ return Optional
+ .ofNullable(match.getEthernetMatch())
+ .map(eth -> match.setEthernetMatch(new EthernetMatchBuilder(eth)
+ .setEthernetSource(Optional
+ .ofNullable(eth.getEthernetSource())
+ .map(filter -> new EthernetSourceBuilder(filter)
+ .setAddress(normalizeMacAddress(filter.getAddress()))
+ .setMask(normalizeMacAddressMask(filter.getMask()))
+ .build())
+ .orElse(eth.getEthernetSource()))
+ .setEthernetDestination(Optional
+ .ofNullable(eth.getEthernetDestination())
+ .map(filter -> new EthernetDestinationBuilder(filter)
+ .setAddress(normalizeMacAddress(filter.getAddress()))
+ .setMask(normalizeMacAddressMask(filter.getMask()))
+ .build())
+ .orElse(eth.getEthernetDestination()))
+ .build()))
+ .orElse(match);
+ }
+
+}
\ No newline at end of file
import com.google.common.base.Preconditions;
import com.google.common.reflect.TypeToken;
import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
import org.opendaylight.openflowplugin.api.openflow.rpc.RpcContext;
* @param deviceContext - every service needs {@link org.opendaylight.openflowplugin.api.openflow.device.DeviceContext} as input parameter
* @param convertorExecutor
*/
- public static void registerServices(@CheckForNull final RpcContext rpcContext,
- @CheckForNull final DeviceContext deviceContext,
+ public static void registerServices(@Nonnull final RpcContext rpcContext,
+ @Nonnull final DeviceContext deviceContext,
final ExtensionConverterProvider extensionConverterProvider,
final ConvertorExecutor convertorExecutor) {
Preconditions.checkArgument(rpcContext != null);
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 IBM Corporation and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.openflowplugin.impl.util;
-
-public interface SimpleComparator<T> {
-
- boolean areObjectsEqual(T obj1, T obj2);
-
-}
\ No newline at end of file
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
import org.opendaylight.controller.md.sal.binding.api.NotificationService;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListenerRegistration;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.openflowjava.protocol.spi.connection.SwitchConnectionProvider;
+import org.opendaylight.openflowplugin.api.openflow.OpenFlowPluginConfigurationService.PropertyType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.openflowplugin.sm.control.rev150812.StatisticsManagerControlService;
@RunWith(MockitoJUnitRunner.class)
@Mock
NotificationService notificationService;
+ @Mock
+ NotificationPublishService notificationPublishService;
+
@Mock
WriteTransaction writeTransaction;
when(switchConnectionProvider.startup()).thenReturn(Futures.immediateCheckedFuture(null));
provider = new OpenFlowPluginProviderImpl(
- RPC_REQUESTS_QUOTA,
- GLOBAL_NOTIFICATION_QUOTA,
- THREAD_POOL_MIN_THREADS,
- THREAD_POOL_MAX_THREADS,
- THREAD_POOL_TIMEOUT);
-
- provider.setDataBroker(dataBroker);
- provider.setRpcProviderRegistry(rpcProviderRegistry);
- provider.setNotificationProviderService(notificationService);
- provider.setSwitchConnectionProviders(Lists.newArrayList(switchConnectionProvider));
- provider.setClusteringSingletonServicesProvider(clusterSingletonServiceProvider);
+ Lists.newArrayList(switchConnectionProvider),
+ dataBroker,
+ rpcProviderRegistry,
+ notificationService,
+ notificationPublishService,
+ clusterSingletonServiceProvider,
+ entityOwnershipService);
+
+ provider.updateProperty(PropertyType.THREAD_POOL_MIN_THREADS, THREAD_POOL_MIN_THREADS);
+ provider.updateProperty(PropertyType.THREAD_POOL_MAX_THREADS, THREAD_POOL_MAX_THREADS);
+ provider.updateProperty(PropertyType.THREAD_POOL_TIMEOUT, THREAD_POOL_TIMEOUT);
+ provider.updateProperty(PropertyType.RPC_REQUESTS_QUOTA, RPC_REQUESTS_QUOTA);
+ provider.updateProperty(PropertyType.GLOBAL_NOTIFICATION_QUOTA, GLOBAL_NOTIFICATION_QUOTA);
}
@After
import com.google.common.util.concurrent.SettableFuture;
import java.math.BigInteger;
-import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
60L, TimeUnit.SECONDS,
new SynchronousQueue<>(), "ofppool");
- connectionManagerImpl = new ConnectionManagerImpl(ECHO_REPLY_TIMEOUT, threadPool);
+ connectionManagerImpl = new ConnectionManagerImpl(threadPool);
+ connectionManagerImpl.setEchoReplyTimeout(ECHO_REPLY_TIMEOUT);
connectionManagerImpl.setDeviceConnectedHandler(deviceConnectedHandler);
final InetSocketAddress deviceAddress = InetSocketAddress.createUnresolved("yahoo", 42);
Mockito.when(connection.getRemoteAddress()).thenReturn(deviceAddress);
connectionContextSpy = Mockito.spy(new ConnectionContextImpl(connectionAdapter));
Mockito.when(connectionContextSpy.getConnectionAdapter()).thenReturn(connectionAdapter);
Mockito.when(features.getDatapathId()).thenReturn(BigInteger.TEN);
- Mockito.doNothing().when(connectionContextSpy).handshakeSuccessful();
handshakeListener = new HandshakeListenerImpl(connectionContextSpy, deviceConnectedHandler);
handshakeListener.setHandshakeContext(handshakeContext);
}
*/
@Test
public void testOnSwitchIdleEvent1() throws Exception {
- final Future<RpcResult<EchoOutput>> echoReply = Futures.immediateFuture(RpcResultBuilder.success(new EchoOutputBuilder().build()).build());
+ final Future<RpcResult<EchoOutput>> echoReply = Futures.immediateFuture(RpcResultBuilder.success(new EchoOutputBuilder().setXid(0L).build()).build());
Mockito.when(connectionAdapter.echo(Matchers.any(EchoInput.class))).thenReturn(echoReply);
.thenReturn(flowRemovedMdsalBld.build());
// insert flow+flowId into local registry
- final FlowRegistryKey flowRegKey = FlowRegistryKeyFactory.create(flowRemovedMdsalBld.build());
+ final FlowRegistryKey flowRegKey = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), flowRemovedMdsalBld.build());
final FlowDescriptor flowDescriptor = FlowDescriptorFactory.create((short) 0, new FlowId("ut-ofp:f456"));
- deviceContext.getDeviceFlowRegistry().store(flowRegKey, flowDescriptor);
+ deviceContext.getDeviceFlowRegistry().storeDescriptor(flowRegKey, flowDescriptor);
// plug in lifecycleListener
final ItemLifecycleListener itemLifecycleListener = Mockito.mock(ItemLifecycleListener.class);
public void testOnDeviceDisconnected() throws Exception {
final DeviceTerminationPhaseHandler deviceContextClosedHandler = mock(DeviceTerminationPhaseHandler.class);
- assertEquals(0, deviceContext.getDeviceFlowRegistry().getAllFlowDescriptors().size());
- assertEquals(0, deviceContext.getDeviceGroupRegistry().getAllGroupIds().size());
- assertEquals(0, deviceContext.getDeviceMeterRegistry().getAllMeterIds().size());
+ assertEquals(0, deviceContext.getDeviceFlowRegistry().size());
+ assertEquals(0, deviceContext.getDeviceGroupRegistry().size());
+ assertEquals(0, deviceContext.getDeviceMeterRegistry().size());
}
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListenerRegistration;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
@Mock
private ClusterSingletonServiceProvider clusterSingletonServiceProvider;
@Mock
+ private EntityOwnershipService entityOwnershipService;
+ @Mock
+ private EntityOwnershipListenerRegistration entityOwnershipListenerRegistration;
+ @Mock
private ConvertorExecutor convertorExecutor;
@Mock
private KeyedInstanceIdentifier<Node, NodeKey> key;
when(mockedDataBroker.newWriteOnlyTransaction()).thenReturn(mockedWriteTransaction);
when(mockedWriteTransaction.submit()).thenReturn(mockedFuture);
+ when(entityOwnershipService.registerListener(any(), any())).thenReturn(entityOwnershipListenerRegistration);
final DeviceManagerImpl deviceManager = new DeviceManagerImpl(
mockedDataBroker,
- TEST_VALUE_GLOBAL_NOTIFICATION_QUOTA,
- false,
- barrierIntervalNanos,
- barrierCountLimit,
messageIntelligenceAgency,
- true,
clusterSingletonServiceProvider,
- null,
- new HashedWheelTimer(),
- convertorExecutor,
- false);
+ entityOwnershipService, new HashedWheelTimer(), convertorExecutor, null
+ );
deviceManager.setDeviceInitializationPhaseHandler(deviceInitPhaseHandler);
deviceManager.setDeviceTerminationPhaseHandler(deviceTerminationPhaseHandler);
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipListenerRegistration;
+import org.opendaylight.controller.md.sal.common.api.clustering.EntityOwnershipService;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
private ClusterSingletonServiceProvider clusterSingletonServiceProvider;
@Mock
private ClusterSingletonServiceRegistration clusterSingletonServiceRegistration;
+ @Mock
+ EntityOwnershipListenerRegistration entityOwnershipListenerRegistration;
private LifecycleService lifecycleService;
Mockito.when(clusterSingletonServiceProvider.registerClusterSingletonService(Mockito.any()))
.thenReturn(clusterSingletonServiceRegistration);
- Mockito.when(deviceContext.stopClusterServices(Mockito.anyBoolean())).thenReturn(Futures.immediateFuture(null));
- Mockito.when(statContext.stopClusterServices(Mockito.anyBoolean())).thenReturn(Futures.immediateFuture(null));
- Mockito.when(rpcContext.stopClusterServices(Mockito.anyBoolean())).thenReturn(Futures.immediateFuture(null));
+ Mockito.when(deviceContext.stopClusterServices()).thenReturn(Futures.immediateFuture(null));
+ Mockito.when(statContext.stopClusterServices()).thenReturn(Futures.immediateFuture(null));
+ Mockito.when(rpcContext.stopClusterServices()).thenReturn(Futures.immediateFuture(null));
lifecycleService = new LifecycleServiceImpl();
lifecycleService.setDeviceContext(deviceContext);
@Test
public void closeServiceInstance() throws Exception {
lifecycleService.closeServiceInstance().get();
- Mockito.verify(statContext).stopClusterServices(false);
- Mockito.verify(deviceContext).stopClusterServices(false);
- Mockito.verify(rpcContext).stopClusterServices(false);
+ Mockito.verify(statContext).stopClusterServices();
+ Mockito.verify(deviceContext).stopClusterServices();
+ Mockito.verify(rpcContext).stopClusterServices();
}
@Test
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowDescriptor;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.FlowRegistryKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
public void setUp() throws Exception {
nodeInstanceIdentifier = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(new NodeId(NODE_ID)));
when(dataBroker.newReadOnlyTransaction()).thenReturn(readOnlyTransaction);
- deviceFlowRegistry = new DeviceFlowRegistryImpl(dataBroker, nodeInstanceIdentifier);
+ deviceFlowRegistry = new DeviceFlowRegistryImpl(OFConstants.OFP_VERSION_1_3, dataBroker, nodeInstanceIdentifier);
final FlowAndStatisticsMapList flowStats = TestFlowHelper.createFlowAndStatisticsMapListBuilder(1).build();
- key = FlowRegistryKeyFactory.create(flowStats);
+ key = FlowRegistryKeyFactory.create(OFConstants.OFP_VERSION_1_3, flowStats);
descriptor = FlowDescriptorFactory.create(key.getTableId(), new FlowId("ut:1"));
Assert.assertEquals(0, deviceFlowRegistry.getAllFlowDescriptors().size());
- deviceFlowRegistry.store(key, descriptor);
+ deviceFlowRegistry.storeDescriptor(key, descriptor);
Assert.assertEquals(1, deviceFlowRegistry.getAllFlowDescriptors().size());
}
.build();
final Map<FlowRegistryKey, FlowDescriptor> allFlowDescriptors = testFill(path, flowCapableNode);
- final FlowRegistryKey key = FlowRegistryKeyFactory.create(flow);
+ final FlowRegistryKey key = FlowRegistryKeyFactory.create(OFConstants.OFP_VERSION_1_3, flow);
InOrder order = inOrder(dataBroker, readOnlyTransaction);
order.verify(dataBroker).newReadOnlyTransaction();
order.verify(readOnlyTransaction).read(LogicalDatastoreType.OPERATIONAL, path);
assertTrue(allFlowDescriptors.containsKey(key));
- deviceFlowRegistry.removeDescriptor(key);
+ deviceFlowRegistry.addMark(key);
}
@Test
@Test
public void testRetrieveIdForFlow() throws Exception {
- Assert.assertEquals(descriptor, deviceFlowRegistry.retrieveIdForFlow(key));
+ Assert.assertEquals(descriptor, deviceFlowRegistry.retrieveDescriptor(key));
}
@Test
public void testStore() throws Exception {
//store the same key with different value
final FlowDescriptor descriptor2 = FlowDescriptorFactory.create(key.getTableId(), new FlowId("ut:2"));
- deviceFlowRegistry.store(key, descriptor2);
+ deviceFlowRegistry.storeDescriptor(key, descriptor2);
Assert.assertEquals(1, deviceFlowRegistry.getAllFlowDescriptors().size());
- Assert.assertEquals("ut:2", deviceFlowRegistry.retrieveIdForFlow(key).getFlowId().getValue());
+ Assert.assertEquals("ut:2", deviceFlowRegistry.retrieveDescriptor(key).getFlowId().getValue());
// store new key with old value
final FlowAndStatisticsMapList flowStats = TestFlowHelper.createFlowAndStatisticsMapListBuilder(2).build();
- final FlowRegistryKey key2 = FlowRegistryKeyFactory.create(flowStats);
- deviceFlowRegistry.store(key2, descriptor);
+ final FlowRegistryKey key2 = FlowRegistryKeyFactory.create(OFConstants.OFP_VERSION_1_3, flowStats);
+ deviceFlowRegistry.storeDescriptor(key2, descriptor);
Assert.assertEquals(2, deviceFlowRegistry.getAllFlowDescriptors().size());
- Assert.assertEquals("ut:1", deviceFlowRegistry.retrieveIdForFlow(key2).getFlowId().getValue());
+ Assert.assertEquals("ut:1", deviceFlowRegistry.retrieveDescriptor(key2).getFlowId().getValue());
}
@Test
FlowId newFlowId;
//store existing key
- newFlowId = deviceFlowRegistry.storeIfNecessary(key);
+ deviceFlowRegistry.store(key);
+ newFlowId = deviceFlowRegistry.retrieveDescriptor(key).getFlowId();
Assert.assertEquals(1, deviceFlowRegistry.getAllFlowDescriptors().size());
- Assert.assertEquals(descriptor, deviceFlowRegistry.retrieveIdForFlow(key));
+ Assert.assertEquals(descriptor, deviceFlowRegistry.retrieveDescriptor(key));
Assert.assertEquals(descriptor.getFlowId(), newFlowId);
//store new key
final String alienPrefix = "#UF$TABLE*2-";
- final FlowRegistryKey key2 = FlowRegistryKeyFactory.create(TestFlowHelper.createFlowAndStatisticsMapListBuilder(2).build());
- newFlowId = deviceFlowRegistry.storeIfNecessary(key2);
+ final FlowRegistryKey key2 = FlowRegistryKeyFactory.create(OFConstants.OFP_VERSION_1_3, TestFlowHelper.createFlowAndStatisticsMapListBuilder(2).build());
+ deviceFlowRegistry.store(key2);
+ newFlowId = deviceFlowRegistry.retrieveDescriptor(key2).getFlowId();
Assert.assertTrue(newFlowId.getValue().startsWith(alienPrefix));
- Assert.assertTrue(deviceFlowRegistry.retrieveIdForFlow(key2).getFlowId().getValue().startsWith(alienPrefix));
+ Assert.assertTrue(deviceFlowRegistry.retrieveDescriptor(key2).getFlowId().getValue().startsWith(alienPrefix));
Assert.assertEquals(2, deviceFlowRegistry.getAllFlowDescriptors().size());
}
@Test
public void testRemoveDescriptor() throws Exception {
- deviceFlowRegistry.removeDescriptor(key);
+ deviceFlowRegistry.addMark(key);
Assert.assertEquals(0, deviceFlowRegistry.getAllFlowDescriptors().size());
}
return null;
}
-}
\ No newline at end of file
+}
Assert.assertNotNull(flowDescriptor.getTableKey());
}
- @Test(expected = NullPointerException.class)
+ @Test(expected = Exception.class)
public void testCreateNegative1() throws Exception {
FlowDescriptorFactory.create((short) 1, null);
}
HashSet<FlowRegistryKey> flowRegistryKeys = new HashSet<>();
for (FlowAndStatisticsMapList item : flowStats.getFlowAndStatisticsMapList()) {
- final FlowRegistryKey key1 = FlowRegistryKeyFactory.create(item);
- final FlowRegistryKey key2 = FlowRegistryKeyFactory.create(item);
+ final FlowRegistryKey key1 = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), item);
+ final FlowRegistryKey key2 = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), item);
flowRegistryKeys.add(key1);
flowRegistryKeys.add(key1);
flowRegistryKeys.add(key2);
@Test
public void testEqualsNegative() throws Exception {
final FlowAndStatisticsMapList flowStatisticsMapList1 = TestFlowHelper.createFlowAndStatisticsMapListBuilder(1).build();
- final FlowRegistryKey key1 = FlowRegistryKeyFactory.create(flowStatisticsMapList1);
+ final FlowRegistryKey key1 = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), flowStatisticsMapList1);
FlowRegistryKey key2;
FlowAndStatisticsMapListBuilder flowStatisticsMapListBld2;
// different priority
flowStatisticsMapListBld2 = new FlowAndStatisticsMapListBuilder(flowStatisticsMapList1);
flowStatisticsMapListBld2.setPriority(flowStatisticsMapListBld2.getPriority() + 1);
- key2 = FlowRegistryKeyFactory.create(flowStatisticsMapListBld2.build());
+ key2 = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), flowStatisticsMapListBld2.build());
Assert.assertFalse(key1.equals(key2));
// different match
flowStatisticsMapListBld2 = new FlowAndStatisticsMapListBuilder(flowStatisticsMapList1);
flowStatisticsMapListBld2.setMatch(new MatchBuilder().build());
- key2 = FlowRegistryKeyFactory.create(flowStatisticsMapListBld2.build());
+ key2 = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), flowStatisticsMapListBld2.build());
Assert.assertFalse(key1.equals(key2));
// different tableId
flowStatisticsMapListBld2 = new FlowAndStatisticsMapListBuilder(flowStatisticsMapList1);
flowStatisticsMapListBld2.setTableId((short) (flowStatisticsMapListBld2.getTableId() + 1));
- key2 = FlowRegistryKeyFactory.create(flowStatisticsMapListBld2.build());
+ key2 = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), flowStatisticsMapListBld2.build());
Assert.assertFalse(key1.equals(key2));
Assert.assertFalse(key1.equals(null));
.setPriority(2)
.setTableId((short) 0);
- FlowRegistryKey flow1Hash = FlowRegistryKeyFactory.create(flow1Builder.build());
+ FlowRegistryKey flow1Hash = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), flow1Builder.build());
LOG.info("flowHash1: {}", flow1Hash.hashCode());
.setCookie(new FlowCookie(BigInteger.valueOf(148)))
.setMatch(match2Builder.build());
- FlowRegistryKey flow2Hash = FlowRegistryKeyFactory.create(flow2Builder.build());
+ FlowRegistryKey flow2Hash = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), flow2Builder.build());
LOG.info("flowHash2: {}", flow2Hash.hashCode());
Assert.assertNotSame(flow1Hash, flow2Hash);
FlowBuilder fb1 = new FlowBuilder(flow1Builder.build());
fb1.setTableId(null);
try {
- FlowRegistryKeyFactory.create(fb1.build());
+ FlowRegistryKeyFactory.create(deviceInfo.getVersion(), fb1.build());
Assert.fail("hash creation should have failed because of NPE");
} catch (Exception e) {
// expected
FlowBuilder fb2 = new FlowBuilder(flow1Builder.build());
fb2.setPriority(null);
try {
- FlowRegistryKeyFactory.create(fb2.build());
+ FlowRegistryKeyFactory.create(deviceInfo.getVersion(), fb2.build());
} catch (Exception e) {
// not expected
Assert.fail("no exception was expected while hash was creating.");
FlowBuilder fb3 = new FlowBuilder(flow1Builder.build());
fb3.setCookie(null);
- FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(fb3.build());
+ FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), fb3.build());
Assert.assertNotNull(flowRegistryKey.getCookie());
Assert.assertEquals(OFConstants.DEFAULT_COOKIE, flowRegistryKey.getCookie());
}
FlowsStatisticsUpdate flowStats = FLOWS_STATISTICS_UPDATE_BUILDER.build();
for (FlowAndStatisticsMapList item : flowStats.getFlowAndStatisticsMapList()) {
- FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(item);
+ FlowRegistryKey flowRegistryKey = FlowRegistryKeyFactory.create(deviceInfo.getVersion(), item);
FlowRegistryKey lastHash = null;
if (null != lastHash) {
assertNotEquals(lastHash, flowRegistryKey);
@Test
public void testRemoveMarked() throws Exception {
- deviceGroupRegistry.markToBeremoved(groupId);
- deviceGroupRegistry.removeMarked();
+ deviceGroupRegistry.addMark(groupId);
+ deviceGroupRegistry.processMarks();
Assert.assertEquals(0, deviceGroupRegistry.getAllGroupIds().size());
}
@Test
public void testRemoveMarkedNegative() throws Exception {
- deviceGroupRegistry.markToBeremoved(groupId2);
- deviceGroupRegistry.removeMarked();
+ deviceGroupRegistry.addMark(groupId2);
+ deviceGroupRegistry.processMarks();
Assert.assertEquals(1, deviceGroupRegistry.getAllGroupIds().size());
}
@Test
public void testClose() throws Exception {
- deviceGroupRegistry.markToBeremoved(groupId);
+ deviceGroupRegistry.addMark(groupId);
deviceGroupRegistry.close();
Assert.assertEquals(0, deviceGroupRegistry.getAllGroupIds().size());
deviceGroupRegistry.store(groupId);
Assert.assertEquals(1, deviceGroupRegistry.getAllGroupIds().size());
- deviceGroupRegistry.removeMarked();
+ deviceGroupRegistry.processMarks();
Assert.assertEquals(1, deviceGroupRegistry.getAllGroupIds().size());
}
-}
\ No newline at end of file
+}
@Test
public void testRemoveMarked() throws Exception {
- deviceMeterRegistry.markToBeremoved(meterId);
- deviceMeterRegistry.removeMarked();
+ deviceMeterRegistry.addMark(meterId);
+ deviceMeterRegistry.processMarks();
Assert.assertEquals(0, deviceMeterRegistry.getAllMeterIds().size());
}
@Test
public void testRemoveMarkedNegative() throws Exception {
- deviceMeterRegistry.markToBeremoved(meterId2);
- deviceMeterRegistry.removeMarked();
+ deviceMeterRegistry.addMark(meterId2);
+ deviceMeterRegistry.processMarks();
Assert.assertEquals(1, deviceMeterRegistry.getAllMeterIds().size());
}
@Test
public void testClose() throws Exception {
- deviceMeterRegistry.markToBeremoved(meterId);
+ deviceMeterRegistry.addMark(meterId);
deviceMeterRegistry.close();
Assert.assertEquals(0, deviceMeterRegistry.getAllMeterIds().size());
deviceMeterRegistry.store(meterId);
Assert.assertEquals(1, deviceMeterRegistry.getAllMeterIds().size());
- deviceMeterRegistry.removeMarked();
+ deviceMeterRegistry.processMarks();
Assert.assertEquals(1, deviceMeterRegistry.getAllMeterIds().size());
}
-}
\ No newline at end of file
+}
@Before
public void setUp() {
final NodeKey nodeKey = new NodeKey(nodeId);
- rpcManager = new RpcManagerImpl(rpcProviderRegistry, QUOTA_VALUE, extensionConverterProvider, convertorExecutor, notificationPublishService);
+ rpcManager = new RpcManagerImpl(rpcProviderRegistry, extensionConverterProvider, convertorExecutor, notificationPublishService);
+ rpcManager.setRpcRequestQuota(QUOTA_VALUE);
rpcManager.setDeviceInitializationPhaseHandler(deviceINitializationPhaseHandler);
GetFeaturesOutput featuresOutput = new GetFeaturesOutputBuilder()
@Test
public void onDeviceContextLevelUpTwice() throws Exception {
rpcManager.onDeviceContextLevelUp(deviceInfo, lifecycleService);
- expectedException.expect(VerifyException.class);
rpcManager.onDeviceContextLevelUp(deviceInfo, lifecycleService);
}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
when(mockedDeviceContext.getDeviceInfo()).thenReturn(mockedDeviceInfo);
when(mockedDeviceContext.getDeviceFlowRegistry()).thenReturn(mockedFlowRegistry);
- when(mockedFlowRegistry.retrieveIdForFlow(Matchers.any(FlowRegistryKey.class))).thenReturn(mockedFlowDescriptor);
+ when(mockedFlowRegistry.retrieveDescriptor(Matchers.any(FlowRegistryKey.class))).thenReturn(mockedFlowDescriptor);
final InstanceIdentifier<FlowCapableNode> nodePath = mockedDeviceInfo.getNodeInstanceIdentifier().augmentation(FlowCapableNode.class);
final FlowCapableNodeBuilder flowNodeBuilder = new FlowCapableNodeBuilder();
when(mockedFlowDescriptor.getFlowId()).thenReturn(flowId);
when(mockedFlowDescriptor.getTableKey()).thenReturn(new TableKey(DUMMY_TABLE_ID));
- when(deviceFlowRegistry.storeIfNecessary(Matchers.any(FlowRegistryKey.class))).thenReturn(flowId);
- when(deviceFlowRegistry.retrieveIdForFlow(Matchers.any(FlowRegistryKey.class))).thenReturn(mockedFlowDescriptor);
+ when(deviceFlowRegistry.retrieveDescriptor(Matchers.any(FlowRegistryKey.class))).thenReturn(mockedFlowDescriptor);
}
private <T extends DataObject> void verifyOutput(Future<RpcResult<T>> rpcResultFuture) throws ExecutionException, InterruptedException {
salGroupService.removeGroup(removeGroupInput);
verify(mockedRequestContextStack).createRequestContext();
- verify(mockedDeviceGroupRegistry).markToBeremoved(eq(dummyGroupId));
+ verify(mockedDeviceGroupRegistry).addMark(eq(dummyGroupId));
if (itemLifecycleListener != null) {
verify(itemLifecycleListener).onRemoved(Matchers.<KeyedInstanceIdentifier<Group, GroupKey>>any());
salMeterService.removeMeter(removeMeterInput);
verify(mockedRequestContextStack).createRequestContext();
- verify(mockedDeviceMeterRegistry).markToBeremoved(eq(dummyMeterId));
+ verify(mockedDeviceMeterRegistry).addMark(eq(dummyMeterId));
if (itemLifecycleListener != null) {
verify(itemLifecycleListener).onRemoved(Matchers.<KeyedInstanceIdentifier<Meter, MeterKey>>any());
when(mockedDeviceContext.getPrimaryConnectionContext()).thenReturn(mockedPrimConnectionContext);
when(mockedDeviceContext.getMessageSpy()).thenReturn(mockedMessagSpy);
- when(mockedDeviceContext.getDeviceFlowRegistry()).thenReturn(new DeviceFlowRegistryImpl(dataBroker, NODE_II));
+ when(mockedDeviceContext.getDeviceFlowRegistry()).thenReturn(new DeviceFlowRegistryImpl(DUMMY_VERSION, dataBroker, NODE_II));
when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
when(mockedDeviceContext.getDeviceInfo()).thenReturn(mockedDeviceInfo);
when(mockedDeviceContext.getMultiMsgCollector(Matchers.any())).thenReturn(multiMessageCollector);
when(deviceContext.getDeviceFlowRegistry()).thenReturn(deviceFlowRegistry);
when(deviceContext.getDeviceGroupRegistry()).thenReturn(deviceGroupRegistry);
when(deviceContext.getDeviceMeterRegistry()).thenReturn(deviceMeterRegistry);
- when(deviceFlowRegistry.retrieveIdForFlow(Matchers.any(FlowRegistryKey.class))).thenReturn(flowDescriptor);
+ when(deviceFlowRegistry.retrieveDescriptor(Matchers.any(FlowRegistryKey.class))).thenReturn(flowDescriptor);
when(deviceContext.getReadTransaction()).thenReturn(readTx);
when(txFacade.getReadTransaction()).thenReturn(readTx);
when(deviceContext.getPrimaryConnectionContext()).thenReturn(connectionAdapter);
verify(deviceContext, Mockito.never()).addDeleteToTxChain(
Matchers.eq(LogicalDatastoreType.OPERATIONAL), Matchers.<InstanceIdentifier<?>>any());
- verify(deviceGroupRegistry).removeMarked();
+ verify(deviceGroupRegistry).processMarks();
verify(deviceGroupRegistry).store(storedGroupId);
verify(txFacade).writeToTransaction(
Matchers.eq(LogicalDatastoreType.OPERATIONAL), Matchers.eq(groupPath), Matchers.any(Group.class));
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
-import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
import org.opendaylight.openflowjava.protocol.api.connection.ConnectionAdapter;
import org.opendaylight.openflowjava.protocol.api.connection.OutboundQueue;
+import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.openflowplugin.api.openflow.connection.ConnectionContext;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceContext;
import org.opendaylight.openflowplugin.api.openflow.device.DeviceInfo;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.FeaturesReply;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.GetFeaturesOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.MultipartReply;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.MultipartRequestInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.OfHeader;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.openflowplugin.sm.control.rev150812.ChangeStatisticsWorkModeInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.openflowplugin.sm.control.rev150812.GetStatisticsWorkModeOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.openflowplugin.sm.control.rev150812.StatisticsManagerControlService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.openflowplugin.sm.control.rev150812.StatisticsWorkMode;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
when(mockedDeviceContext.getDeviceInfo()).thenReturn(mockedDeviceInfo);
when(mockedDeviceContext.getPrimaryConnectionContext()).thenReturn(mockedPrimConnectionContext);
when(mockedDeviceContext.getMessageSpy()).thenReturn(mockedMessagSpy);
- when(mockedDeviceContext.getDeviceFlowRegistry()).thenReturn(new DeviceFlowRegistryImpl(dataBroker, nodePath));
+ when(mockedDeviceContext.getDeviceFlowRegistry()).thenReturn(new DeviceFlowRegistryImpl(OFConstants.OFP_VERSION_1_3, dataBroker, nodePath));
when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
when(mockedDeviceContext.getMultiMsgCollector(
Matchers.<RequestContext<List<MultipartReply>>>any())).thenAnswer(
final ConvertorManager convertorManager = ConvertorManagerFactory.createDefaultManager();
final long basicTimerDelay = 3000L;
final long maximumTimerDelay = 900000L;
- statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, false, new HashedWheelTimer(),
- convertorManager, basicTimerDelay, maximumTimerDelay);
- statisticsManager.setDeviceInitializationPhaseHandler(deviceInitializationPhaseHandler);
- }
-
- @Test
- public void testOnDeviceContextLevelUp() throws Exception {
- Mockito.doAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- final FutureCallback<OfHeader> callback = (FutureCallback<OfHeader>) invocation.getArguments()[2];
- LOG.debug("committing entry: {}", ((MultipartRequestInput) invocation.getArguments()[1]).getType());
- callback.onSuccess(null);
- currentRequestContext.setResult(RpcResultBuilder.<List<MultipartReply>>success().build());
- return null;
- }
- }).when(outboundQueue)
- .commitEntry(Matchers.anyLong(), Matchers.<OfHeader>any(), Matchers.<FutureCallback<OfHeader>>any());
-
- Mockito.when(lifecycleService.getDeviceContext()).thenReturn(mockedDeviceContext);
- Mockito.when(mockedDeviceContext.getDeviceState()).thenReturn(mockedDeviceState);
-
- statisticsManager.setDeviceInitializationPhaseHandler(mockedDevicePhaseHandler);
- statisticsManager.onDeviceContextLevelUp(deviceInfo, lifecycleService);
- verify(mockedDevicePhaseHandler).onDeviceContextLevelUp(deviceInfo, lifecycleService);
- }
-
- @Test
- public void testOnDeviceContextClosed() throws Exception {
- final StatisticsContext statisticContext = Mockito.mock(StatisticsContext.class);
- final Map<DeviceInfo, StatisticsContext> contextsMap = getContextsMap(statisticsManager);
-
- contextsMap.put(deviceInfo, statisticContext);
- Assert.assertEquals(1, contextsMap.size());
-
- statisticsManager.setDeviceTerminationPhaseHandler(mockedTerminationPhaseHandler);
- statisticsManager.onDeviceContextLevelDown(deviceInfo);
- verify(statisticContext).close();
- verify(mockedTerminationPhaseHandler).onDeviceContextLevelDown(deviceInfo);
- Assert.assertEquals(1, contextsMap.size());
+ statisticsManager = new StatisticsManagerImpl(rpcProviderRegistry, new HashedWheelTimer(),
+ convertorManager);
+ statisticsManager.setBasicTimerDelay(basicTimerDelay);
+ statisticsManager.setMaximumTimerDelay(maximumTimerDelay);
+ statisticsManager.setIsStatisticsPollingOn(false);
}
private static Map<DeviceInfo, StatisticsContext> getContextsMap(final StatisticsManagerImpl statisticsManager)
when(timeCounter.getAverageTimeBetweenMarks()).thenReturn(2000L, (Long)4000L);
statisticsManager.calculateTimerDelay(timeCounter);
- Assert.assertEquals(3000L, StatisticsManagerImpl.getCurrentTimerDelay());
+ Assert.assertEquals(3000L, statisticsManager.getCurrentTimerDelay());
statisticsManager.calculateTimerDelay(timeCounter);
- Assert.assertEquals(6000L, StatisticsManagerImpl.getCurrentTimerDelay());
+ Assert.assertEquals(6000L, statisticsManager.getCurrentTimerDelay());
}
@Test
import java.util.List;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.openflowplugin.api.openflow.registry.flow.DeviceFlowRegistry;
+import org.opendaylight.openflowplugin.impl.registry.flow.FlowDescriptorFactory;
+import org.opendaylight.openflowplugin.impl.statistics.services.direct.AbstractDirectStatisticsServiceTest;
import org.opendaylight.yang.gen.v1.urn.opendaylight.direct.statistics.rev160511.GetFlowStatisticsInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.direct.statistics.rev160511.GetFlowStatisticsOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
public void setUp() throws Exception {
service = new FlowDirectStatisticsService(requestContextStack, deviceContext, convertorManager);
final DeviceFlowRegistry registry = mock(DeviceFlowRegistry.class);
- when(registry.storeIfNecessary(any())).thenReturn(new FlowId("1"));
+ when(registry.retrieveDescriptor(any())).thenReturn(FlowDescriptorFactory.create(TABLE_NO, new FlowId("1")));
when(deviceContext.getDeviceFlowRegistry()).thenReturn(registry);
}
service.storeStatistics(output);
verify(deviceContext).writeToTransactionWithParentsSlow(eq(LogicalDatastoreType.OPERATIONAL), any(), any());
}
-}
\ No newline at end of file
+}
--- /dev/null
+/*
+ * Copyright (c) 2017 Pantheon Technologies s.r.o. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.openflowplugin.impl.util;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+import org.opendaylight.openflowplugin.api.OFConstants;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv4Address;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv4Prefix;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv6Address;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv6Prefix;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Uri;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.DottedQuad;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev130715.MacAddress;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.opendaylight.ipv6.arbitrary.bitmask.fields.rev160224.Ipv6ArbitraryMask;
+
+public class AddressNormalizationUtilTest {
+
+ @Test
+ public void normalizeProtocolAgnosticPortOF10() throws Exception {
+ final Uri left = new Uri("openflow:1:INPORT");
+ final Uri right = new Uri("INPORT");
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeProtocolAgnosticPort(left, OFConstants.OFP_VERSION_1_0)
+ );
+ }
+
+ @Test
+ public void normalizeProtocolAgnosticPortOF13() throws Exception {
+ final Uri left = new Uri("openflow:1:ANY");
+ final Uri right = new Uri("ANY");
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeProtocolAgnosticPort(left, OFConstants.OFP_VERSION_1_3)
+ );
+ }
+
+ @Test
+ public void normalizeIpv6Prefix() throws Exception {
+ final Ipv6Prefix left = new Ipv6Prefix("1E3D:5678:9ABC::/24");
+ final Ipv6Prefix right = new Ipv6Prefix("1e3d:5600:0:0:0:0:0:0/24");
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeIpv6Prefix(left)
+ );
+ }
+
+ @Test
+ public void normalizeIpv6Arbitrary() throws Exception {
+ final Ipv6Address leftAddress = new Ipv6Address("1E3D:5678:9ABC::");
+ final Ipv6ArbitraryMask leftMask = new Ipv6ArbitraryMask("FFFF:FF00::");
+ final Ipv6Prefix right = new Ipv6Prefix("1e3d:5600:0:0:0:0:0:0/24");
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeIpv6Arbitrary(leftAddress, leftMask)
+ );
+ }
+
+ @Test
+ public void normalizeIpv6AddressWithoutMask() throws Exception {
+ final Ipv6Address left = new Ipv6Address("1E3D:5678:9ABC::");
+ final Ipv6Address right = new Ipv6Address("1e3d:5678:9abc:0:0:0:0:0");
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeIpv6AddressWithoutMask(left)
+ );
+ }
+
+ @Test
+ public void normalizeIpv4Prefix() throws Exception {
+ final Ipv4Prefix left = new Ipv4Prefix("192.168.72.1/16");
+ final Ipv4Prefix right = new Ipv4Prefix("192.168.0.0/16");
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeIpv4Prefix(left)
+ );
+ }
+
+ @Test
+ public void normalizeIpv4Arbitrary() throws Exception {
+ final Ipv4Address leftAddress = new Ipv4Address("192.168.72.1");
+ final DottedQuad leftMask = new DottedQuad("255.255.0.0");
+ final Ipv4Prefix right = new Ipv4Prefix("192.168.0.0/16");
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeIpv4Arbitrary(leftAddress, leftMask)
+ );
+ }
+
+ @Test
+ public void normalizeMacAddress() throws Exception {
+ final MacAddress left = new MacAddress("01:23:45:AB:CD:EF");
+ final MacAddress right = new MacAddress("01:23:45:ab:cd:ef");
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeMacAddress(left)
+ );
+ }
+
+ @Test
+ public void normalizeMacAddressMask() throws Exception {
+ final MacAddress left = new MacAddress("FF:FF:FF:FF:FF:FF");
+ final MacAddress right = null;
+
+ assertEquals(
+ right,
+ AddressNormalizationUtil.normalizeMacAddressMask(left)
+ );
+ }
+
+}
\ No newline at end of file
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
package org.opendaylight.openflowplugin.openflow.md.core;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.Futures;
import java.util.concurrent.Future;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.opendaylight.openflowjava.protocol.api.connection.ConnectionAdapter;
import org.opendaylight.openflowjava.protocol.api.connection.ConnectionReadyListener;
private QueueProcessor<OfHeader, DataObject> queueProcessor;
private QueueKeeper<OfHeader> queue;
- private ThreadPoolExecutor hsPool;
private HandshakeManager handshakeManager;
private boolean firstHelloProcessed;
conductorState = CONDUCTOR_STATE.HANDSHAKING;
firstHelloProcessed = false;
handshakeManager = new HandshakeManagerImpl(connectionAdapter,
- ConnectionConductor.versionOrder.get(0),
- ConnectionConductor.versionOrder);
+ ConnectionConductor.VERSION_ORDER.get(0),
+ ConnectionConductor.VERSION_ORDER);
handshakeManager.setUseVersionBitmap(isBitmapNegotiationEnable);
handshakeManager.setHandshakeListener(this);
portFeaturesUtils = PortFeaturesUtil.getInstance();
@Override
public void init() {
- int handshakeThreadLimit = 1;
- hsPool = new ThreadPoolLoggingExecutor(handshakeThreadLimit,
- handshakeThreadLimit, 0L, TimeUnit.MILLISECONDS,
- new LinkedBlockingQueue<Runnable>(), "OFHandshake-"
- + conductorId);
-
connectionAdapter.setMessageListener(this);
connectionAdapter.setSystemListener(this);
connectionAdapter.setConnectionReadyListener(this);
* @param queueType enqueue type
*/
private void enqueueMessage(OfHeader message, QueueType queueType) {
- queue.push(message, this, queueType);
+ if (queue != null) {
+ queue.push(message, this, queueType);
+ } else {
+ LOG.debug("Queue is null");
+ }
}
@Override
checkState(CONDUCTOR_STATE.HANDSHAKING);
HandshakeStepWrapper handshakeStepWrapper = new HandshakeStepWrapper(
hello, handshakeManager, connectionAdapter);
- hsPool.submit(handshakeStepWrapper);
+ Thread t = new Thread(handshakeStepWrapper, "OFHandshake-" + conductorId);
+ t.setDaemon(true);
+ t.start();
}
/**
checkState(CONDUCTOR_STATE.HANDSHAKING);
HandshakeStepWrapper handshakeStepWrapper = new HandshakeStepWrapper(
null, handshakeManager, connectionAdapter);
- hsPool.execute(handshakeStepWrapper);
+ Thread t = new Thread(handshakeStepWrapper, "OFHandshake-" + conductorId);
+ t.setDaemon(true);
+ t.start();
firstHelloProcessed = true;
} else {
LOG.debug("already touched by hello message");
}
SessionContext sessionContext = OFSessionUtil.registerSession(this, featureOutput, negotiatedVersion);
- hsPool.shutdown();
- hsPool.purge();
conductorState = CONDUCTOR_STATE.WORKING;
QueueKeeperFactory.plugQueue(queueProcessor, queue);
}
}
} else {
//This condition will occure when Old Helium openflowplugin implementation will be used.
- shutdownPoolPolitely();
- }
- }
-
- private void shutdownPoolPolitely() {
- LOG.debug("Terminating handshake pool for node {}", connectionAdapter.getRemoteAddress());
- hsPool.shutdown();
- try {
- hsPool.awaitTermination(1, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- LOG.debug("Error while awaiting termination of pool. Will force shutdown now.");
- } finally {
- hsPool.purge();
- if (!hsPool.isTerminated()) {
- hsPool.shutdownNow();
+ try{
+ queue.close();
+ queue = null;
+ }catch (Exception ex){
+ LOG.warn("Closing queue failed: {}", ex.getMessage());
}
- LOG.debug("is handshake pool for node {} is terminated : {}",
- connectionAdapter.getRemoteAddress(), hsPool.isTerminated());
}
}
public void setHandshakeContext(HandshakeContext handshakeContext) {
this.handshakeContext = handshakeContext;
}
-
- @VisibleForTesting
- ThreadPoolExecutor getHsPool() {
- return hsPool;
- }
}
for(Elements element : list) {
List<Boolean> bitmap = element.getVersionBitmap();
// check for version bitmap
- for(short bitPos : ConnectionConductor.versionOrder) {
+ for(short bitPos : ConnectionConductor.VERSION_ORDER) {
// with all the version it should work.
if(bitmap.get(bitPos % Integer.SIZE)) {
supportedHighestVersion = bitPos;
public static final byte[] convertIpv6ArbitraryMaskToByteArray(final Ipv6ArbitraryMask mask) {
String maskValue;
- if (mask.getValue() != null) {
+ if (mask != null && mask.getValue() != null) {
maskValue = mask.getValue();
} else {
maskValue = DEFAULT_IPV6_ARBITRARY_BITMASK;
import org.opendaylight.openflowjava.protocol.api.util.BinContent;
import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.openflowplugin.api.openflow.md.util.OpenflowVersion;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Uri;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortNumberUni;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.OutputPortValues;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.common.types.rev130731.PortNumberValues;
}
return result;
}
+
+ /**
+ * Converts a port number to a protocol-agnostic {@link Uri}.
+ * @param version openflow version
+ * @param portNumber port number
+ * @return port number uri
+ */
+ public static Uri getProtocolAgnosticPortUri(final short version, final long portNumber) {
+ return new Uri(portNumberToString(getProtocolAgnosticPort(OpenflowVersion.get(version), portNumber)));
+ }
}
libSimulation.join();
}
queueProcessor.shutdown();
- connectionConductor.getHsPool().shutdown();
for (Exception problem : adapter.getOccuredExceptions()) {
LOG.error("during simulation on adapter side: "
*/
private void executeNow() throws InterruptedException {
execute(true);
- connectionConductor.getHsPool().shutdown();
}
/**
@Before
public void setUp() {
handshakeManager = new HandshakeManagerImpl(adapter, OFConstants.OFP_VERSION_1_3,
- ConnectionConductor.versionOrder);
+ ConnectionConductor.VERSION_ORDER);
handshakeManager.setErrorHandler(errorHandler);
handshakeManager.setHandshakeListener(handshakeListener);
handshakeManager.setUseVersionBitmap(false);
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.HelloInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.openflow.protocol.rev130731.hello.Elements;
-/**
- * @author mirehak
- *
- */
public class MessageFactoryTest {
- /**
- * Test method for {@link org.opendaylight.openflowplugin.openflow.md.core.MessageFactory#createHelloInputWoElements(java.lang.Short, java.lang.Long)}.
- */
@Test
public void testCreateHelloInputWoElements() {
short highestVersion = (short) 0x04;
Assert.assertNull(helloMsg.getElements());
}
- /**
- * Test method for {@link org.opendaylight.openflowplugin.openflow.md.core.MessageFactory#createHelloInputWithElements(java.lang.Short, java.lang.Long, java.util.List)}.
- */
@Test
public void testCreateHelloInputWithElements() {
short highestVersion = (short) 0x04;
false, true, false, false, true};
HelloInput helloMsg = MessageFactory.createHelloInput(highestVersion, xid,
- ConnectionConductor.versionOrder);
+ ConnectionConductor.VERSION_ORDER);
Assert.assertEquals(highestVersion, helloMsg.getVersion().shortValue());
Assert.assertEquals(xid, helloMsg.getXid().longValue());
Assert.assertEquals(1, helloMsg.getElements().size());
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent</artifactId>
- <version>1.7.3-SNAPSHOT</version>
+ <version>1.7.5-SNAPSHOT</version>
<relativePath/>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<packaging>pom</packaging>
<scm>
<properties>
<project.build.sourceEncoding>utf-8</project.build.sourceEncoding>
- <openflowjava.version>0.8.3-SNAPSHOT</openflowjava.version>
- <openflowplugin.version>0.3.3-SNAPSHOT</openflowplugin.version>
- <sal.api.version>0.11.3-SNAPSHOT</sal.api.version>
+ <openflowjava.version>0.8.5-SNAPSHOT</openflowjava.version>
+ <openflowplugin.version>0.3.5-SNAPSHOT</openflowplugin.version>
+ <sal.api.version>0.11.5-SNAPSHOT</sal.api.version>
<jmxGeneratorPath>target/generated-sources/config</jmxGeneratorPath>
<salGeneratorPath>target/generated-sources/sal</salGeneratorPath>
<exi.nagasena.version>0000.0002.0053.0</exi.nagasena.version>
- <controller.distribution.version>0.4.3-SNAPSHOT</controller.distribution.version>
+ <controller.distribution.version>0.4.5-SNAPSHOT</controller.distribution.version>
- <config.version>0.5.3-SNAPSHOT</config.version>
- <mdsal.version>1.4.3-SNAPSHOT</mdsal.version>
- <mdsal.model.version>0.9.3-SNAPSHOT</mdsal.model.version>
- <yangtools.version>1.0.3-SNAPSHOT</yangtools.version>
+ <config.version>0.5.5-SNAPSHOT</config.version>
+ <mdsal.version>1.4.5-SNAPSHOT</mdsal.version>
+ <mdsal.model.version>0.9.5-SNAPSHOT</mdsal.model.version>
+ <yangtools.version>1.0.5-SNAPSHOT</yangtools.version>
</properties>
<dependencyManagement>
<dependency>
<groupId>org.opendaylight.mdsal</groupId>
<artifactId>mdsal-artifacts</artifactId>
- <version>2.1.3-SNAPSHOT</version>
+ <version>2.1.5-SNAPSHOT</version>
<scope>import</scope>
<type>pom</type>
</dependency>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>parent</relativePath>
</parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-aggregator</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<name>openflowplugin</name> <!-- Used by Sonar to set project name -->
<packaging>pom</packaging>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../../parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../../parent</relativePath>
</parent>
<artifactId>sample-consumer</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<artifactId>test-common</artifactId>
<parent>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-parent</artifactId>
- <version>0.3.3-SNAPSHOT</version>
+ <version>0.3.5-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<artifactId>test-provider</artifactId>