deployment needs.
2. From the features/ directory, execute "mvn clean install"
- This will generate a kar file such as "features/target/ovsdb-features-1.2.0-SNAPSHOT.kar"
+ This will generate a kar file such as "features/target/ovsdb-features-1.2.1-SNAPSHOT.kar"
3. Download (or build from controller project) the Karaf distribution :
http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/org/opendaylight/controller/distribution.opendaylight-karaf/
opendaylight-user@root>kar:list
KAR Name
-----------------------------
- ovsdb-features-1.2.0-SNAPSHOT
+ ovsdb-features-1.2.1-SNAPSHOT
opendaylight-user@root>feature:list | grep ovsdb
- odl-ovsdb-all | 1.2.0-SNAPSHOT | x | ovsdb-1.2.0-SNAPSHOT | OpenDaylight :: OVSDB :: all
- odl-ovsdb-library | 1.0.0-SNAPSHOT | x | ovsdb-1.2.0-SNAPSHOT | OVSDB :: Library
- odl-ovsdb-schema-openvswitch | 1.0.0-SNAPSHOT | x | ovsdb-1.2.0-SNAPSHOT | OVSDB :: Schema :: Open_vSwitch
- odl-ovsdb-schema-hardwarevtep | 1.0.0-SNAPSHOT | x | ovsdb-1.2.0-SNAPSHOT | OVSDB :: Schema :: hardware_vtep
- odl-ovsdb-plugin | 1.0.0-SNAPSHOT | x | ovsdb-1.2.0-SNAPSHOT | OpenDaylight :: OVSDB :: Plugin
+ odl-ovsdb-all | 1.2.1-SNAPSHOT | x | ovsdb-1.2.1-SNAPSHOT | OpenDaylight :: OVSDB :: all
+ odl-ovsdb-library | 1.0.0-SNAPSHOT | x | ovsdb-1.2.1-SNAPSHOT | OVSDB :: Library
+ odl-ovsdb-schema-openvswitch | 1.0.0-SNAPSHOT | x | ovsdb-1.2.1-SNAPSHOT | OVSDB :: Schema :: Open_vSwitch
+ odl-ovsdb-schema-hardwarevtep | 1.0.0-SNAPSHOT | x | ovsdb-1.2.1-SNAPSHOT | OVSDB :: Schema :: hardware_vtep
+ odl-ovsdb-plugin | 1.0.0-SNAPSHOT | x | ovsdb-1.2.1-SNAPSHOT | OpenDaylight :: OVSDB :: Plugin
opendaylight-user@root>bundle:list | grep OVSDB
186 | Active | 80 | 1.0.0.SNAPSHOT | OVSDB Library
To run the integration tests locally the following components are required:
- Docker
- - Fig
+ - Fig or Docker Compose
To install docker, follow the installation guide for your platform:
http://docs.docker.com/installation/
To install fig:
http://www.fig.sh/install.html
+To install Docker Compose:
+ http://docs.docker.com/compose/install/
+
To run the integration tests:
mvn clean install
On Linux you'll generally need to run fig as root (sudo fig ...).
+If you're using Docker Compose, replace "fig" above with "docker-compose" and ignore the warnings.
+
Skipping unit tests and karaf tests
====================
<properties>
<!-- Overrides -->
<!-- Controller Dependencies for Pax Exam -->
- <arphandler.version>0.7.0-SNAPSHOT</arphandler.version>
- <bundlescanner.implementation.version>0.6.0-SNAPSHOT</bundlescanner.implementation.version>
- <bundlescanner.version>0.6.0-SNAPSHOT</bundlescanner.version>
- <clustering.services_implementation.version>0.6.0-SNAPSHOT</clustering.services_implementation.version>
- <commons.httpclient.version>0.3.0-SNAPSHOT</commons.httpclient.version>
<configuration.implementation.version>0.6.0-SNAPSHOT</configuration.implementation.version>
<configuration.version>0.6.0-SNAPSHOT</configuration.version>
<connectionmanager.version>0.3.0-SNAPSHOT</connectionmanager.version>
<hosttracker.implementation.version>0.7.0-SNAPSHOT</hosttracker.implementation.version>
<hosttracker.northbound.version>0.6.0-SNAPSHOT</hosttracker.northbound.version>
<logging.bridge.version>0.6.0-SNAPSHOT</logging.bridge.version>
- <protocol_plugins.stub.version>0.6.0-SNAPSHOT</protocol_plugins.stub.version>
<routing.dijkstra_implementation.version>0.6.0-SNAPSHOT</routing.dijkstra_implementation.version>
<sal.implementation.version>0.6.0-SNAPSHOT</sal.implementation.version>
<security.version>0.6.0-SNAPSHOT</security.version>
<groupId>org.jboss.spec.javax.transaction</groupId>
<artifactId>jboss-transaction-api_1.1_spec</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>arphandler</artifactId>
- <version>${arphandler.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>bundlescanner</artifactId>
- <version>${bundlescanner.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>bundlescanner.implementation</artifactId>
- <version>${bundlescanner.implementation.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services</artifactId>
- <version>${clustering.services.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services-implementation</artifactId>
- <version>${clustering.services_implementation.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.httpclient</artifactId>
- <version>${commons.httpclient.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.northbound</artifactId>
- <version>${northbound.commons.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>configuration</artifactId>
- <version>${configuration.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>configuration.implementation</artifactId>
- <version>${configuration.implementation.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>connectionmanager</artifactId>
- <version>${connectionmanager.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>connectionmanager.implementation</artifactId>
- <version>${connectionmanager.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>containermanager</artifactId>
- <version>${containermanager.it.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>containermanager.it.implementation</artifactId>
- <version>${containermanager.it.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>flowprogrammer.northbound</artifactId>
- <version>${flowprogrammer.northbound.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwarding.staticrouting</artifactId>
- <version>${forwarding.staticrouting}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwarding.staticrouting.northbound</artifactId>
- <version>${forwarding.staticrouting.northbound.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwardingrulesmanager</artifactId>
- <version>${forwardingrulesmanager.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>forwardingrulesmanager.implementation</artifactId>
- <version>${forwardingrulesmanager.implementation.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>hosttracker</artifactId>
- <version>${hosttracker.api.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>hosttracker.implementation</artifactId>
- <version>${hosttracker.implementation.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>hosttracker.northbound</artifactId>
- <version>${hosttracker.northbound.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>logging.bridge</artifactId>
- <version>${logging.bridge.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>protocol_plugins.stub</artifactId>
- <version>${protocol_plugins.stub.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>routing.dijkstra_implementation</artifactId>
- <version>${routing.dijkstra_implementation.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- <version>${sal.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal.implementation</artifactId>
- <version>${sal.implementation.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-binding-it</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>security</artifactId>
- <version>${security.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>statistics.northbound</artifactId>
- <version>${statistics.northbound.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>statisticsmanager</artifactId>
- <version>${statisticsmanager.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>statisticsmanager.implementation</artifactId>
- <version>${statisticsmanager.implementation.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>subnets.northbound</artifactId>
- <version>${subnets.northbound.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager</artifactId>
- <version>${switchmanager.api.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager.implementation</artifactId>
- <version>${switchmanager.implementation.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>switchmanager.northbound</artifactId>
- <version>${switchmanager.northbound.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topology.northbound</artifactId>
- <version>${topology.northbound.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>topologymanager</artifactId>
- <version>${topologymanager.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>usermanager</artifactId>
- <version>${usermanager.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>usermanager.implementation</artifactId>
- <version>${usermanager.implementation.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>com.sun.jersey.jersey-servlet</artifactId>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>openstack.net-virt</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>ovsdb-plugin-compatibility-layer</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>plugin</artifactId>
- </dependency>
<dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>schema.hardwarevtep</artifactId>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>properties-maven-plugin</artifactId>
- <version>${propertymavenplugin.version}</version>
<executions>
<execution>
<goals>
<!-- Surefire/Failsafe Arguments -->
<argLine>-Xmx1024m -XX:MaxPermSize=256m</argLine>
<!-- OVSDB Component Versions -->
- <openstack.netvirt.version>1.2.0-SNAPSHOT</openstack.netvirt.version>
- <openstack.netvirt.providers.version>1.2.0-SNAPSHOT</openstack.netvirt.providers.version>
- <ovsdb.features.version>1.2.0-SNAPSHOT</ovsdb.features.version>
- <ovsdb.library.version>1.2.0-SNAPSHOT</ovsdb.library.version>
+ <openstack.netvirt.version>1.2.1-SNAPSHOT</openstack.netvirt.version>
+ <openstack.netvirt.providers.version>1.2.1-SNAPSHOT</openstack.netvirt.providers.version>
+ <ovsdb.features.version>1.2.1-SNAPSHOT</ovsdb.features.version>
+ <ovsdb.library.version>1.2.1-SNAPSHOT</ovsdb.library.version>
<ovsdb.northbound.version>0.8.0-SNAPSHOT</ovsdb.northbound.version>
- <ovsdb.plugin.version>1.2.0-SNAPSHOT</ovsdb.plugin.version>
- <ovsdb.plugin.compatibility.layer.version>1.2.0-SNAPSHOT</ovsdb.plugin.compatibility.layer.version>
+ <ovsdb.plugin.version>1.2.1-SNAPSHOT</ovsdb.plugin.version>
+ <ovsdb.plugin.compatibility.layer.version>1.2.1-SNAPSHOT</ovsdb.plugin.compatibility.layer.version>
<ovsdb.ovssfc.version>0.2.0-SNAPSHOT</ovsdb.ovssfc.version>
- <ovsdb.utils.config.version>1.2.0-SNAPSHOT</ovsdb.utils.config.version>
- <ovsdb.utils.mdsal.node.version>1.2.0-SNAPSHOT</ovsdb.utils.mdsal.node.version>
- <ovsdb.utils.mdsal.openflow.version>1.2.0-SNAPSHOT</ovsdb.utils.mdsal.openflow.version>
- <ovsdb.utils.servicehelper.version>1.2.0-SNAPSHOT</ovsdb.utils.servicehelper.version>
- <plugin.shell.version>1.2.0-SNAPSHOT</plugin.shell.version>
- <schema.hardwarevtep.version>1.2.0-SNAPSHOT</schema.hardwarevtep.version>
- <schema.openvswitch.version>1.2.0-SNAPSHOT</schema.openvswitch.version>
+ <ovsdb.utils.config.version>1.2.1-SNAPSHOT</ovsdb.utils.config.version>
+ <ovsdb.utils.mdsal.node.version>1.2.1-SNAPSHOT</ovsdb.utils.mdsal.node.version>
+ <ovsdb.utils.mdsal.openflow.version>1.2.1-SNAPSHOT</ovsdb.utils.mdsal.openflow.version>
+ <ovsdb.utils.servicehelper.version>1.2.1-SNAPSHOT</ovsdb.utils.servicehelper.version>
+ <plugin.shell.version>1.2.1-SNAPSHOT</plugin.shell.version>
+ <schema.hardwarevtep.version>1.2.1-SNAPSHOT</schema.hardwarevtep.version>
+ <schema.openvswitch.version>1.2.1-SNAPSHOT</schema.openvswitch.version>
<!-- Skip Coverage and IT by default -->
<skip.coverage>true</skip.coverage>
<skip.distribution>false</skip.distribution>
<sonar.jacoco.itReportPath>${root.directory}/target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
<sonar.dynamicAnalysis>reuseReports</sonar.dynamicAnalysis>
<!-- ODL Dependency Versions -->
- <clustering.services.version>0.7.0-SNAPSHOT</clustering.services.version>
<containermanager.version>0.7.0-SNAPSHOT</containermanager.version>
<controller.config.version>0.4.0-SNAPSHOT</controller.config.version>
<forwardingrulesmanager.version>0.8.0-SNAPSHOT</forwardingrulesmanager.version>
<dependencyManagement>
<dependencies>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-artifacts</artifactId>
+ <version>2.0.0-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>mdsal-model-artifacts</artifactId>
+ <version>0.8.0-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<artifactId>config-api</artifactId>
<version>${controller.config.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>clustering.services</artifactId>
- <version>${clustering.services.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>containermanager</artifactId>
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
- <version>${jacoco.version}</version>
<configuration>
<skip>${skip.coverage}</skip>
</configuration>
</executions>
<dependencies>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.mdsal</groupId>
<artifactId>maven-sal-api-gen-plugin</artifactId>
<version>${yangtools.version}</version>
<type>jar</type>
</parent>
<artifactId>parents</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<name>${project.artifactId}</name>
<packaging>pom</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>features-ovsdb</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>jar</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
</scm>
<properties>
- <dummy-console.version>1.3.0-SNAPSHOT</dummy-console.version>
<odl.karaf.base.version>1.6.0-SNAPSHOT</odl.karaf.base.version>
<mdsal.version>1.3.0-SNAPSHOT</mdsal.version>
<io.netty.version>3.8.0.Final</io.netty.version>
<networkconfig.neutron.version>0.6.0-SNAPSHOT</networkconfig.neutron.version>
- <ovsdb.library.version>1.2.0-SNAPSHOT</ovsdb.library.version>
- <openstack.netvirt.version>1.2.0-SNAPSHOT</openstack.netvirt.version>
- <openstack.netvirt.providers.version>1.2.0-SNAPSHOT</openstack.netvirt.providers.version>
- <ovsdb.plugin.version>1.2.0-SNAPSHOT</ovsdb.plugin.version>
- <ovsdb.plugin.compatibility.layer.version>1.2.0-SNAPSHOT</ovsdb.plugin.compatibility.layer.version>
- <ovsdb.utils.servicehelper.version>1.2.0-SNAPSHOT</ovsdb.utils.servicehelper.version>
- <plugin.shell.version>1.2.0-SNAPSHOT</plugin.shell.version>
- <schema.hardwarevtep.version>1.2.0-SNAPSHOT</schema.hardwarevtep.version>
- <schema.openvswitch.version>1.2.0-SNAPSHOT</schema.openvswitch.version>
+ <ovsdb.library.version>1.2.1-SNAPSHOT</ovsdb.library.version>
+ <openstack.netvirt.version>1.2.1-SNAPSHOT</openstack.netvirt.version>
+ <openstack.netvirt.providers.version>1.2.1-SNAPSHOT</openstack.netvirt.providers.version>
+ <ovsdb.plugin.version>1.2.1-SNAPSHOT</ovsdb.plugin.version>
+ <ovsdb.plugin.compatibility.layer.version>1.2.1-SNAPSHOT</ovsdb.plugin.compatibility.layer.version>
+ <ovsdb.utils.servicehelper.version>1.2.1-SNAPSHOT</ovsdb.utils.servicehelper.version>
+ <plugin.shell.version>1.2.1-SNAPSHOT</plugin.shell.version>
+ <schema.hardwarevtep.version>1.2.1-SNAPSHOT</schema.hardwarevtep.version>
+ <schema.openvswitch.version>1.2.1-SNAPSHOT</schema.openvswitch.version>
<openflowplugin.version>0.2.0-SNAPSHOT</openflowplugin.version>
<sal.version>0.10.0-SNAPSHOT</sal.version>
<yangtools.version>0.8.0-SNAPSHOT</yangtools.version>
<dlux.core.version>0.3.0-SNAPSHOT</dlux.core.version>
<ovsdb.ui.version>0.1.0-SNAPSHOT</ovsdb.ui.version>
+ <config.version>0.4.0-SNAPSHOT</config.version>
</properties>
<dependencyManagement>
<artifactId>org.eclipse.osgi.services</artifactId>
<version>3.3.100.v20120522-1822</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>dummy-console</artifactId>
- <version>${dummy-console.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.neutron</groupId>
<artifactId>features-neutron</artifactId>
<classifier>features</classifier>
<type>xml</type>
</dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>library</artifactId>
- <version>${ovsdb.library.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>openstack.net-virt</artifactId>
<type>xml</type>
<classifier>config</classifier>
</dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>plugin</artifactId>
- <version>${ovsdb.plugin.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>ovsdb-plugin-compatibility-layer</artifactId>
- <version>${ovsdb.plugin.compatibility.layer.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>plugin-shell</artifactId>
- <version>${plugin.shell.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>schema.openvswitch</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>library-features</artifactId>
+ <version>${ovsdb.library.version}</version>
+ <type>xml</type>
+ <classifier>features</classifier>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>routemgr-features</artifactId>
<repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin-extension/${openflowplugin.version}/xml/features</repository>
<repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/${openflowplugin.version}/xml/features</repository>
<repository>mvn:org.opendaylight.neutron/features-neutron/${networkconfig.neutron.version}/xml/features</repository>
- <repository>mvn:org.opendaylight.ovsdb/southbound-features/1.2.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.ovsdb/routemgr-features/1.2.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.ovsdb/routemgr-features/1.2.1-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.ovsdb/southbound-features/1.2.1-SNAPSHOT/xml/features</repository>
<repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.ovsdb/library-features/${ovsdb.library.version}/xml/features</repository>
<feature name="odl-ovsdb-all" description="OpenDaylight :: OVSDB :: all"
version='${project.version}'>
<feature version="${ovsdb.library.version}">odl-ovsdb-library</feature>
- <feature version="${ovsdb.plugin.version}">odl-ovsdb-plugin</feature>
- </feature>
-
- <feature name="odl-ovsdb-library" description="OVSDB :: Library"
- version='${ovsdb.library.version}'>
- <bundle>mvn:org.osgi/org.osgi.compendium/${osgi.compendium.version}</bundle>
- <bundle>mvn:org.apache.felix/org.apache.felix.dependencymanager/${felix.dependencymanager.version}</bundle>
- <bundle>mvn:org.apache.felix/org.apache.felix.dependencymanager.shell/${felix.dependencymanager.shell.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/dummy-console/1.3.0-SNAPSHOT</bundle>
- <bundle start="true">mvn:com.fasterxml.jackson.core/jackson-annotations/${jackson.version}</bundle>
- <bundle start="true">mvn:com.fasterxml.jackson.core/jackson-core/${jackson.version}</bundle>
- <bundle start="true">mvn:com.fasterxml.jackson.core/jackson-databind/${jackson.version}</bundle>
- <bundle start="true">mvn:com.google.guava/guava/${guava.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-buffer/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-codec/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-transport/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-common/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-handler/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-codec-http/${netty.version}</bundle>
- <bundle>mvn:io.netty/netty/${io.netty.version}</bundle>
- <bundle>mvn:org.apache.commons/commons-lang3/${commons.lang3.version}</bundle>
- <bundle>mvn:org.osgi/org.osgi.core/${osgi.core.version}</bundle>
- <bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
- <bundle>mvn:org.opendaylight.ovsdb/library/${ovsdb.library.version}</bundle>
</feature>
<feature name="odl-ovsdb-schema-openvswitch" description="OVSDB :: Schema :: Open_vSwitch"
<bundle>mvn:org.opendaylight.ovsdb/schema.hardwarevtep/${schema.hardwarevtep.version}</bundle>
</feature>
- <feature name="odl-ovsdb-plugin" description="OpenDaylight :: OVSDB :: Plugin"
- version='${ovsdb.plugin.version}'>
- <feature version="${openflowplugin.version}">odl-openflowplugin-nsf-services</feature>
- <feature version="${ovsdb.library.version}">odl-ovsdb-library</feature>
- <feature version="${schema.openvswitch.version}">odl-ovsdb-schema-openvswitch</feature>
- <feature version="${schema.hardwarevtep.version}">odl-ovsdb-schema-hardwarevtep</feature>
- <bundle>mvn:org.opendaylight.ovsdb/plugin/${ovsdb.plugin.version}</bundle>
- <bundle>mvn:org.opendaylight.ovsdb/plugin-shell/${plugin.shell.version}</bundle>
- </feature>
-
<feature name="odl-ovsdb-openstack" description="OpenDaylight :: OVSDB :: OpenStack Network Virtualization"
version='${openstack.netvirt.version}'>
<feature version='${mdsal.version}'>odl-mdsal-broker</feature>
<feature version="${openflowplugin.version}">odl-openflowplugin-nsf-model</feature>
<feature version="${networkconfig.neutron.version}">odl-neutron-service</feature>
- <feature version="1.2.0-SNAPSHOT">odl-ovsdb-southbound-impl-ui</feature>
- <feature version="1.2.0-SNAPSHOT">odl-routemgr-rest</feature>
+ <feature version="1.2.1-SNAPSHOT">odl-routemgr-plugin</feature>
+ <feature version="1.2.1-SNAPSHOT">odl-ovsdb-southbound-impl-ui</feature>
<feature version="${openflowplugin.version}">odl-openflowplugin-flow-services</feature>
<feature version="${openflowplugin.version}">odl-openflowplugin-nxm-extensions</feature>
<bundle>mvn:org.opendaylight.ovsdb/utils.servicehelper/${ovsdb.utils.servicehelper.version}</bundle>
</parent>
<artifactId>features</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<name>${project.artifactId}</name>
<packaging>pom</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<!-- Should be in a parent POM -->
<version>4.4.0</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>library</artifactId>
- </dependency>
<dependency>
<groupId>org.osgi</groupId>
<artifactId>org.osgi.core</artifactId>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
- <version>${failsafe.version}</version>
<executions>
<execution>
<id>failsafe-integration-tests</id>
<groupId>org.ops4j.pax.exam</groupId>
<artifactId>pax-exam-container-native</artifactId>
</dependency>
- <dependency>
- <groupId>org.ops4j.pax.exam</groupId>
- <artifactId>pax-exam-container-karaf</artifactId>
- <scope>compile</scope>
- </dependency>
</dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-failsafe-plugin</artifactId>
- <version>${failsafe.version}</version>
- <configuration>
- </configuration>
- <executions>
- <execution>
- <id>failsafe-integration-tests</id>
- <phase>integration-test</phase>
- <goals>
- <goal>integration-test</goal>
- </goals>
- <configuration>
- <classpathDependencyExcludes>
- <classpathDependencyExcludes>org.ops4j.pax.exam:pax-exam-container-karaf</classpathDependencyExcludes>
- </classpathDependencyExcludes>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
</profile>
<profile>
<id>karafit</id>
+++ /dev/null
-/*
- * Copyright (C) 2014 Red Hat, Inc.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Dave Tucker
- */
-
-package org.opendaylight.ovsdb.integrationtest;
-
-import static org.ops4j.pax.exam.CoreOptions.mavenBundle;
-import static org.ops4j.pax.exam.CoreOptions.systemPackages;
-import static org.ops4j.pax.exam.CoreOptions.systemProperty;
-
-import org.opendaylight.controller.test.sal.binding.it.TestHelper;
-import org.ops4j.pax.exam.Option;
-import org.ops4j.pax.exam.options.DefaultCompositeOption;
-
-
-public class ConfigurationBundles {
-
- public static Option controllerBundles() {
- return new DefaultCompositeOption(
- systemPackages("sun.reflect", "sun.reflect.misc", "sun.misc"),
- mavenBundle("ch.qos.logback", "logback-classic").versionAsInProject(),
- mavenBundle("ch.qos.logback", "logback-core").versionAsInProject(),
- mavenBundle("com.google.code.gson", "gson").versionAsInProject(),
- mavenBundle("com.google.guava", "guava").versionAsInProject(),
- mavenBundle("eclipselink", "javax.resource").versionAsInProject(),
- mavenBundle("equinoxSDK381", "javax.servlet").versionAsInProject(),
- mavenBundle("org.apache.felix", "org.apache.felix.gogo.command").versionAsInProject(),
- mavenBundle("org.apache.felix", "org.apache.felix.gogo.runtime").versionAsInProject(),
- mavenBundle("org.apache.felix", "org.apache.felix.gogo.shell").versionAsInProject(),
- mavenBundle("equinoxSDK381", "org.eclipse.equinox.console").versionAsInProject(),
- mavenBundle("equinoxSDK381", "org.eclipse.equinox.ds").versionAsInProject(),
- mavenBundle("equinoxSDK381", "org.eclipse.equinox.util").versionAsInProject(),
- mavenBundle("equinoxSDK381","org.eclipse.osgi.services").versionAsInProject(),
- mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(),
- mavenBundle("org.apache.felix", "org.apache.felix.dependencymanager").versionAsInProject(),
- mavenBundle("org.apache.felix", "org.apache.felix.dependencymanager.shell").versionAsInProject(),
- mavenBundle("org.slf4j", "slf4j-api").versionAsInProject()
- );
- }
-
- public static Option controllerNorthboundBundles() {
- return new DefaultCompositeOption(
- mavenBundle("org.slf4j", "log4j-over-slf4j").versionAsInProject(),
-
- // setting default level. Jersey bundles will need to be started
- // earlier.
- systemProperty("osgi.bundles.defaultStartLevel").value("4"),
- mavenBundle("com.fasterxml.jackson.jaxrs", "jackson-jaxrs-base").versionAsInProject(),
- mavenBundle("com.fasterxml.jackson.jaxrs", "jackson-jaxrs-json-provider").versionAsInProject(),
- mavenBundle("com.fasterxml.jackson.module", "jackson-module-jaxb-annotations").versionAsInProject(),
- mavenBundle("com.sun.jersey", "jersey-client").versionAsInProject(),
- mavenBundle("com.sun.jersey", "jersey-core").versionAsInProject().startLevel(2),
- mavenBundle("com.sun.jersey", "jersey-server").versionAsInProject().startLevel(2),
- mavenBundle("commons-codec", "commons-codec").versionAsInProject(),
- mavenBundle("commons-fileupload", "commons-fileupload").versionAsInProject(),
- mavenBundle("commons-io", "commons-io").versionAsInProject(),
- mavenBundle("eclipselink", "javax.persistence").versionAsInProject(),
- mavenBundle("equinoxSDK381", "javax.servlet.jsp").versionAsInProject(),
- mavenBundle("equinoxSDK381", "org.eclipse.equinox.cm").versionAsInProject(),
- mavenBundle("equinoxSDK381", "org.eclipse.equinox.launcher").versionAsInProject(),
- mavenBundle("equinoxSDK381", "org.eclipse.osgi.services").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.gemini.web.core").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.gemini.web.extender").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.gemini.web.tomcat").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.virgo.kernel.equinox.extensions").versionAsInProject().noStart(),
- mavenBundle("geminiweb", "org.eclipse.virgo.util.common").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.virgo.util.io").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.virgo.util.math").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.virgo.util.osgi").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.virgo.util.osgi.manifest").versionAsInProject(),
- mavenBundle("geminiweb", "org.eclipse.virgo.util.parser.manifest").versionAsInProject(),
- mavenBundle("orbit", "javax.activation").versionAsInProject(),
- mavenBundle("orbit", "javax.annotation").versionAsInProject(),
- mavenBundle("orbit", "javax.ejb").versionAsInProject(),
- mavenBundle("orbit", "javax.el").versionAsInProject(),
- mavenBundle("orbit", "javax.mail.glassfish").versionAsInProject(),
- mavenBundle("orbit", "javax.servlet.jsp.jstl").versionAsInProject(),
- mavenBundle("orbit", "javax.servlet.jsp.jstl.impl").versionAsInProject(),
- mavenBundle("orbit", "javax.xml.rpc").versionAsInProject(),
- mavenBundle("orbit", "javax.xml.rpc").versionAsInProject(),
- mavenBundle("orbit", "org.apache.catalina").versionAsInProject(),
- mavenBundle("orbit", "org.apache.catalina.ha").versionAsInProject().noStart(),
- mavenBundle("orbit", "org.apache.catalina.tribes").versionAsInProject().noStart(),
- mavenBundle("orbit", "org.apache.coyote").versionAsInProject().noStart(),
- mavenBundle("orbit", "org.apache.el").versionAsInProject(),
- mavenBundle("orbit", "org.apache.jasper").versionAsInProject().noStart(),
- mavenBundle("orbit", "org.apache.juli.extras").versionAsInProject(),
- mavenBundle("orbit", "org.apache.tomcat.api").versionAsInProject(),
- mavenBundle("orbit", "org.apache.tomcat.util").versionAsInProject().noStart(),
- mavenBundle("org.aopalliance", "com.springsource.org.aopalliance").versionAsInProject(),
- mavenBundle("org.apache.felix", "org.apache.felix.fileinstall").versionAsInProject(),
- mavenBundle("org.codehaus.jettison", "jettison").versionAsInProject(),
- mavenBundle("org.jboss.spec.javax.transaction", "jboss-transaction-api_1.1_spec").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "clustering.services").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "clustering.services-implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "configuration").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "containermanager").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "containermanager.it.implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "sal").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "sal.implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "bundlescanner").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "bundlescanner.implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "commons.httpclient").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "commons.northbound").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "configuration.implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "flowprogrammer.northbound").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "forwarding.staticrouting").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "forwarding.staticrouting.northbound").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "forwardingrulesmanager").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "hosttracker").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "hosttracker.implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "hosttracker.northbound").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "logging.bridge").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "routing.dijkstra_implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "security").versionAsInProject().noStart(),
- mavenBundle("org.opendaylight.controller", "statistics.northbound").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "statisticsmanager").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "subnets.northbound").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "switchmanager").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "switchmanager.implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "switchmanager.northbound").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "topology.northbound").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "topologymanager").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "usermanager").versionAsInProject(),
- mavenBundle("org.opendaylight.controller", "usermanager.implementation").versionAsInProject(),
- mavenBundle("org.opendaylight.controller.thirdparty", "com.sun.jersey.jersey-servlet").versionAsInProject(),
- mavenBundle("org.opendaylight.controller.thirdparty", "net.sf.jung2").versionAsInProject(),
- mavenBundle("org.opendaylight.controller.thirdparty", "org.apache.catalina.filters.CorsFilter").versionAsInProject().noStart(),
- mavenBundle("org.ow2.asm", "asm-all").versionAsInProject(),
- mavenBundle("org.ow2.chameleon.management", "chameleon-mbeans").versionAsInProject(),
- mavenBundle("org.slf4j", "jcl-over-slf4j").versionAsInProject(),
- mavenBundle("org.slf4j", "slf4j-api").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.aop").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.asm").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.beans").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.context").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.context.support").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.core").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.expression").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.transaction").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.web").versionAsInProject(),
- mavenBundle("org.springframework", "org.springframework.web.servlet").versionAsInProject(),
- mavenBundle("org.springframework.security", "spring-security-config").versionAsInProject(),
- mavenBundle("org.springframework.security", "spring-security-core").versionAsInProject(),
- mavenBundle("org.springframework.security", "spring-security-taglibs").versionAsInProject(),
- mavenBundle("org.springframework.security", "spring-security-web").versionAsInProject(),
- mavenBundle("virgomirror", "org.eclipse.jdt.core.compiler.batch").versionAsInProject(),
- mavenBundle("org.yaml", "snakeyaml").versionAsInProject()
- );
- }
-
- public static Option ovsdbLibraryBundles() {
- return new DefaultCompositeOption(
- mavenBundle("io.netty", "netty-buffer").versionAsInProject(),
- mavenBundle("io.netty", "netty-codec").versionAsInProject(),
- mavenBundle("io.netty", "netty-common").versionAsInProject(),
- mavenBundle("io.netty", "netty-handler").versionAsInProject(),
- mavenBundle("io.netty", "netty-transport").versionAsInProject(),
- mavenBundle("com.fasterxml.jackson.core", "jackson-annotations").versionAsInProject(),
- mavenBundle("com.fasterxml.jackson.core", "jackson-core").versionAsInProject(),
- mavenBundle("com.fasterxml.jackson.core", "jackson-databind").versionAsInProject(),
- mavenBundle("javax.portlet", "portlet-api").versionAsInProject(),
- mavenBundle("com.google.guava", "guava").versionAsInProject(),
- mavenBundle("org.opendaylight.ovsdb", "utils.servicehelper").versionAsInProject(),
- mavenBundle("org.opendaylight.ovsdb", "library").versionAsInProject()
- );
- }
-
- public static Option ovsdbPluginBundles() {
- return new DefaultCompositeOption(
- mavenBundle("org.opendaylight.ovsdb", "plugin").versionAsInProject(),
- mavenBundle("org.mockito", "mockito-all").versionAsInProject()
- );
- }
-
- public static Option ovsdbDefaultSchemaBundles() {
- return new DefaultCompositeOption(
- mavenBundle("org.opendaylight.ovsdb", "schema.openvswitch").versionAsInProject(),
- mavenBundle("org.opendaylight.ovsdb", "schema.hardwarevtep").versionAsInProject()
- );
- }
-
- public static Option ovsdbNeutronBundles() {
- return new DefaultCompositeOption(
- mavenBundle("commons-net", "commons-net").versionAsInProject(),
- mavenBundle("org.opendaylight.neutron", "neutron-spi").versionAsInProject(),
- mavenBundle("org.opendaylight.ovsdb", "openstack.net-virt").versionAsInProject()
- );
- }
-
- public static Option mdsalBundles() {
- return new DefaultCompositeOption(
- TestHelper.configMinumumBundles(),
- TestHelper.baseModelBundles(),
- TestHelper.flowCapableModelBundles(),
- TestHelper.junitAndMockitoBundles()
- );
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Madhu Venugopal, Sam Hague
- */
-package org.opendaylight.ovsdb.integrationtest;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.opendaylight.ovsdb.utils.servicehelper.ServiceHelper;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.ovsdb.lib.OvsdbConnection;
-import org.opendaylight.ovsdb.lib.OvsdbConnectionListener;
-import org.opendaylight.ovsdb.plugin.api.ConnectionConstants;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService;
-
-import org.junit.Rule;
-import org.junit.rules.TestRule;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class OvsdbIntegrationTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(OvsdbIntegrationTestBase.class);
- protected final static String IDENTIFIER = "TEST";
- protected final static String SERVER_IPADDRESS = "ovsdbserver.ipaddress";
- protected final static String SERVER_PORT = "ovsdbserver.port";
- protected final static String CONNECTION_TYPE = "ovsdbserver.connection";
- protected final static String CONNECTION_TYPE_ACTIVE = "active";
- protected final static String CONNECTION_TYPE_PASSIVE = "passive";
- protected final static int CONNECTION_INIT_TIMEOUT = 10000;
- protected final static String DEFAULT_SERVER_PORT = "6640";
-
- private static boolean bundlesReady = false;
- public final static String OPEN_VSWITCH_SCHEMA = "Open_vSwitch";
- public final static String HARDWARE_VTEP = "hardware_vtep";
-
- public Properties loadProperties() {
- Properties props = new Properties(System.getProperties());
- return props;
- }
-
- public Node getPluginTestConnection() throws IOException, InterruptedException, ExecutionException, TimeoutException {
- Properties props = loadProperties();
- String addressStr = props.getProperty(SERVER_IPADDRESS);
- String portStr = props.getProperty(SERVER_PORT, DEFAULT_SERVER_PORT);
- String connectionType = props.getProperty(CONNECTION_TYPE, "active");
- Node node = null;
-
- OvsdbConnectionService
- connection = (OvsdbConnectionService)ServiceHelper.getGlobalInstance(OvsdbConnectionService.class, this);
- // If the connection type is active, controller connects to the ovsdb-server
- if (connectionType.equalsIgnoreCase(CONNECTION_TYPE_ACTIVE)) {
- if (addressStr == null) {
- fail(usage());
- }
-
- Map<ConnectionConstants, String> params = new HashMap<ConnectionConstants, String>();
- params.put(ConnectionConstants.ADDRESS, addressStr);
- params.put(ConnectionConstants.PORT, portStr);
- node = connection.connect(IDENTIFIER, params);
- } else if (connectionType.equalsIgnoreCase(CONNECTION_TYPE_PASSIVE)) {
- // Wait for CONNECTION_INIT_TIMEOUT for the Passive connection to be initiated by the ovsdb-server.
- Thread.sleep(CONNECTION_INIT_TIMEOUT);
- List<Node> nodes = connection.getNodes();
- assertNotNull(nodes);
- assertTrue(nodes.size() > 0);
- node = nodes.get(0);
- }
-
- if (node != null) {
- LOG.info("getPluginTestConnection: Successfully connected to {}", node);
- } else {
- fail("Connection parameter (" + CONNECTION_TYPE + ") must be active or passive");
- }
- return node;
- }
-
- public OvsdbClient getTestConnection() throws IOException, InterruptedException, ExecutionException, TimeoutException {
- Properties props = loadProperties();
- String addressStr = props.getProperty(SERVER_IPADDRESS);
- String portStr = props.getProperty(SERVER_PORT, DEFAULT_SERVER_PORT);
- String connectionType = props.getProperty(CONNECTION_TYPE, "active");
-
- // If the connection type is active, controller connects to the ovsdb-server
- if (connectionType.equalsIgnoreCase(CONNECTION_TYPE_ACTIVE)) {
- if (addressStr == null) {
- fail(usage());
- }
-
- InetAddress address;
- try {
- address = InetAddress.getByName(addressStr);
- } catch (Exception e) {
- System.out.println("Unable to resolve " + addressStr);
- e.printStackTrace();
- return null;
- }
-
- Integer port;
- try {
- port = Integer.parseInt(portStr);
- } catch (NumberFormatException e) {
- System.out.println("Invalid port number : " + portStr);
- e.printStackTrace();
- return null;
- }
-
- OvsdbConnection connection = (OvsdbConnection)ServiceHelper.getGlobalInstance(OvsdbConnection.class, this);
- return connection.connect(address, port);
- } else if (connectionType.equalsIgnoreCase(CONNECTION_TYPE_PASSIVE)) {
- ExecutorService executor = Executors.newFixedThreadPool(1);
- Future<OvsdbClient> passiveConnection = executor.submit(new PassiveListener());
- return passiveConnection.get(60, TimeUnit.SECONDS);
- }
- fail("Connection parameter ("+CONNECTION_TYPE+") must be either active or passive");
- return null;
- }
-
- protected String usage() {
- return "Integration Test needs a valid connection configuration as follows :\n" +
- "active connection : mvn -Pintegrationtest -Dovsdbserver.ipaddress=x.x.x.x -Dovsdbserver.port=yyyy verify\n"+
- "passive connection : mvn -Pintegrationtest -Dovsdbserver.connection=passive verify\n";
- }
-
- public class PassiveListener implements Callable<OvsdbClient>, OvsdbConnectionListener {
- OvsdbClient client = null;
- @Override
- public OvsdbClient call() throws Exception {
- OvsdbConnection connection = (OvsdbConnection)ServiceHelper.getGlobalInstance(OvsdbConnection.class, this);
- connection.registerConnectionListener(this);
- while (client == null) {
- Thread.sleep(500);
- }
- return client;
- }
-
- @Override
- public void connected(OvsdbClient client) {
- this.client = client;
- }
-
- @Override
- public void disconnected(OvsdbClient client) {
- assertEquals(this.client.getConnectionInfo(), client.getConnectionInfo());
- this.client = null;
- }
- }
-
- public String stateToString(int state) {
- switch (state) {
- case Bundle.ACTIVE:
- return "ACTIVE";
- case Bundle.INSTALLED:
- return "INSTALLED";
- case Bundle.RESOLVED:
- return "RESOLVED";
- case Bundle.UNINSTALLED:
- return "UNINSTALLED";
- default:
- return "Not CONVERTED";
- }
- }
-
- public void areWeReady(BundleContext bc) throws InterruptedException {
- if (bundlesReady) {
- LOG.info("Bundles already loaded");
- return;
- }
- assertNotNull(bc);
- boolean debugit = false;
- Bundle b[] = bc.getBundles();
- for (Bundle element : b) {
- int state = element.getState();
- if (state != Bundle.ACTIVE && state != Bundle.RESOLVED) {
- LOG.info("Bundle:" + element.getSymbolicName() + " state:"
- + stateToString(state));
- debugit = true;
- }
- }
- if (debugit) {
- LOG.debug("Do some debugging because some bundle is unresolved");
- Thread.sleep(600000);
- }
-
- // Assert if true, if false we are good to go!
- assertFalse("There is a problem with loading the bundles.", debugit);
- bundlesReady = true;
- LOG.info("Bundles loaded");
- }
-
- /*
- * Method adds a log as each test method starts and finishes. This is useful when
- * the test suite is used because the suites only print a final summary.
- */
- @Rule
- public TestRule watcher = new TestWatcher() {
- @Override
- protected void starting(Description description) {
- LOG.info("TestWatcher: Starting test: {}",
- description.getDisplayName());
- }
-
- @Override
- protected void finished(Description description) {
- LOG.info("TestWatcher: Finished test: {}", description.getDisplayName());
- }
- };
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Madhu Venugopal
- */
-package org.opendaylight.ovsdb.integrationtest.library;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
-import static org.opendaylight.ovsdb.lib.operations.Operations.op;
-import static org.ops4j.pax.exam.CoreOptions.options;
-import static org.ops4j.pax.exam.CoreOptions.propagateSystemProperty;
-import static org.ops4j.pax.exam.CoreOptions.systemProperty;
-
-import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
-import javax.inject.Inject;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.opendaylight.ovsdb.integrationtest.ConfigurationBundles;
-import org.opendaylight.ovsdb.integrationtest.OvsdbIntegrationTestBase;
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.ovsdb.lib.notation.Mutator;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.operations.OperationResult;
-import org.opendaylight.ovsdb.lib.operations.TransactionBuilder;
-import org.opendaylight.ovsdb.lib.schema.DatabaseSchema;
-import org.opendaylight.ovsdb.schema.openvswitch.Bridge;
-import org.opendaylight.ovsdb.schema.openvswitch.OpenVSwitch;
-import org.ops4j.pax.exam.Configuration;
-import org.ops4j.pax.exam.Option;
-import org.ops4j.pax.exam.junit.PaxExam;
-import org.ops4j.pax.exam.util.PathUtils;
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.ListenableFuture;
-
-@RunWith(PaxExam.class)
-public class OvsdbLibraryIT extends OvsdbIntegrationTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(OvsdbLibraryIT.class);
- @Inject
- private BundleContext bc;
- private OvsdbClient client = null;
-
- @Configuration
- public Option[] config() {
- return options(
- //
- systemProperty("logback.configurationFile").value(
- "file:" + PathUtils.getBaseDir()
- + "/src/test/resources/logback.xml"
- ),
- // To start OSGi console for inspection remotely
- systemProperty("osgi.console").value("2401"),
-
- propagateSystemProperty("ovsdbserver.ipaddress"),
- propagateSystemProperty("ovsdbserver.port"),
-
- ConfigurationBundles.mdsalBundles(),
- ConfigurationBundles.controllerBundles(),
- ConfigurationBundles.ovsdbLibraryBundles(),
- ConfigurationBundles.ovsdbDefaultSchemaBundles()
- );
- }
-
- @Before
- public void areWeReady() throws InterruptedException {
- assertNotNull(bc);
- boolean debugit = false;
- Bundle b[] = bc.getBundles();
- for (Bundle element : b) {
- int state = element.getState();
- if (state != Bundle.ACTIVE && state != Bundle.RESOLVED) {
- LOG.info("Bundle: {} state: {}", element.getSymbolicName(), stateToString(state));
- debugit = true;
- }
- }
- if (debugit) {
- LOG.debug("Do some debugging because some bundle is unresolved");
- Thread.sleep(600000);
- }
-
- // Assert if true, if false we are good to go!
- assertFalse(debugit);
- try {
- client = getTestConnection();
- } catch (Exception e) {
- fail("Exception : "+e.getMessage());
- }
- }
-
- public boolean isSchemaSupported(String schema) throws ExecutionException, InterruptedException {
- ListenableFuture<List<String>> databases = client.getDatabases();
- List<String> dbNames = databases.get();
- assertNotNull(dbNames);
- return dbNames.contains(schema);
- }
-
- static String testBridgeName = "br_test";
- static UUID testBridgeUuid = null;
- private void createTypedBridge(DatabaseSchema dbSchema) throws IOException, InterruptedException, ExecutionException, NoSuchMethodException, IllegalAccessException, IllegalArgumentException, InvocationTargetException {
- Bridge bridge = client.createTypedRowWrapper(Bridge.class);
- bridge.setName(testBridgeName);
- bridge.setStatus(ImmutableMap.of("key","value"));
- bridge.setFloodVlans(Sets.newHashSet(34L));
-
- OpenVSwitch openVSwitch = client.createTypedRowWrapper(OpenVSwitch.class);
- openVSwitch.setBridges(Sets.newHashSet(new UUID(testBridgeName)));
-
- int insertOperationIndex = 0;
-
- TransactionBuilder transactionBuilder = client.transactBuilder(dbSchema)
- .add(op.insert(bridge.getSchema())
- .withId(testBridgeName)
- .value(bridge.getNameColumn()))
- .add(op.update(bridge.getSchema())
- .set(bridge.getStatusColumn())
- .set(bridge.getFloodVlansColumn())
- .where(bridge.getNameColumn().getSchema().opEqual(bridge.getName()))
- .and(bridge.getNameColumn().getSchema().opEqual(bridge.getName())).build())
- .add(op.mutate(openVSwitch.getSchema())
- .addMutation(openVSwitch.getBridgesColumn().getSchema(), Mutator.INSERT,
- openVSwitch.getBridgesColumn().getData()));
-
- ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
- List<OperationResult> operationResults = results.get();
- assertFalse(operationResults.isEmpty());
- // Check if Results matches the number of operations in transaction
- assertEquals(transactionBuilder.getOperations().size(), operationResults.size());
- System.out.println("Insert & Update operation results = " + operationResults);
- for (OperationResult result : operationResults) {
- assertNull(result.getError());
- }
- testBridgeUuid = operationResults.get(insertOperationIndex).getUuid();
- assertNotNull(testBridgeUuid);
- }
-
- @Test
- public void tableTest() throws Exception {
- assertNotNull("Invalid Client. Check connection params", client);
- Thread.sleep(3000); // Wait for a few seconds to get the Schema exchange done
- if (isSchemaSupported(OPEN_VSWITCH_SCHEMA)) {
- DatabaseSchema dbSchema = client.getSchema(OPEN_VSWITCH_SCHEMA).get();
- assertNotNull(dbSchema);
- System.out.println(OPEN_VSWITCH_SCHEMA + " schema in "+ client.getConnectionInfo() +
- " with Tables : " + dbSchema.getTables());
-
- // A simple Typed Test to make sure a Typed wrapper bundle can coexist in an OSGi environment
- createTypedBridge(dbSchema);
- }
-
- if (isSchemaSupported(HARDWARE_VTEP)) {
- DatabaseSchema dbSchema = client.getSchema(HARDWARE_VTEP).get();
- assertNotNull(dbSchema);
- System.out.println(HARDWARE_VTEP + " schema in "+ client.getConnectionInfo() +
- " with Tables : " + dbSchema.getTables());
- }
- }
-
- @After
- public void tearDown() throws InterruptedException, ExecutionException {
- Bridge bridge = client.getTypedRowWrapper(Bridge.class, null);
- OpenVSwitch openVSwitch = client.getTypedRowWrapper(OpenVSwitch.class, null);
- DatabaseSchema dbSchema = client.getSchema(OPEN_VSWITCH_SCHEMA).get();
- ListenableFuture<List<OperationResult>> results = client.transactBuilder(dbSchema)
- .add(op.delete(bridge.getSchema())
- .where(bridge.getNameColumn().getSchema().opEqual(testBridgeName))
- .build())
- .add(op.mutate(openVSwitch.getSchema())
- .addMutation(openVSwitch.getBridgesColumn().getSchema(), Mutator.DELETE, Sets.newHashSet(testBridgeUuid)))
- .add(op.commit(true))
- .execute();
-
- List<OperationResult> operationResults = results.get();
- System.out.println("Delete operation results = " + operationResults);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Madhu Venugopal
- */
-package org.opendaylight.ovsdb.integrationtest.plugin;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.ops4j.pax.exam.CoreOptions.options;
-import static org.ops4j.pax.exam.CoreOptions.propagateSystemProperty;
-import static org.ops4j.pax.exam.CoreOptions.systemProperty;
-
-import org.junit.After;
-import org.opendaylight.ovsdb.integrationtest.ConfigurationBundles;
-import org.opendaylight.ovsdb.integrationtest.OvsdbIntegrationTestBase;
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.ovsdb.lib.OvsdbConnectionInfo;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.plugin.api.Connection;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryListener;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService;
-import org.opendaylight.ovsdb.plugin.api.Status;
-import org.opendaylight.ovsdb.plugin.api.StatusWithUuid;
-import org.opendaylight.ovsdb.schema.openvswitch.Bridge;
-import org.opendaylight.ovsdb.schema.openvswitch.OpenVSwitch;
-import org.opendaylight.ovsdb.utils.servicehelper.ServiceHelper;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-
-import org.apache.felix.dm.Component;
-import org.apache.felix.dm.DependencyManager;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mockito;
-import org.ops4j.pax.exam.Configuration;
-import org.ops4j.pax.exam.Option;
-import org.ops4j.pax.exam.junit.PaxExam;
-import org.ops4j.pax.exam.util.PathUtils;
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-
-import javax.inject.Inject;
-
-@RunWith(PaxExam.class)
-public class OvsdbPluginIT extends OvsdbIntegrationTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(OvsdbPluginIT.class);
- @Inject
- private BundleContext bc;
- private OvsdbConfigurationService ovsdbConfigurationService = null;
-
- @Inject
- private OvsdbInventoryService ovsdbInventoryService;
-
- private Node node = null;
- private OvsdbClient client = null;
-
- // Configure the OSGi container
- @Configuration
- public Option[] config() {
- return options(
- //
- systemProperty("logback.configurationFile").value(
- "file:" + PathUtils.getBaseDir()
- + "/src/test/resources/logback.xml"
- ),
- // To start OSGi console for inspection remotely
- systemProperty("osgi.console").value("2401"),
-
- propagateSystemProperty("ovsdbserver.ipaddress"),
- propagateSystemProperty("ovsdbserver.port"),
-
- ConfigurationBundles.mdsalBundles(),
- ConfigurationBundles.controllerBundles(),
- ConfigurationBundles.ovsdbLibraryBundles(),
- ConfigurationBundles.ovsdbDefaultSchemaBundles(),
- ConfigurationBundles.ovsdbPluginBundles()
- );
- }
-
- @Before
- public void areWeReady() throws InterruptedException {
- assertNotNull(bc);
- boolean debugit = false;
- Bundle b[] = bc.getBundles();
- for (Bundle element : b) {
- int state = element.getState();
- if (state != Bundle.ACTIVE && state != Bundle.RESOLVED) {
- LOG.info("Bundle: {} state: {}", element.getSymbolicName(), stateToString(state));
- debugit = true;
- }
- }
- if (debugit) {
- LOG.debug("Do some debugging because some bundle is unresolved");
- }
-
- assertFalse(debugit);
- try {
- node = getPluginTestConnection();
- } catch (Exception e) {
- fail("Exception : "+e.getMessage());
- }
- this.ovsdbConfigurationService = (OvsdbConfigurationService)ServiceHelper.getGlobalInstance(OvsdbConfigurationService.class, this);
- }
-
- @Test
- public void apiTests() throws Exception {
- Thread.sleep(5000);
- OvsdbConnectionService
- connectionService = (OvsdbConnectionService)ServiceHelper.getGlobalInstance(OvsdbConnectionService.class, this);
-
- // Check for the ovsdb Connection as seen by the Plugin layer
- assertNotNull(connectionService.getNodes());
- assertTrue(connectionService.getNodes().size() > 0);
- Node node = connectionService.getNodes().get(0);
- Connection connection = connectionService.getConnection(node);
- OvsdbConnectionInfo connectionInfo = connection.getClient().getConnectionInfo();
- String identifier = IDENTIFIER;
- if (connectionInfo.getType().equals(OvsdbConnectionInfo.ConnectionType.PASSIVE)) {
- identifier = connectionInfo.getRemoteAddress().getHostAddress()+":"+connectionInfo.getRemotePort();
- }
- assertEquals(node, connectionService.getNode("OVS|" + identifier));
- System.out.println("Nodes = "+ connectionService.getNodes());
- /*
- * Test sequence :
- * 1. Print Cache and Assert to make sure the bridge is not created yet.
- * 2. Create a bridge with a valid parent_uuid & Assert to make sure the return status is success.
- * 3. Assert to make sure the bridge is created with a valid Uuid.
- * 4. Delete the bridge & Assert to make sure the return status is success.
- * 5. Assert to make sure the bridge is deleted
- */
-
- this.endToEndApiTest(connection, getOpenVSwitchTableUUID(connection));
-
- /*
- * Repeat all of the above tests without the parent_uuid
- */
-
- this.endToEndApiTest(connection, null);
- }
-
- @Test
- public void testInventoryListeners() throws UnknownHostException {
- DependencyManager dm = new DependencyManager(bc);
-
- OvsdbInventoryListener listenerA = Mockito.mock(FakeListener.class);
- OvsdbInventoryListener listenerB = Mockito.mock(FakeListener.class);
-
- Component componentA = dm.createComponent();
- componentA.setInterface(OvsdbInventoryListener.class.getName(), null);
- componentA.setImplementation(listenerA);
- dm.add(componentA);
-
- Component componentB = dm.createComponent();
- componentB.setInterface(OvsdbInventoryListener.class.getName(), null);
- componentB.setImplementation(listenerB);
- dm.add(componentB);
-
- NodeId nodeId = new NodeId("OVS|10.10.10.10:65342");
- NodeKey nodeKey = new NodeKey(nodeId);
- node = new NodeBuilder()
- .setId(nodeId)
- .setKey(nodeKey)
- .build();
- InetAddress address = InetAddress.getByName("10.10.10.10");
- int port = 65342;
-
- // Trigger event
- ovsdbInventoryService.notifyNodeAdded(node, address, port);
-
- Mockito.verify(listenerA, Mockito.times(1)).nodeAdded(node, address, port);
- Mockito.verify(listenerB, Mockito.times(1)).nodeAdded(node, address, port);
-
- dm.remove(componentA);
- dm.remove(componentB);
-
- }
-
- @Test
- public void testSetOFControllers() throws Exception {
- Thread.sleep(5000);
- OvsdbConnectionService
- connectionService = (OvsdbConnectionService)ServiceHelper.getGlobalInstance(OvsdbConnectionService.class, this);
-
- // 1. Check for the ovsdb Connection as seen by the Plugin layer
- assertNotNull(connectionService.getNodes());
- assertTrue(connectionService.getNodes().size() > 0);
- Node node = connectionService.getNodes().get(0);
- Connection connection = connectionService.getConnection(node);
- assertNotNull(connection);
-
- // 2. Create a bridge with a valid parent_uuid & Assert to make sure the return status is success.
- final StatusWithUuid status = insertBridge(connection, getOpenVSwitchTableUUID(connection));
- assertTrue(status.isSuccess());
-
- // Thread.sleep(3000); // wait for _real_ controller to be added to bridge... or not (see below **)
-
- // 3. Test against bug 960: Add same controller multiple times and make sure we do not end up with duplicates.
- ovsdbConfigurationService.setOFController(node, status.getUuid().toString());
- ovsdbConfigurationService.setOFController(node, status.getUuid().toString());
- ovsdbConfigurationService.setOFController(node, status.getUuid().toString());
- ovsdbConfigurationService.setOFController(node, status.getUuid().toString());
-
- Row bridgeRow = ovsdbConfigurationService.getRow(node,
- ovsdbConfigurationService.getTableName(node, Bridge.class),
- status.getUuid().toString());
- assertNotNull(bridgeRow);
- Bridge bridge = ovsdbConfigurationService.getTypedRow(node, Bridge.class, bridgeRow);
- assertTrue(bridge.getUuid().equals(status.getUuid()));
-
- final int currControllersSize = bridge.getControllerColumn().getData().size();
-
- LOG.debug("Bridge has {} controllers", currControllersSize);
-
- // ** Note: we assert against 2 or less -- instead of 1 -- to account for the _real_ controller's connection
- assertTrue( "Too few controllers added to bridge object. Is this bug 960?", currControllersSize >= 1 );
- assertTrue( "Too many controllers added to bridge object. Is this bug 960?", currControllersSize <= 2 );
-
- // Removal of bridge created in this test is done via tearDown(). It is done that way, so cleanup is ran
- // even if test fails.
- }
-
- @After
- public void tearDown() throws InterruptedException {
- Thread.sleep(5000);
- OvsdbConnectionService
- connectionService = (OvsdbConnectionService)ServiceHelper.getGlobalInstance(OvsdbConnectionService.class, this);
-
- if (connectionService.getNodes() == null) {
- return; // no nodes: noop
- }
-
- int bridgesRemoved = 0;
- List<Node> nodes = connectionService.getNodes();
- for (Node node : nodes) {
- Map<String, Row> bridgeRows =
- ovsdbConfigurationService.getRows(node, ovsdbConfigurationService.getTableName(node, Bridge.class));
- if (bridgeRows == null) {
- continue;
- }
- for (Row bridgeRow : bridgeRows.values()) {
- Bridge bridge = ovsdbConfigurationService.getTypedRow(node, Bridge.class, bridgeRow);
- LOG.trace("Test clean up removing Bridge {}", bridge.getUuid());
- Status delStatus = ovsdbConfigurationService.deleteRow(node,
- bridge.getSchema().getName(),
- bridge.getUuid().toString());
- assertTrue(delStatus.isSuccess());
- bridgesRemoved++;
- }
- }
-
- if (bridgesRemoved > 0) {
- LOG.debug("Test clean up removed {} bridges", bridgesRemoved);
- Thread.sleep(2000); // TODO : Remove this Sleep once the Select operation is resolved.
- }
- }
-
- public void endToEndApiTest(Connection connection, String parentUuid) throws Exception {
- // 1. Print Cache and Assert to make sure the bridge is not created yet.
- printCache();
-
- // 2. Create a bridge with a valid parent_uuid & Assert to make sure the return status is success.
- StatusWithUuid status = insertBridge(connection, parentUuid);
- assertTrue(status.isSuccess());
-
- Thread.sleep(2000); // TODO : Remove this Sleep once the Select operation is resolved.
-
- // 3. Assert to make sure the bridge is created with a valid Uuid.
- printCache();
- Bridge bridge = connection.getClient().getTypedRowWrapper(Bridge.class, null);
- Row bridgeRow = ovsdbConfigurationService.getRow(node, bridge.getSchema().getName(), status.getUuid().toString());
- assertNotNull(bridgeRow);
- bridge = connection.getClient().getTypedRowWrapper(Bridge.class, bridgeRow);
- assertEquals(bridge.getUuid(), status.getUuid());
-
- // 4. Delete the bridge & Assert to make sure the return status is success.
- Status delStatus = ovsdbConfigurationService.deleteRow(node, bridge.getSchema().getName(), status.getUuid().toString());
- assertTrue(delStatus.isSuccess());
- Thread.sleep(2000); // TODO : Remove this Sleep once the Select operation is resolved.
-
- // 5. Assert to make sure the bridge is deleted
- bridgeRow = ovsdbConfigurationService.getRow(node, bridge.getSchema().getName(), status.getUuid().toString());
- assertNull(bridgeRow);
- }
-
- public StatusWithUuid insertBridge(Connection connection, String parentUuid) throws Exception {
- Bridge bridge = connection.getClient().createTypedRowWrapper(Bridge.class);
- bridge.setName("br_test1");
- bridge.setStatus(ImmutableMap.of("key", "value"));
- bridge.setFloodVlans(Sets.newHashSet(34L));
- return ovsdbConfigurationService.insertRow(node, bridge.getSchema().getName(), parentUuid, bridge.getRow());
- }
-
- public String getOpenVSwitchTableUUID(Connection connection) throws Exception {
- OpenVSwitch openVSwitch = connection.getClient().getTypedRowWrapper(OpenVSwitch.class, null);
- ConcurrentMap<String, Row> row = ovsdbConfigurationService.getRows(node, openVSwitch.getSchema().getName());
- if (row == null || row.size() == 0) {
- return null;
- }
- return (String)row.keySet().toArray()[0];
- }
-
- public void printCache() throws Exception {
- List<String> tables = ovsdbConfigurationService.getTables(node);
- System.out.println("Tables = "+tables);
- assertNotNull(tables);
- for (String table : tables) {
- System.out.println("Table "+table);
- ConcurrentMap<String,Row> row = ovsdbConfigurationService.getRows(node, table);
- System.out.println(row);
- }
- }
-
- public class FakeListener implements OvsdbInventoryListener {
-
- @Override
- public void nodeAdded(Node node, InetAddress address, int port) {
-
- }
-
- @Override
- public void nodeRemoved(Node node) {
-
- }
-
- @Override
- public void rowAdded(Node node, String tableName, String uuid, Row row) {
-
- }
-
- @Override
- public void rowUpdated(Node node, String tableName, String uuid, Row old, Row row) {
-
- }
-
- @Override
- public void rowRemoved(Node node, String tableName, String uuid, Row row, Object context) {
-
- }
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Madhu Venugopal
- */
-package org.opendaylight.ovsdb.integrationtest.plugin;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.ops4j.pax.exam.CoreOptions.options;
-import static org.ops4j.pax.exam.CoreOptions.propagateSystemProperty;
-import static org.ops4j.pax.exam.CoreOptions.systemProperty;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.List;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-
-import javax.inject.Inject;
-
-import org.apache.felix.dm.Component;
-import org.apache.felix.dm.DependencyManager;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mockito;
-import org.opendaylight.ovsdb.integrationtest.ConfigurationBundles;
-import org.opendaylight.ovsdb.integrationtest.OvsdbIntegrationTestBase;
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.ovsdb.lib.OvsdbConnectionInfo;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.plugin.api.Connection;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryListener;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService;
-import org.opendaylight.ovsdb.plugin.api.StatusCode;
-import org.opendaylight.ovsdb.plugin.api.StatusWithUuid;
-import org.opendaylight.ovsdb.schema.openvswitch.Bridge;
-import org.opendaylight.ovsdb.schema.openvswitch.OpenVSwitch;
-import org.opendaylight.ovsdb.utils.servicehelper.ServiceHelper;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.ops4j.pax.exam.Configuration;
-import org.ops4j.pax.exam.Option;
-import org.ops4j.pax.exam.junit.PaxExam;
-import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
-import org.ops4j.pax.exam.spi.reactors.PerSuite;
-import org.ops4j.pax.exam.util.PathUtils;
-import org.osgi.framework.BundleContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-
-@RunWith(PaxExam.class)
-@ExamReactorStrategy(PerSuite.class)
-public class OvsdbPluginV3IT extends OvsdbIntegrationTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(OvsdbPluginV3IT.class);
- @Inject
- private BundleContext bc;
- private OvsdbConfigurationService ovsdbConfigurationService = null;
- private String databaseName = "Open_vSwitch";
-
- @Inject
- private OvsdbInventoryService ovsdbInventoryService;
-
- private Node node = null;
- private OvsdbClient client = null;
-
- // Configure the OSGi container
- @Configuration
- public Option[] config() {
- return options(
- //
- systemProperty("logback.configurationFile").value(
- "file:" + PathUtils.getBaseDir()
- + "/src/test/resources/logback.xml"
- ),
- // To start OSGi console for inspection remotely
- systemProperty("osgi.console").value("2401"),
-
- propagateSystemProperty("ovsdbserver.ipaddress"),
- propagateSystemProperty("ovsdbserver.port"),
-
- ConfigurationBundles.mdsalBundles(),
- ConfigurationBundles.controllerBundles(),
- ConfigurationBundles.ovsdbLibraryBundles(),
- ConfigurationBundles.ovsdbDefaultSchemaBundles(),
- ConfigurationBundles.ovsdbPluginBundles()
- );
- }
-
- @Before
- public void setUp () throws ExecutionException, InterruptedException, IOException {
- areWeReady(bc);
- }
-
- public void getConnection () throws InterruptedException {
- try {
- node = getPluginTestConnection();
- } catch (Exception e) {
- fail("Exception : "+e.getMessage());
- }
- this.ovsdbConfigurationService = (OvsdbConfigurationService)ServiceHelper.getGlobalInstance(OvsdbConfigurationService.class, this);
- }
-
- @Test
- public void apiTests() throws Exception {
- getConnection();
- assertNotNull("Node should not be null", node);
- assertNotNull("OvsdbConfigurationService should not be null", ovsdbConfigurationService);
-
- Thread.sleep(1000);
- OvsdbConnectionService
- connectionService = (OvsdbConnectionService)ServiceHelper.getGlobalInstance(OvsdbConnectionService.class, this);
-
- // Check for the ovsdb Connection as seen by the Plugin layer
- assertNotNull(connectionService.getNodes());
- assertTrue(connectionService.getNodes().size() > 0);
- Node node = connectionService.getNodes().get(0);
- Connection connection = connectionService.getConnection(node);
- OvsdbConnectionInfo connectionInfo = connection.getClient().getConnectionInfo();
- String identifier = IDENTIFIER;
- if (connectionInfo.getType().equals(OvsdbConnectionInfo.ConnectionType.PASSIVE)) {
- identifier = connectionInfo.getRemoteAddress().getHostAddress()+":"+connectionInfo.getRemotePort();
- }
- assertEquals(node, connectionService.getNode("OVS|" + identifier));
- LOG.info("Nodes = {}", connectionService.getNodes());
- /*
- * Test sequence :
- * 1. Print Cache and Assert to make sure the bridge is not created yet.
- * 2. Create a bridge with a valid parent_uuid & Assert to make sure the return status is success.
- * 3. Assert to make sure the bridge is created with a valid Uuid.
- * 4. Delete the bridge & Assert to make sure the return status is success.
- * 5. Assert to make sure the bridge is deleted
- */
-
- this.endToEndApiTest(connection, getOpenVSwitchTableUUID(connection));
- }
-
- @Test
- public void testInventoryListeners() throws UnknownHostException {
- DependencyManager dm = new DependencyManager(bc);
-
- OvsdbInventoryListener listenerA = Mockito.mock(FakeListener.class);
- OvsdbInventoryListener listenerB = Mockito.mock(FakeListener.class);
-
- Component componentA = dm.createComponent();
- componentA.setInterface(OvsdbInventoryListener.class.getName(), null);
- componentA.setImplementation(listenerA);
- dm.add(componentA);
-
- Component componentB = dm.createComponent();
- componentB.setInterface(OvsdbInventoryListener.class.getName(), null);
- componentB.setImplementation(listenerB);
- dm.add(componentB);
-
- NodeId nodeId = new NodeId("OVS|10.10.10.10:65342");
- NodeKey nodeKey = new NodeKey(nodeId);
- node = new NodeBuilder()
- .setId(nodeId)
- .setKey(nodeKey)
- .build();
-
- InetAddress address = InetAddress.getByName("10.10.10.10");
- int port = 65342;
-
- // Trigger event
- ovsdbInventoryService.notifyNodeAdded(node, address, port);
-
- Mockito.verify(listenerA, Mockito.times(1)).nodeAdded(node, address, port);
- Mockito.verify(listenerB, Mockito.times(1)).nodeAdded(node, address, port);
-
- dm.remove(componentA);
- dm.remove(componentB);
- }
-
- public void endToEndApiTest(Connection connection, String parentUuid) throws Exception {
- // 1. Print Cache and Assert to make sure the bridge is not created yet.
- printCache();
-
- // 2. Create a bridge with a valid parent_uuid & Assert to make sure the return status is success.
- StatusWithUuid status = insertBridge(connection, parentUuid);
- assertTrue(status.isSuccess());
-
- // 3. Assert to make sure the bridge is created with a valid Uuid.
- printCache();
- Bridge bridge = connection.getClient().getTypedRowWrapper(Bridge.class, null);
- Row bridgeRow = ovsdbConfigurationService.getRow(node, databaseName, bridge.getSchema().getName(), status.getUuid());
- assertNotNull(bridgeRow);
- bridge = connection.getClient().getTypedRowWrapper(Bridge.class, bridgeRow);
- LOG.info("Bridge UUID {} Status Uuid {}", bridge.getUuid(), status.getUuid());
- assertEquals(bridge.getUuid(), status.getUuid());
-
- bridge = connection.getClient().createTypedRowWrapper(Bridge.class);
- bridge.setDatapathType("netdev");
- try {
- ovsdbConfigurationService.updateRow(node, databaseName, bridge.getSchema().getName(), status.getUuid(), bridge.getRow(), false);
- } catch (Exception e) {
- fail("Failed to updated Bridge "+e.getMessage());
- }
-
- // 4. Delete the bridge & Assert to make sure the return status is success.
- try {
- ovsdbConfigurationService.deleteRow(node, databaseName, bridge.getSchema().getName(), null, new UUID(parentUuid) ,null, status.getUuid());
- } catch (Exception e) {
- fail(e.getMessage());
- }
-
- // 5. Assert to make sure the bridge is deleted
- bridgeRow = ovsdbConfigurationService.getRow(node, databaseName, bridge.getSchema().getName(), status.getUuid());
- assertNull(bridgeRow);
- }
-
- public StatusWithUuid insertBridge(Connection connection, String parentUuid) throws Exception {
- Bridge bridge = connection.getClient().createTypedRowWrapper(Bridge.class);
- bridge.setName("br_test1");
- bridge.setStatus(ImmutableMap.of("key", "value"));
- bridge.setFloodVlans(Sets.newHashSet(34L));
- try {
- Row<GenericTableSchema> row = ovsdbConfigurationService.insertTree(node, databaseName, bridge.getSchema().getName(), new UUID(parentUuid), bridge.getRow());
- bridge = connection.getClient().getTypedRowWrapper(Bridge.class, row);
- return new StatusWithUuid(StatusCode.SUCCESS, bridge.getUuid());
- } catch (Exception e) {
- return new StatusWithUuid(StatusCode.INTERNALERROR);
- }
- }
-
- public String getOpenVSwitchTableUUID(Connection connection) throws Exception {
- OpenVSwitch openVSwitch = connection.getClient().getTypedRowWrapper(OpenVSwitch.class, null);
- ConcurrentMap<UUID, Row<GenericTableSchema>> rows = ovsdbConfigurationService.getRows(node, databaseName, openVSwitch.getSchema().getName());
- if (rows == null || rows.size() == 0) {
- return null;
- }
- return rows.keySet().toArray()[0].toString();
- }
-
- public void printCache() throws Exception {
- List<String> tables = ovsdbConfigurationService.getTables(node, databaseName);
- LOG.info("Tables = {}", tables);
- assertNotNull(tables);
- for (String table : tables) {
- LOG.info("Table {}", table);
- ConcurrentMap<UUID, Row<GenericTableSchema>> rows = ovsdbConfigurationService.getRows(node, databaseName, table);
- LOG.info(rows.toString());
- }
- }
-
- public class FakeListener implements OvsdbInventoryListener {
-
- @Override
- public void nodeAdded(Node node, InetAddress address, int port) {
-
- }
-
- @Override
- public void nodeRemoved(Node node) {
-
- }
-
- @Override
- public void rowAdded(Node node, String tableName, String uuid, Row row) {
-
- }
-
- @Override
- public void rowUpdated(Node node, String tableName, String uuid, Row old, Row row) {
-
- }
-
- @Override
- public void rowRemoved(Node node, String tableName, String uuid, Row row, Object context) {
-
- }
- }
-
-}
+++ /dev/null
-/*
- * Copyright (C) 2014 Red Hat, Inc.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Sam Hague, Matt Oswalt
- */
-package org.opendaylight.ovsdb.integrationtest.schema.hardwarevtep;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
-import static org.opendaylight.ovsdb.lib.operations.Operations.op;
-import static org.ops4j.pax.exam.CoreOptions.junitBundles;
-import static org.ops4j.pax.exam.CoreOptions.options;
-import static org.ops4j.pax.exam.CoreOptions.propagateSystemProperty;
-import static org.ops4j.pax.exam.CoreOptions.systemProperty;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import javax.inject.Inject;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.opendaylight.ovsdb.integrationtest.ConfigurationBundles;
-import org.opendaylight.ovsdb.integrationtest.OvsdbIntegrationTestBase;
-import org.opendaylight.ovsdb.lib.MonitorCallBack;
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.ovsdb.lib.message.MonitorRequest;
-import org.opendaylight.ovsdb.lib.message.MonitorRequestBuilder;
-import org.opendaylight.ovsdb.lib.message.MonitorSelect;
-import org.opendaylight.ovsdb.lib.message.TableUpdate;
-import org.opendaylight.ovsdb.lib.message.TableUpdates;
-import org.opendaylight.ovsdb.lib.notation.Mutator;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.operations.OperationResult;
-import org.opendaylight.ovsdb.lib.operations.TransactionBuilder;
-import org.opendaylight.ovsdb.lib.schema.DatabaseSchema;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.lib.schema.TableSchema;
-import org.opendaylight.ovsdb.lib.schema.typed.TypedBaseTable;
-import org.opendaylight.ovsdb.schema.hardwarevtep.Global;
-import org.opendaylight.ovsdb.schema.hardwarevtep.Manager;
-import org.opendaylight.ovsdb.schema.openvswitch.OpenVSwitch;
-import org.ops4j.pax.exam.Configuration;
-import org.ops4j.pax.exam.Option;
-import org.ops4j.pax.exam.junit.PaxExam;
-import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
-import org.ops4j.pax.exam.spi.reactors.PerSuite;
-import org.ops4j.pax.exam.util.PathUtils;
-import org.osgi.framework.BundleContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@RunWith(PaxExam.class)
-@ExamReactorStrategy(PerSuite.class)
-public class HardwareVTEPIT extends OvsdbIntegrationTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(HardwareVTEPIT.class);
- private static boolean monitorReady = false;
- private static boolean schemaSupported = false;
- private static final String ASSERT_TRANS_ERROR = "Transaction should not have errors";
- private static final String ASSERT_TRANS_RESULT_EMPTY = "Transaction should not be empty";
- private static final String ASSERT_TRANS_OPERATION_COUNT = "Transaction should match number of operations";
- private static final String ASSERT_TRANS_UUID = "Transaction UUID should not be null";
- private UUID testManagerUuid = null;
-
- private static Map<String, Map<UUID, Row>> tableCache = new HashMap<>();
- private static Map<String, Map<UUID, Row>> getTableCache () {
- return tableCache;
- }
-
- private static OvsdbClient ovsdbClient;
- private OvsdbClient getClient () {
- return ovsdbClient;
- }
-
- private static DatabaseSchema dbSchema;
- private DatabaseSchema getDbSchema () {
- return dbSchema;
- }
-
- @Inject
- private BundleContext bc;
-
- @Configuration
- public Option[] config() throws Exception {
- return options(
- systemProperty("logback.configurationFile").value(
- "file:" + PathUtils.getBaseDir()
- + "/src/test/resources/logback.xml"
- ),
- // To start OSGi console for inspection remotely
- systemProperty("osgi.console").value("2401"),
-
- propagateSystemProperty("ovsdbserver.ipaddress"),
- propagateSystemProperty("ovsdbserver.port"),
-
- ConfigurationBundles.mdsalBundles(),
- ConfigurationBundles.controllerBundles(),
- ConfigurationBundles.ovsdbLibraryBundles(),
- ConfigurationBundles.ovsdbDefaultSchemaBundles()
- );
- }
-
- @Before
- public void setUp () throws ExecutionException, InterruptedException, IOException {
- areWeReady(bc);
- assumeTrue(HARDWARE_VTEP + " is required.", checkSchema(HARDWARE_VTEP));
- assertTrue("Failed to monitor tables", monitorTables());
- LOG.info("{} schema version = {}", OPEN_VSWITCH_SCHEMA,
- getClient().getDatabaseSchema(OPEN_VSWITCH_SCHEMA).getVersion());
- }
-
- public boolean checkSchema (String schema) {
- if (schemaSupported) {
- LOG.info("Schema ({}) is supported", schema);
- return true;
- }
- try {
- ovsdbClient = getTestConnection();
- assertNotNull("Invalid Client. Check connection params", ovsdbClient);
- //Thread.sleep(3000); // Wait for a few seconds to get the Schema exchange done
- if (isSchemaSupported(ovsdbClient, schema)) {
- dbSchema = ovsdbClient.getSchema(schema).get();
- assertNotNull(dbSchema);
- LOG.info("{} schema in {} with tables: {}",
- schema, ovsdbClient.getConnectionInfo(), dbSchema.getTables());
- schemaSupported = true;
- return true;
- }
- } catch (Exception e) {
- fail("Exception : "+e.getMessage());
- }
-
- LOG.info("Schema ({}) is not supported", schema);
- return false;
- }
-
- public UUID getOpenVSwitchTableUuid (OvsdbClient ovs, Map<String, Map<UUID, Row>> tableCache) {
- OpenVSwitch openVSwitch = getClient().getTypedRowWrapper(OpenVSwitch.class, null);
- Map<UUID, Row> ovsTable = tableCache.get(openVSwitch.getSchema().getName());
- if (ovsTable != null) {
- if (ovsTable.keySet().size() >= 1) {
- return (UUID)ovsTable.keySet().toArray()[0];
- }
- }
- return null;
- }
-
- public UUID getGlobalTableUuid(OvsdbClient ovs, Map<String, Map<UUID, Row>> tableCache) {
- Global glbl = getClient().getTypedRowWrapper(Global.class, null);
- Map<UUID, Row> glblTbl = tableCache.get(glbl.getSchema().getName());
- if (glblTbl != null) {
- if (glblTbl.keySet().size() >= 1) {
- return (UUID)glblTbl.keySet().toArray()[0];
- }
- }
- return null;
- }
-
- public boolean isSchemaSupported (OvsdbClient client, String schema) throws ExecutionException, InterruptedException {
- ListenableFuture<List<String>> databases = client.getDatabases();
- List<String> dbNames = databases.get();
- assertNotNull(dbNames);
- if (dbNames.contains(schema)) {
- return true;
- } else {
- return false;
- }
- }
-
- /**
- * As per RFC 7047, section 4.1.5, if a Monitor request is sent without any columns, the update response will not include
- * the _uuid column.
- * ----------------------------------------------------------------------------------------------------------------------------------
- * Each <monitor-request> specifies one or more columns and the manner in which the columns (or the entire table) are to be monitored.
- * The "columns" member specifies the columns whose values are monitored. It MUST NOT contain duplicates.
- * If "columns" is omitted, all columns in the table, except for "_uuid", are monitored.
- * ----------------------------------------------------------------------------------------------------------------------------------
- * In order to overcome this limitation, this method
- *
- * @return MonitorRequest that includes all the Bridge Columns including _uuid
- */
- public <T extends TypedBaseTable<GenericTableSchema>> MonitorRequest<GenericTableSchema> getAllColumnsMonitorRequest (Class <T> klazz) {
- TypedBaseTable<GenericTableSchema> table = getClient().createTypedRowWrapper(klazz);
- GenericTableSchema tableSchema = table.getSchema();
- Set<String> columns = tableSchema.getColumns();
- MonitorRequestBuilder<GenericTableSchema> bridgeBuilder = MonitorRequestBuilder.builder(table.getSchema());
- for (String column : columns) {
- bridgeBuilder.addColumn(column);
- }
- return bridgeBuilder.with(new MonitorSelect(true, true, true, true)).build();
- }
-
- public <T extends TableSchema<T>> MonitorRequest<T> getAllColumnsMonitorRequest (T tableSchema) {
- Set<String> columns = tableSchema.getColumns();
- MonitorRequestBuilder<T> monitorBuilder = MonitorRequestBuilder.builder(tableSchema);
- for (String column : columns) {
- monitorBuilder.addColumn(column);
- }
- return monitorBuilder.with(new MonitorSelect(true, true, true, true)).build();
- }
-
- public boolean monitorTables () throws ExecutionException, InterruptedException, IOException {
- if (monitorReady) {
- LOG.info("Monitoring is already initialized.");
- return monitorReady;
- }
-
- assertNotNull(getDbSchema());
-
- List<MonitorRequest<GenericTableSchema>> monitorRequests = Lists.newArrayList();
- Set<String> tables = getDbSchema().getTables();
- assertNotNull("ovsdb tables should not be null", tables);
-
- for (String tableName : tables) {
- GenericTableSchema tableSchema = getDbSchema().table(tableName, GenericTableSchema.class);
- monitorRequests.add(this.getAllColumnsMonitorRequest(tableSchema));
- }
- TableUpdates updates = getClient().monitor(getDbSchema(), monitorRequests, new UpdateMonitor());
- assertNotNull(updates);
- this.updateTableCache(updates);
-
- monitorReady = true;
- LOG.info("Monitoring is initialized.");
- return monitorReady;
- }
-
- private void updateTableCache (TableUpdates updates) {
- for (String tableName : updates.getUpdates().keySet()) {
- Map<UUID, Row> tUpdate = getTableCache().get(tableName);
- TableUpdate update = updates.getUpdates().get(tableName);
- for (UUID uuid : (Set<UUID>)update.getRows().keySet()) {
- if (update.getNew(uuid) != null) {
- if (tUpdate == null) {
- tUpdate = new HashMap<>();
- getTableCache().put(tableName, tUpdate);
- }
- tUpdate.put(uuid, update.getNew(uuid));
- } else {
- tUpdate.remove(uuid);
- }
- }
- }
- }
-
- private class UpdateMonitor implements MonitorCallBack {
- @Override
- public void update(TableUpdates result, DatabaseSchema dbSchema) {
- updateTableCache(result);
- }
-
- @Override
- public void exception(Throwable t) {
- LOG.error("Exception t = " + t);
- }
- }
-
- public List<OperationResult> executeTransaction (TransactionBuilder transactionBuilder, String text)
- throws ExecutionException, InterruptedException {
- ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
- List<OperationResult> operationResults = results.get();
- LOG.info("{}: {}", text, operationResults);
- org.junit.Assert.assertFalse(ASSERT_TRANS_RESULT_EMPTY, operationResults.isEmpty());
- assertEquals(ASSERT_TRANS_OPERATION_COUNT, transactionBuilder.getOperations().size(), operationResults.size());
- for (OperationResult result : operationResults) {
- assertNull(ASSERT_TRANS_ERROR, result.getError());
- }
- //Thread.sleep(500); // Wait for a few seconds to ensure the cache updates
- return operationResults;
- }
-
- /**
- * Create a new manager string in addition to whatever is already there
- * Will modify the Global table to include the UUID to the new Manager row
- */
- public void managerInsert () throws ExecutionException, InterruptedException {
- //Ensure test only proceeds if HW VTEP is supported
- assumeTrue(isSchemaSupported(getClient(), HARDWARE_VTEP));
-
- //proceed only if schema was already retrieved successfully
- Assert.assertNotNull(getDbSchema());
-
- //create new manager and set target string
- Manager manager = getClient().createTypedRowWrapper(Manager.class);
- manager.setTarget("ptcp:6641");
-
- String transactionUuidStr = "foobar";
-
- Global glbl = this.getClient().createTypedRowWrapper(Global.class);
- glbl.setManagers(Sets.newHashSet(new UUID(transactionUuidStr)));
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(manager.getSchema())
- .withId(transactionUuidStr)
- .value(manager.getTargetColumn()))
- .add(op.comment("Manager: Inserting " + transactionUuidStr))
- .add(op.mutate(glbl.getSchema())
- .addMutation(glbl.getManagersColumn().getSchema(), Mutator.INSERT,
- glbl.getManagersColumn().getData()))
- .add(op.comment("Global: Mutating " + transactionUuidStr));
-
- int insertOperationIndex = 0;
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Manager: Insert and Mutate results");
- testManagerUuid = operationResults.get(insertOperationIndex).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testManagerUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row managerRow = getTableCache().get(manager.getSchema().getName()).get(testManagerUuid);
- Manager monitoredManager = getClient().getTypedRowWrapper(Manager.class, managerRow);
- assertEquals(manager.getTargetColumn().getData(), monitoredManager.getTargetColumn().getData());
- assertNotNull(monitoredManager.getUuid());
- assertNotNull(monitoredManager.getVersion());
- assertNotNull(getGlobalTableUuid(getClient(), getTableCache()));
- }
-
- public void managerDelete () throws ExecutionException, InterruptedException {
- assumeTrue(isSchemaSupported(getClient(), HARDWARE_VTEP));
-
- Manager manager = getClient().getTypedRowWrapper(Manager.class, null);
- Global global = getClient().getTypedRowWrapper(Global.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.delete(manager.getSchema())
- .where(manager.getUuidColumn().getSchema().opEqual(testManagerUuid))
- .build())
- .add(op.comment("Manager: Deleting " + testManagerUuid))
- .add(op.mutate(global.getSchema())
- .addMutation(global.getManagersColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testManagerUuid)))
- .add(op.comment("Global: Mutating " + testManagerUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Manager delete operation results");
- }
-
- @Test
- public void testManager () throws ExecutionException, InterruptedException {
- managerInsert();
- managerDelete();
- }
-}
+++ /dev/null
-/*
- * Copyright (C) 2014 Red Hat, Inc.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Sam Hague, Matt Oswalt
- */
-package org.opendaylight.ovsdb.integrationtest.schema.openvswitch;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
-import static org.opendaylight.ovsdb.lib.operations.Operations.op;
-import static org.ops4j.pax.exam.CoreOptions.junitBundles;
-import static org.ops4j.pax.exam.CoreOptions.options;
-import static org.ops4j.pax.exam.CoreOptions.propagateSystemProperty;
-import static org.ops4j.pax.exam.CoreOptions.systemProperty;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import javax.inject.Inject;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.opendaylight.ovsdb.integrationtest.ConfigurationBundles;
-import org.opendaylight.ovsdb.integrationtest.OvsdbIntegrationTestBase;
-import org.opendaylight.ovsdb.lib.MonitorCallBack;
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.ovsdb.lib.error.SchemaVersionMismatchException;
-import org.opendaylight.ovsdb.lib.message.MonitorRequest;
-import org.opendaylight.ovsdb.lib.message.MonitorRequestBuilder;
-import org.opendaylight.ovsdb.lib.message.MonitorSelect;
-import org.opendaylight.ovsdb.lib.message.TableUpdate;
-import org.opendaylight.ovsdb.lib.message.TableUpdates;
-import org.opendaylight.ovsdb.lib.notation.Mutator;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.notation.Version;
-import org.opendaylight.ovsdb.lib.operations.OperationResult;
-import org.opendaylight.ovsdb.lib.operations.TransactionBuilder;
-import org.opendaylight.ovsdb.lib.schema.DatabaseSchema;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.lib.schema.TableSchema;
-import org.opendaylight.ovsdb.lib.schema.typed.TypedBaseTable;
-import org.opendaylight.ovsdb.schema.openvswitch.Bridge;
-import org.opendaylight.ovsdb.schema.openvswitch.Controller;
-import org.opendaylight.ovsdb.schema.openvswitch.FlowSampleCollectorSet;
-import org.opendaylight.ovsdb.schema.openvswitch.FlowTable;
-import org.opendaylight.ovsdb.schema.openvswitch.Interface;
-import org.opendaylight.ovsdb.schema.openvswitch.IPFIX;
-import org.opendaylight.ovsdb.schema.openvswitch.Manager;
-import org.opendaylight.ovsdb.schema.openvswitch.Mirror;
-import org.opendaylight.ovsdb.schema.openvswitch.NetFlow;
-import org.opendaylight.ovsdb.schema.openvswitch.OpenVSwitch;
-import org.opendaylight.ovsdb.schema.openvswitch.Port;
-import org.opendaylight.ovsdb.schema.openvswitch.Qos;
-import org.opendaylight.ovsdb.schema.openvswitch.Queue;
-import org.opendaylight.ovsdb.schema.openvswitch.SFlow;
-import org.opendaylight.ovsdb.schema.openvswitch.SSL;
-import org.ops4j.pax.exam.Configuration;
-import org.ops4j.pax.exam.Option;
-import org.ops4j.pax.exam.junit.PaxExam;
-import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
-import org.ops4j.pax.exam.spi.reactors.PerSuite;
-import org.ops4j.pax.exam.util.PathUtils;
-import org.osgi.framework.BundleContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@RunWith(PaxExam.class)
-@ExamReactorStrategy(PerSuite.class)
-public class OpenVSwitchIT extends OvsdbIntegrationTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(OpenVSwitchIT.class);
- private static boolean monitorReady = false;
- private static boolean schemaSupported = false;
- private static final String TEST_BRIDGE_NAME = "br_test";
- private static final String TEST_MANAGER_UUID_STR = "managerUuidName";
- private static final String ASSERT_TRANS_ERROR = "Transaction should not have errors";
- private static final String ASSERT_TRANS_RESULT_EMPTY = "Transaction should not be empty";
- private static final String ASSERT_TRANS_OPERATION_COUNT = "Transaction should match number of operations";
- private static final String ASSERT_TRANS_UUID = "Transaction UUID should not be null";
- private Version schemaVersion;
- private UUID testBridgeUuid = null;
- private UUID testController1Uuid = null;
- private UUID testController2Uuid = null;
- private UUID testFlowSampleCollectorSetUuid = null;
- private UUID testFlowTableUuid = null;
- private UUID testInterfaceUuid = null;
- private UUID testIpfixUuid = null;
- private UUID testManagerUuid = null;
- private UUID testMirrorUuid = null;
- private UUID testNetFlowUuid = null;
- private UUID testPortUuid = null;
- private UUID testQosUuid = null;
- private UUID testQueueUuid = null;
- private UUID testSFlowUuid = null;
- private UUID testSslUuid = null;
- private Version flowSampleCollectorSetFromVersion = Version.fromString("7.1.0");
- private Version flowTableFromVersion = Version.fromString("6.5.0");
- private Version prefixesAddedVersion = Version.fromString("7.4.0");
- private Version externalIdAddedVerson = Version.fromString("7.5.0");
- private Version ipfixFromVersion = Version.fromString("7.1.0");
- private Version ipfixCacheFromVersion = Version.fromString("7.3.0");
-
- private static Map<String, Map<UUID, Row>> tableCache = new HashMap<>();
- private static Map<String, Map<UUID, Row>> getTableCache () {
- return tableCache;
- }
-
- private static OvsdbClient ovsdbClient;
- private OvsdbClient getClient () {
- return ovsdbClient;
- }
-
- private static DatabaseSchema dbSchema;
- private DatabaseSchema getDbSchema () {
- return dbSchema;
- }
-
- @Inject
- private BundleContext bc;
-
- @Configuration
- public Option[] config() throws Exception {
- return options(
- systemProperty("logback.configurationFile").value(
- "file:" + PathUtils.getBaseDir()
- + "/src/test/resources/logback.xml"
- ),
- // To start OSGi console for inspection remotely
- systemProperty("osgi.console").value("2401"),
-
- propagateSystemProperty("ovsdbserver.ipaddress"),
- propagateSystemProperty("ovsdbserver.port"),
-
- ConfigurationBundles.mdsalBundles(),
- ConfigurationBundles.controllerBundles(),
- ConfigurationBundles.ovsdbLibraryBundles(),
- ConfigurationBundles.ovsdbDefaultSchemaBundles()
- );
- }
-
- @Before
- public void setUp () throws ExecutionException, InterruptedException, IOException {
- areWeReady(bc);
- assertTrue(OPEN_VSWITCH_SCHEMA + " is required.", checkSchema(OPEN_VSWITCH_SCHEMA));
- assertTrue("Failed to monitor tables", monitorTables());
- schemaVersion = getClient().getDatabaseSchema(OPEN_VSWITCH_SCHEMA).getVersion();
- LOG.info("{} schema version = {}", OPEN_VSWITCH_SCHEMA, schemaVersion);
- }
-
- public boolean checkSchema (String schema) {
- if (schemaSupported) {
- LOG.info("Schema ({}) is supported", schema);
- return true;
- }
- try {
- ovsdbClient = getTestConnection();
- assertNotNull("Invalid Client. Check connection params", ovsdbClient);
- //Thread.sleep(3000); // Wait for a few seconds to get the Schema exchange done
- if (isSchemaSupported(ovsdbClient, schema)) {
- dbSchema = ovsdbClient.getSchema(schema).get();
- assertNotNull(dbSchema);
- LOG.info("{} schema in {} with tables: {}",
- schema, ovsdbClient.getConnectionInfo(), dbSchema.getTables());
- schemaSupported = true;
- return true;
- }
- } catch (Exception e) {
- fail("Exception : "+e.getMessage());
- }
-
- LOG.info("Schema ({}) is not supported", schema);
- return false;
- }
-
- public UUID getOpenVSwitchTableUuid (OvsdbClient ovs, Map<String, Map<UUID, Row>> tableCache) {
- OpenVSwitch openVSwitch = getClient().getTypedRowWrapper(OpenVSwitch.class, null);
- Map<UUID, Row> ovsTable = tableCache.get(openVSwitch.getSchema().getName());
- if (ovsTable != null) {
- if (ovsTable.keySet().size() >= 1) {
- return (UUID)ovsTable.keySet().toArray()[0];
- }
- }
- return null;
- }
-
- public boolean isSchemaSupported (OvsdbClient client, String schema) throws ExecutionException, InterruptedException {
- ListenableFuture<List<String>> databases = client.getDatabases();
- List<String> dbNames = databases.get();
- assertNotNull(dbNames);
- if (dbNames.contains(schema)) {
- return true;
- } else {
- return false;
- }
- }
-
- /**
- * As per RFC 7047, section 4.1.5, if a Monitor request is sent without any columns, the update response will not include
- * the _uuid column.
- * ----------------------------------------------------------------------------------------------------------------------------------
- * Each <monitor-request> specifies one or more columns and the manner in which the columns (or the entire table) are to be monitored.
- * The "columns" member specifies the columns whose values are monitored. It MUST NOT contain duplicates.
- * If "columns" is omitted, all columns in the table, except for "_uuid", are monitored.
- * ----------------------------------------------------------------------------------------------------------------------------------
- * In order to overcome this limitation, this method
- *
- * @return MonitorRequest that includes all the Bridge Columns including _uuid
- */
- public <T extends TypedBaseTable<GenericTableSchema>> MonitorRequest<GenericTableSchema> getAllColumnsMonitorRequest (Class <T> klazz) {
- TypedBaseTable<GenericTableSchema> table = getClient().createTypedRowWrapper(klazz);
- GenericTableSchema tableSchema = table.getSchema();
- Set<String> columns = tableSchema.getColumns();
- MonitorRequestBuilder<GenericTableSchema> bridgeBuilder = MonitorRequestBuilder.builder(table.getSchema());
- for (String column : columns) {
- bridgeBuilder.addColumn(column);
- }
- return bridgeBuilder.with(new MonitorSelect(true, true, true, true)).build();
- }
-
- public <T extends TableSchema<T>> MonitorRequest<T> getAllColumnsMonitorRequest (T tableSchema) {
- Set<String> columns = tableSchema.getColumns();
- MonitorRequestBuilder<T> monitorBuilder = MonitorRequestBuilder.builder(tableSchema);
- for (String column : columns) {
- monitorBuilder.addColumn(column);
- }
- return monitorBuilder.with(new MonitorSelect(true, true, true, true)).build();
- }
-
- public boolean monitorTables () throws ExecutionException, InterruptedException, IOException {
- if (monitorReady) {
- LOG.info("Monitoring is already initialized.");
- return monitorReady;
- }
-
- assertNotNull(getDbSchema());
-
- List<MonitorRequest<GenericTableSchema>> monitorRequests = Lists.newArrayList();
- Set<String> tables = getDbSchema().getTables();
- assertNotNull("ovsdb tables should not be null", tables);
-
- for (String tableName : tables) {
- GenericTableSchema tableSchema = getDbSchema().table(tableName, GenericTableSchema.class);
- monitorRequests.add(this.getAllColumnsMonitorRequest(tableSchema));
- }
- TableUpdates updates = getClient().monitor(getDbSchema(), monitorRequests, new UpdateMonitor());
- assertNotNull(updates);
- this.updateTableCache(updates);
-
- monitorReady = true;
- LOG.info("Monitoring is initialized.");
- return monitorReady;
- }
-
- private void updateTableCache (TableUpdates updates) {
- for (String tableName : updates.getUpdates().keySet()) {
- Map<UUID, Row> tUpdate = getTableCache().get(tableName);
- TableUpdate update = updates.getUpdates().get(tableName);
- for (UUID uuid : (Set<UUID>)update.getRows().keySet()) {
- if (update.getNew(uuid) != null) {
- if (tUpdate == null) {
- tUpdate = new HashMap<>();
- getTableCache().put(tableName, tUpdate);
- }
- tUpdate.put(uuid, update.getNew(uuid));
- } else {
- tUpdate.remove(uuid);
- }
- }
- }
- }
-
- private class UpdateMonitor implements MonitorCallBack {
- @Override
- public void update(TableUpdates result, DatabaseSchema dbSchema) {
- updateTableCache(result);
- }
-
- @Override
- public void exception(Throwable t) {
- LOG.error("Exception t = " + t);
- }
- }
-
- public List<OperationResult> executeTransaction (TransactionBuilder transactionBuilder, String text)
- throws ExecutionException, InterruptedException {
- ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
- List<OperationResult> operationResults = results.get();
- LOG.info("{}: {}", text, operationResults);
- org.junit.Assert.assertFalse(ASSERT_TRANS_RESULT_EMPTY, operationResults.isEmpty());
- assertEquals(ASSERT_TRANS_OPERATION_COUNT, transactionBuilder.getOperations().size(), operationResults.size());
- for (OperationResult result : operationResults) {
- assertNull(ASSERT_TRANS_ERROR, result.getError());
- }
- //Thread.sleep(500); // Wait for a few seconds to ensure the cache updates
- return operationResults;
- }
-
- public UUID bridgeInsert () throws ExecutionException, InterruptedException {
- Bridge bridge = getClient().createTypedRowWrapper(Bridge.class);
- bridge.setName(TEST_BRIDGE_NAME);
- bridge.setStatus(ImmutableMap.of("key", "value"));
- bridge.setFloodVlans(Sets.newHashSet(34L));
-
- OpenVSwitch openVSwitch = getClient().createTypedRowWrapper(OpenVSwitch.class);
- openVSwitch.setBridges(Sets.newHashSet(new UUID(TEST_BRIDGE_NAME)));
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(bridge.getSchema())
- .withId(TEST_BRIDGE_NAME)
- .value(bridge.getNameColumn()))
- .add(op.comment("Bridge: Inserting " + TEST_BRIDGE_NAME))
- .add(op.update(bridge.getSchema())
- .set(bridge.getStatusColumn())
- .set(bridge.getFloodVlansColumn())
- .where(bridge.getNameColumn().getSchema().opEqual(bridge.getName()))
- .and(bridge.getNameColumn().getSchema().opEqual(bridge.getName()))
- .build())
- .add(op.comment("Bridge: Updating " + TEST_BRIDGE_NAME))
- .add(op.mutate(openVSwitch.getSchema())
- .addMutation(openVSwitch.getBridgesColumn().getSchema(), Mutator.INSERT,
- openVSwitch.getBridgesColumn().getData()))
- .add(op.comment("Open_vSwitch: Mutating " + TEST_BRIDGE_NAME));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Bridge Insert, Update and Mutate operation results");
- UUID bridgeUuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, bridgeUuid);
- return bridgeUuid;
- }
-
- public void bridgeDelete (UUID bridgeUuid) throws ExecutionException, InterruptedException {
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- OpenVSwitch openVSwitch = getClient().getTypedRowWrapper(OpenVSwitch.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.delete(bridge.getSchema())
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Deleting " + TEST_BRIDGE_NAME))
- .add(op.mutate(openVSwitch.getSchema())
- .addMutation(openVSwitch.getBridgesColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(bridgeUuid)))
- .add(op.comment("Open_vSwitch: Mutating " + TEST_BRIDGE_NAME + " " + bridgeUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Bridge delete operation results");
- }
-
- @Test
- public void testBridge () throws ExecutionException, InterruptedException {
- testBridgeUuid = bridgeInsert();
-
- // Verify that the local cache was updated with the remote changes
- Bridge bridge = getClient().createTypedRowWrapper(Bridge.class);
- Row bridgeRow = getTableCache().get(bridge.getSchema().getName()).get(testBridgeUuid);
- Bridge monitoredBridge = getClient().getTypedRowWrapper(Bridge.class, bridgeRow);
- assertEquals(TEST_BRIDGE_NAME, monitoredBridge.getNameColumn().getData());
-
- bridgeDelete(testBridgeUuid);
- }
-
- private void controllerInsert () throws ExecutionException, InterruptedException {
- String controllerUuidStr = "controller";
- Controller controller1 = getClient().createTypedRowWrapper(Controller.class);
- controller1.setTarget("tcp:1.1.1.1:6640");
- Controller controller2 = getClient().createTypedRowWrapper(Controller.class);
- controller2.setTarget("tcp:2.2.2.2:6640");
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
-
- // Insert row to Controller table with address in target column
- // Update row in Bridge table with controller uuid in controller column
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(controller1.getSchema())
- .withId(controllerUuidStr)
- .value(controller1.getTargetColumn()))
- .add(op.comment("Controller: Inserting controller1 " + controller1.getTargetColumn().getData()))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getControllerColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(controllerUuidStr)))
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating controller1 " + controller1.getTargetColumn().getData()));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Controller: Insert & Mutate operation results for controller1");
- testController1Uuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testController1Uuid);
-
- // Verify that the local cache was updated with the remote changes
- Row controllerRow = getTableCache().get(controller1.getSchema().getName()).get(testController1Uuid);
- Controller monitoredController = getClient().getTypedRowWrapper(Controller.class, controllerRow);
- assertEquals(controller1.getTargetColumn().getData(), monitoredController.getTargetColumn().getData());
-
- Row bridgeRow = getTableCache().get(bridge.getSchema().getName()).get(testBridgeUuid);
- Bridge monitoredBridge = getClient().getTypedRowWrapper(Bridge.class, bridgeRow);
- assertEquals(1, monitoredBridge.getControllerColumn().getData().size());
-
- transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(controller2.getSchema())
- .withId(controllerUuidStr)
- .value(controller2.getTargetColumn()))
- .add(op.comment("Controller: Inserting controller2 " + controller2.getTargetColumn().getData()))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getControllerColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(controllerUuidStr)))
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating controller2 " + controller2.getTargetColumn().getData()));
-
- operationResults = executeTransaction(transactionBuilder,
- new String("Controller: Insert & Mutate operation results for controller2"));
- testController2Uuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testController2Uuid);
-
- // Verify that the local cache was updated with the remote changes
- controllerRow = getTableCache().get(controller2.getSchema().getName()).get(testController2Uuid);
- monitoredController = getClient().getTypedRowWrapper(Controller.class, controllerRow);
- assertEquals(controller2.getTargetColumn().getData(), monitoredController.getTargetColumn().getData());
-
- bridgeRow = getTableCache().get(bridge.getSchema().getName()).get(testBridgeUuid);
- monitoredBridge = getClient().getTypedRowWrapper(Bridge.class, bridgeRow);
- assertEquals(2, monitoredBridge.getControllerColumn().getData().size());
- }
-
- private void controllerDelete () throws ExecutionException, InterruptedException {
- Controller controller = getClient().getTypedRowWrapper(Controller.class, null);
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- DatabaseSchema dbSchema = getClient().getSchema(OPEN_VSWITCH_SCHEMA).get();
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(dbSchema)
- .add(op.delete(controller.getSchema())
- .where(controller.getUuidColumn().getSchema().opEqual(testController1Uuid))
- .build())
- .add(op.comment("Controller: Deleting " + testController1Uuid))
- .add(op.mutate(bridge.getSchema()) // Delete a controller column in the Bridge table
- .addMutation(bridge.getControllerColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testController1Uuid)))
- .add(op.comment("Bridge: Mutating " + testController1Uuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Controller: Delete operation results for controller1");
-
- transactionBuilder
- .add(op.delete(controller.getSchema())
- .where(controller.getUuidColumn().getSchema().opEqual(testController2Uuid))
- .build())
- .add(op.comment("Controller: Deleting " + testController2Uuid))
- .add(op.mutate(bridge.getSchema()) // Delete a controller column in the Bridge table
- .addMutation(bridge.getControllerColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testController2Uuid)))
- .add(op.comment("Bridge: Mutating " + testController2Uuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Controller: Delete operation results for controller2");
- }
-
- @Test
- public void testController () throws ExecutionException, InterruptedException {
- testBridgeUuid = bridgeInsert();
- controllerInsert();
- controllerDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- @Test
- public void testFlowSampleCollectorSetTableNotSupported () {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(flowSampleCollectorSetFromVersion) < 0);
-
- boolean isExceptionRaised = false;
- try {
- FlowSampleCollectorSet flowSampleCollectorSet = getClient().createTypedRowWrapper(FlowSampleCollectorSet.class);
- } catch (SchemaVersionMismatchException e) {
- isExceptionRaised = true;
- }
- assertTrue(isExceptionRaised);
- }
-
- public void flowSampleCollectorSetInsert () throws ExecutionException, InterruptedException {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(flowSampleCollectorSetFromVersion) >= 0);
-
- FlowSampleCollectorSet flowSampleCollectorSet =
- getClient().createTypedRowWrapper(FlowSampleCollectorSet.class);
- flowSampleCollectorSet.setId(Long.valueOf(1));
- flowSampleCollectorSet.setExternalIds(ImmutableMap.of("I <3", "ovs"));
- flowSampleCollectorSet.setBridge(testBridgeUuid);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(flowSampleCollectorSet.getSchema())
- .value(flowSampleCollectorSet.getIdColumn())
- .value(flowSampleCollectorSet.getExternalIdsColumn())
- .value(flowSampleCollectorSet.getBridgeColumn()))
- .add(op.comment("FlowSampleCollectorSet: Inserting"));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "FlowSampleCollectorSet: Insert results");
- testFlowSampleCollectorSetUuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testFlowSampleCollectorSetUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row flowSampleCollectorSetRow = getTableCache().get(flowSampleCollectorSet.getSchema().getName())
- .get(testFlowSampleCollectorSetUuid);
- FlowSampleCollectorSet monitoredflowSampleCollectorSet =
- getClient().getTypedRowWrapper(FlowSampleCollectorSet.class, flowSampleCollectorSetRow);
- assertEquals(flowSampleCollectorSet.getIdColumn().getData(),
- monitoredflowSampleCollectorSet.getIdColumn().getData());
- }
-
- public void flowSampleCollectorSetDelete () throws ExecutionException, InterruptedException {
- assumeTrue(schemaVersion.compareTo(flowSampleCollectorSetFromVersion) >= 0);
-
- FlowSampleCollectorSet flowSampleCollectorSet = getClient().getTypedRowWrapper(FlowSampleCollectorSet.class, null);
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.delete(flowSampleCollectorSet.getSchema())
- .where(flowSampleCollectorSet.getUuidColumn().getSchema().opEqual(testFlowSampleCollectorSetUuid))
- .build())
- .add(op.comment("FlowSampleCollectorSet: Deleting " + testFlowSampleCollectorSetUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Bridge delete operation results");
- }
-
- @Test
- public void testFlowSampleCollectorSet () throws ExecutionException, InterruptedException {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(flowSampleCollectorSetFromVersion) >= 0);
-
- testBridgeUuid = bridgeInsert();
- flowSampleCollectorSetInsert();
- flowSampleCollectorSetDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- @Test
- public void testFlowTableTableNotSupported () {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(flowTableFromVersion) < 0);
-
- boolean isExceptionRaised = false;
- try {
- FlowTable flowTable = getClient().createTypedRowWrapper(FlowTable.class);
- } catch (SchemaVersionMismatchException e) {
- isExceptionRaised = true;
- }
- assertTrue(isExceptionRaised);
- }
-
- public void flowTableInsert () throws ExecutionException, InterruptedException {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(flowTableFromVersion) >= 0);
-
- String flowTableUuidStr = "testFlowTable";
- String tableName = "flow_table_row_name";
- String overflowPolicy = "evict";
- String groups = "group name";
- String prefixes = "wildcarding prefixes";
- Long flowLimit = 50000L;
- Map<Long, UUID> flowTableBrRef = new HashMap<>();
- flowTableBrRef.put(1L, new UUID(flowTableUuidStr));
- FlowTable flowTable = getClient().createTypedRowWrapper(FlowTable.class);
- flowTable.setName(ImmutableSet.of(tableName));
- flowTable.setOverflowPolicy(ImmutableSet.of(overflowPolicy));
- flowTable.setGroups(ImmutableSet.of(groups));
- if (schemaVersion.compareTo(prefixesAddedVersion) >= 0) {
- flowTable.setPrefixes(ImmutableSet.of(prefixes));
- }
- if (schemaVersion.compareTo(externalIdAddedVerson) >= 0) {
- flowTable.setExternalIds(ImmutableMap.of("I <3", "OVS"));
- }
- flowTable.setFlowLimit(ImmutableSet.of(flowLimit));
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(flowTable)
- .withId(flowTableUuidStr))
- .add(op.comment("Flowtable: Inserting " + flowTableUuidStr))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getFlowTablesColumn().getSchema(), Mutator.INSERT, flowTableBrRef)
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating " + TEST_BRIDGE_NAME));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "FlowTable: Insert and Mutate results");
- testFlowTableUuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testFlowTableUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row flowTableRow = getTableCache().get(flowTable.getSchema().getName()).get(testFlowTableUuid);
- FlowTable monitoredFlowTable = getClient().getTypedRowWrapper(FlowTable.class, flowTableRow);
- assertEquals(flowTable.getNameColumn().getData(), monitoredFlowTable.getNameColumn().getData());
- }
-
- public void flowTableDelete () throws ExecutionException, InterruptedException {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(flowTableFromVersion) >= 0);
-
- FlowTable flowTable = getClient().getTypedRowWrapper(FlowTable.class, null);
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.delete(flowTable.getSchema())
- .where(flowTable.getUuidColumn().getSchema().opEqual(testFlowTableUuid))
- .build())
- .add(op.comment("FlowTable: Deleting " + testFlowTableUuid))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getFlowTablesColumn().getSchema(), Mutator.DELETE,
- Maps.newHashMap(ImmutableMap.of(1L, testFlowTableUuid))))
- .add(op.comment("Bridge: Mutating " + testFlowTableUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "FlowTable delete operation results");
- }
-
- @Test
- public void setTestFlowTableSet () throws ExecutionException, InterruptedException {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(flowTableFromVersion) >= 0);
-
- UUID testBridgeUuid = bridgeInsert();
- flowTableInsert();
- flowTableDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- @Test
- public void testIpfixTableNotSupported () {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(ipfixFromVersion) < 0);
-
- boolean isExceptionRaised = false;
- try {
- IPFIX ipfix = getClient().createTypedRowWrapper(IPFIX.class);
- } catch (SchemaVersionMismatchException e) {
- isExceptionRaised = true;
- }
- assertTrue(isExceptionRaised);
- }
-
- public void ipfixInsert () throws ExecutionException, InterruptedException {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(ipfixFromVersion) >= 0);
-
- String ipfixUuidStr = "testIpfix";
- String ipfixTarget = "172.16.20.1:4739";
- Long obsDomainId = 112L;
- Long obsPointId = 358L;
- Long cacheMax = 132L;
- Long cacheTimeout = 134L;
- Long sampling = 558L;
-
- IPFIX ipfix = getClient().createTypedRowWrapper(IPFIX.class);
- ipfix.setTargets(ImmutableSet.of(ipfixTarget));
- ipfix.setObsDomainId(ImmutableSet.of(obsDomainId));
- ipfix.setObsPointId(ImmutableSet.of(obsPointId));
- // Only set these rows if the schema version supports it
- if (schemaVersion.compareTo(ipfixCacheFromVersion) >= 0) {
- ipfix.setCacheMaxFlows(ImmutableSet.of(cacheMax));
- ipfix.setCacheActiveTimeout(ImmutableSet.of(cacheTimeout));
- }
- ipfix.setSampling(ImmutableSet.of(sampling));
- ipfix.setExternalIds(ImmutableMap.of("I <3", "ovs"));
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(ipfix)
- .withId(ipfixUuidStr))
- .add(op.comment("IPFIX: Inserting " + ipfixUuidStr))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getIpfixColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(ipfixUuidStr)))
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating " + ipfixUuidStr));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "IPFIX: Insert and Mutate results");
- testIpfixUuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testIpfixUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row ipfixRow = getTableCache().get(ipfix.getSchema().getName()).get(testIpfixUuid);
- IPFIX monitoredIPFIX = getClient().getTypedRowWrapper(IPFIX.class, ipfixRow);
- assertEquals(testIpfixUuid, monitoredIPFIX.getUuidColumn().getData());
- }
-
- public void ipfixDelete () throws ExecutionException, InterruptedException {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(ipfixFromVersion) >= 0);
-
- FlowTable flowTable = getClient().getTypedRowWrapper(FlowTable.class, null);
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.delete(flowTable.getSchema())
- .where(flowTable.getUuidColumn().getSchema().opEqual(testIpfixUuid))
- .build())
- .add(op.comment("IPFIX: Deleting " + testIpfixUuid))
- .add(op.mutate(bridge.getSchema()) // Delete a controller column in the Bridge table
- .addMutation(bridge.getMirrorsColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testIpfixUuid)))
- .add(op.comment("Bridge: Mutating " + testIpfixUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "IPFIX delete operation results");
- }
-
- @Test
- public void testIpfix () throws ExecutionException, InterruptedException {
- // Don't run this test if the table is not supported
- assumeTrue(schemaVersion.compareTo(ipfixFromVersion) >= 0);
-
- testBridgeUuid = bridgeInsert();
- ipfixInsert();
- ipfixDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- public void managerInsert() throws ExecutionException, InterruptedException {
- ImmutableMap<String, String> externalIds = ImmutableMap.of("slaveof", "themaster");
- UUID openVSwitchRowUuid = getOpenVSwitchTableUuid(getClient(), getTableCache());
- OpenVSwitch openVSwitch = getClient().getTypedRowWrapper(OpenVSwitch.class, null);
- Manager manager = getClient().createTypedRowWrapper(Manager.class);
- manager.setInactivityProbe(Sets.newHashSet(8192L));
- manager.setMaxBackoff(Sets.newHashSet(4094L));
- manager.setTarget(Sets.newHashSet("172.16.50.50:6640"));
- manager.setExternalIds(externalIds);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(manager.getSchema())
- .withId(TEST_MANAGER_UUID_STR)
- .value(manager.getTargetColumn())
- .value(manager.getInactivityProbeColumn())
- .value(manager.getMaxBackoffColumn())
- .value(manager.getExternalIdsColumn()))
- .add(op.comment("Manager: Inserting Slave Manager " + TEST_MANAGER_UUID_STR))
- .add(op.mutate(openVSwitch.getSchema())
- .addMutation(openVSwitch.getManagerOptionsColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(TEST_MANAGER_UUID_STR)))
- .where(openVSwitch.getUuidColumn().getSchema().opEqual(openVSwitchRowUuid))
- .build())
- .add(op.comment("Open_vSwitch: Mutating " + TEST_MANAGER_UUID_STR));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Manager: Insert & Mutate operation results");
- testManagerUuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testManagerUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row managerRow = getTableCache().get(manager.getSchema().getName()).get(testManagerUuid);
- Manager monitoredManager = getClient().getTypedRowWrapper(Manager.class, managerRow);
- assertEquals(externalIds, monitoredManager.getExternalIdsColumn().getData());
- }
-
- public void managerDelete () throws ExecutionException, InterruptedException {
- Manager manager = getClient().getTypedRowWrapper(Manager.class, null);
- OpenVSwitch openVSwitch = getClient().getTypedRowWrapper(OpenVSwitch.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.delete(manager.getSchema())
- .where(manager.getUuidColumn().getSchema().opEqual(testManagerUuid))
- .build())
- .add(op.comment("Manager: Deleting " + TEST_MANAGER_UUID_STR))
- .add(op.mutate(openVSwitch.getSchema())
- .addMutation(openVSwitch.getManagerOptionsColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testManagerUuid)))
- .add(op.comment("Open_vSwitch: Mutating " + TEST_MANAGER_UUID_STR + " " + testManagerUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Manager: Delete operation results");
- }
-
- @Test
- public void testManager () throws ExecutionException, InterruptedException {
- managerInsert();
- managerDelete();
- }
-
- public void mirrorInsert () throws ExecutionException, InterruptedException {
- String mirrorUuidStr = "testMirror";
- String mirrorName = "my_name_is_mirror";
- Long outputVid = 1024L;
- Long selectVid = Long.valueOf(2048);
-
- Mirror mirror = getClient().createTypedRowWrapper(Mirror.class);
- mirror.setName(ImmutableSet.of(mirrorName));
- mirror.setExternalIds(ImmutableMap.of("overlays", "ftw"));
- mirror.setOutputVlan(ImmutableSet.of(outputVid));
- mirror.setSelectVlan(ImmutableSet.of(selectVid));
- mirror.setExternalIds(ImmutableMap.of("reading", "urmail"));
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(mirror.getSchema())
- .withId(mirrorUuidStr)
- .value(mirror.getNameColumn())
- .value(mirror.getExternalIdsColumn()))
- .add(op.comment("Mirror: Inserting " + mirrorUuidStr))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getMirrorsColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(mirrorUuidStr)))
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating " + TEST_BRIDGE_NAME));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Insert and Mutate operation results for Mirror");
- testMirrorUuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testMirrorUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row mirrorRow = getTableCache().get(mirror.getSchema().getName()).get(testMirrorUuid);
- Mirror monitoredMirror = getClient().getTypedRowWrapper(Mirror.class, mirrorRow);
- assertEquals(mirror.getExternalIdsColumn().getData(), monitoredMirror.getExternalIdsColumn().getData());
- }
-
- private void mirrorDelete () throws ExecutionException, InterruptedException {
- Mirror mirror = getClient().getTypedRowWrapper(Mirror.class, null);
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- DatabaseSchema dbSchema = getClient().getSchema(OPEN_VSWITCH_SCHEMA).get();
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(dbSchema)
- .add(op.delete(mirror.getSchema())
- .where(mirror.getUuidColumn().getSchema().opEqual(testMirrorUuid))
- .build())
- .add(op.comment("Mirror: Deleting " + testMirrorUuid))
- .add(op.mutate(bridge.getSchema()) // Delete a controller column in the Bridge table
- .addMutation(bridge.getMirrorsColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testMirrorUuid)))
- .add(op.comment("Bridge: Mutating " + testMirrorUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Mirror: Delete operation results");
- }
-
- @Test
- public void testMirror () throws ExecutionException, InterruptedException {
- testBridgeUuid = bridgeInsert();
- mirrorInsert();
- mirrorDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- public void netFlowInsert () throws ExecutionException, InterruptedException {
- String netFlowUuidStr = "testNetFlow";
- String netFlowTargets = "172.16.20.200:6343";
- Long engineType = 128L;
- Long engineID = 32L;
- Long activityTimeout = 1L;
- NetFlow netFlow = getClient().createTypedRowWrapper(NetFlow.class);
- netFlow.setTargets(ImmutableSet.of(netFlowTargets));
- netFlow.setEngineType(ImmutableSet.of(engineType));
- netFlow.setEngineId(ImmutableSet.of(engineID));
- netFlow.setActivityTimeout(ImmutableSet.of(activityTimeout));
- netFlow.setExternalIds(ImmutableMap.of("big", "baby"));
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(netFlow.getSchema())
- .withId(netFlowUuidStr)
- .value(netFlow.getTargetsColumn())
- .value(netFlow.getEngineTypeColumn())
- .value(netFlow.getEngineIdColumn())
- .value(netFlow.getActiveTimeoutColumn())
- .value(netFlow.getExternalIdsColumn()))
- .add(op.comment("Mirror: Inserting " + netFlowUuidStr))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getNetflowColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(netFlowUuidStr)))
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating " + TEST_BRIDGE_NAME));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Insert and Mutate operation results for NetFlow");
- testNetFlowUuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testNetFlowUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row netFlowRow = getTableCache().get(netFlow.getSchema().getName()).get(testNetFlowUuid);
- Mirror monitoredNetFlow = getClient().getTypedRowWrapper(Mirror.class, netFlowRow);
- assertEquals(netFlow.getExternalIdsColumn().getData(), monitoredNetFlow.getExternalIdsColumn().getData());
- }
-
- private void netFlowDelete () throws ExecutionException, InterruptedException {
- NetFlow netFlow = getClient().getTypedRowWrapper(NetFlow.class, null);
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- DatabaseSchema dbSchema = getClient().getSchema(OPEN_VSWITCH_SCHEMA).get();
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(dbSchema)
- .add(op.delete(netFlow.getSchema())
- .where(netFlow.getUuidColumn().getSchema().opEqual(testNetFlowUuid))
- .build())
- .add(op.comment("NetFlow: Deleting " + testNetFlowUuid))
- .add(op.mutate(bridge.getSchema()) // Delete a controller column in the Bridge table
- .addMutation(bridge.getNetflowColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testNetFlowUuid)))
- .add(op.comment("Bridge: Mutating " + testNetFlowUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "NetFlow: Delete operation results");
- }
-
- @Test
- public void testNetFlow () throws ExecutionException, InterruptedException {
- testBridgeUuid = bridgeInsert();
- netFlowInsert();
- netFlowDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- public void portAndInterfaceInsert () throws ExecutionException, InterruptedException {
- String portUuidStr = "testPort";
- String intfUuidStr = "testIntf";
- String tunnelEncap = "vxlan";
- Port port = getClient().createTypedRowWrapper(Port.class);
- port.setName("testPort");
- port.setTag(ImmutableSet.of(1L));
- port.setMac(ImmutableSet.of("00:00:00:00:00:01"));
- port.setInterfaces(ImmutableSet.of(new UUID(intfUuidStr)));
-
- Interface intf = getClient().createTypedRowWrapper(Interface.class);
- intf.setName(port.getNameColumn().getData());
- intf.setType(tunnelEncap);
- intf.setExternalIds(ImmutableMap.of("vm-id", "12345abcedf78910"));
- // For per Flow TEPs use remote_ip=flow
- // For per Port TEPs use remote_ip=x.x.x.x (ipv4)
- intf.setOptions(ImmutableMap.of("local_ip", "172.16.24.145",
- "remote_ip", "flow",
- "key", "flow",
- "dst_port", "8472"));
-
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(port.getSchema())
- .withId(portUuidStr)
- .value(port.getNameColumn())
- .value(port.getMacColumn()))
- .add(op.comment("Port: Inserting " + portUuidStr))
- .add(op.insert(intf.getSchema())
- .withId(intfUuidStr)
- .value(intf.getNameColumn()))
- .add(op.comment("Interface: Inserting " + intfUuidStr))
- .add(op.update(port.getSchema())
- .set(port.getTagColumn())
- .set(port.getMacColumn())
- .set(port.getInterfacesColumn())
- .where(port.getNameColumn().getSchema().opEqual(port.getName()))
- .build())
- .add(op.comment("Port: Updating " + portUuidStr))
- .add(op.update(intf.getSchema())
- .set(intf.getTypeColumn())
- .set(intf.getExternalIdsColumn())
- .set(intf.getOptionsColumn())
- .where(intf.getNameColumn().getSchema().opEqual(intf.getName()))
- .build())
- .add(op.comment("Interface: Updating " + intfUuidStr))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getPortsColumn().getSchema(), Mutator.INSERT, Sets.newHashSet(new UUID(portUuidStr)))
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating " + TEST_BRIDGE_NAME));
-
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Insert and Mutate operation results for Port and Interface");
- testPortUuid = operationResults.get(0).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testPortUuid);
- testInterfaceUuid = operationResults.get(2).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testInterfaceUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row portRow = getTableCache().get(port.getSchema().getName()).get(testPortUuid);
- Port monitoredPort = getClient().getTypedRowWrapper(Port.class, portRow);
- assertEquals(port.getNameColumn().getData(), monitoredPort.getNameColumn().getData());
-
- Row interfaceRow = getTableCache().get(intf.getSchema().getName()).get(testInterfaceUuid);
- Interface monitoredInterface = getClient().getTypedRowWrapper(Interface.class, interfaceRow);
- assertEquals(intf.getNameColumn().getData(), monitoredInterface.getNameColumn().getData());
- }
-
- private void portAndInterfaceDelete () throws ExecutionException, InterruptedException {
- Port port = getClient().getTypedRowWrapper(Port.class, null);
- Interface intf = getClient().getTypedRowWrapper(Interface.class, null);
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- DatabaseSchema dbSchema = getClient().getSchema(OPEN_VSWITCH_SCHEMA).get();
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(dbSchema)
- .add(op.delete(port.getSchema())
- .where(port.getUuidColumn().getSchema().opEqual(testPortUuid))
- .build())
- .add(op.comment("Port: Deleting " + testPortUuid))
- .add(op.delete(intf.getSchema())
- .where(intf.getUuidColumn().getSchema().opEqual(testInterfaceUuid))
- .build())
- .add(op.comment("Interface: Deleting " + testInterfaceUuid))
- .add(op.mutate(bridge.getSchema()) // Delete a port column in the Bridge table
- .addMutation(bridge.getPortsColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testPortUuid)))
- .add(op.comment("Bridge: Mutating " + testPortUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Port and Interface: Delete operation results");
- }
-
- @Test
- public void testPortAndInterface () throws ExecutionException, InterruptedException {
- testBridgeUuid = bridgeInsert();
- portAndInterfaceInsert();
- portAndInterfaceDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- public void qosInsert() throws ExecutionException, InterruptedException {
- String portUuidStr = "testQosPortUuid";
- String intfUuidStr = "testQosIntfUuid";
- String qosUuidStr = "testQosUuid";
- String qosPort = "testQosPort";
-
- Port port = getClient().createTypedRowWrapper(Port.class);
- port.setName(qosPort);
- port.setInterfaces(ImmutableSet.of(new UUID(intfUuidStr)));
- port.setQos(ImmutableSet.of(new UUID(qosUuidStr)));
- port.setOtherConfig(ImmutableMap.of("m0r3", "c0ff33"));
-
- Interface intf = getClient().createTypedRowWrapper(Interface.class);
- intf.setName(port.getNameColumn().getData());
- intf.setOtherConfig(ImmutableMap.of("proto", "duction"));
- intf.setExternalIds(ImmutableMap.of("stringly", "typed"));
-
- Qos qos = getClient().createTypedRowWrapper(Qos.class);
- qos.setOtherConfig(ImmutableMap.of("mmm", "kay"));
- qos.setType(ImmutableSet.of("404"));
-
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(port.getSchema())
- .withId(portUuidStr)
- .value(port.getNameColumn()))
- .add(op.comment("Port: Inserting " + portUuidStr))
- .add(op.insert(intf.getSchema())
- .withId(intfUuidStr)
- .value(intf.getExternalIdsColumn())
- .value(intf.getNameColumn())
- .value(intf.getOtherConfigColumn()))
- .add(op.comment("Interface: Inserting " + intfUuidStr))
- .add(op.insert(qos.getSchema())
- .withId(qosUuidStr)
- .value(qos.getTypeColumn())
- .value(qos.getOtherConfigColumn()))
- .add(op.comment("QOS: Inserting " + qosUuidStr))
- .add(op.update(port.getSchema())
- .set(port.getOtherConfigColumn())
- .set(port.getInterfacesColumn())
- .set(port.getQosColumn())
- .where(port.getNameColumn().getSchema().opEqual(port.getName()))
- .build())
- .add(op.comment("Interface: Updating " + intfUuidStr))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getPortsColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(portUuidStr)))
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating " + TEST_BRIDGE_NAME));
-
- int insertPortOperationIndex = 0;
- int insertInterfaceOperationIndex = 2;
- int insertQosOperationIndex = 4;
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Insert and Mutate operation results for Port and Interface");
- testPortUuid = operationResults.get(insertPortOperationIndex).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testPortUuid);
- testInterfaceUuid = operationResults.get(insertInterfaceOperationIndex).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testInterfaceUuid);
- testQosUuid = operationResults.get(insertQosOperationIndex).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testQosUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row portRow = getTableCache().get(port.getSchema().getName()).get(testPortUuid);
- Port monitoredPort = getClient().getTypedRowWrapper(Port.class, portRow);
- assertEquals(port.getNameColumn().getData(), monitoredPort.getNameColumn().getData());
-
- Row interfaceRow = getTableCache().get(intf.getSchema().getName()).get(testInterfaceUuid);
- Interface monitoredInterface = getClient().getTypedRowWrapper(Interface.class, interfaceRow);
- assertEquals(intf.getNameColumn().getData(), monitoredInterface.getNameColumn().getData());
-
- Row qosRow = getTableCache().get(qos.getSchema().getName()).get(testQosUuid);
- Qos monitoredQos = getClient().getTypedRowWrapper(Qos.class, qosRow);
- assertEquals(qos.getTypeColumn().getData().toArray()[0], monitoredQos.getTypeColumn().getData());
- }
-
- private void qosDelete () throws ExecutionException, InterruptedException {
- Port port = getClient().getTypedRowWrapper(Port.class, null);
- Interface intf = getClient().getTypedRowWrapper(Interface.class, null);
- Qos qos = getClient().getTypedRowWrapper(Qos.class, null);
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- DatabaseSchema dbSchema = getClient().getSchema(OPEN_VSWITCH_SCHEMA).get();
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(dbSchema)
- .add(op.delete(port.getSchema())
- .where(port.getUuidColumn().getSchema().opEqual(testPortUuid))
- .build())
- .add(op.comment("Port: Deleting " + testPortUuid))
- .add(op.delete(intf.getSchema())
- .where(intf.getUuidColumn().getSchema().opEqual(testInterfaceUuid))
- .build())
- .add(op.comment("Interface: Deleting " + testInterfaceUuid))
- .add(op.delete(qos.getSchema())
- .where(qos.getUuidColumn().getSchema().opEqual(testQosUuid))
- .build())
- .add(op.comment("Qos: Deleting " + testQosUuid))
- .add(op.mutate(bridge.getSchema()) // Delete a port column in the Bridge table
- .addMutation(bridge.getPortsColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testPortUuid)))
- .add(op.comment("Bridge: Mutating " + testPortUuid))
- .add(op.mutate(port.getSchema()) // Delete a qos column in the Port table
- .addMutation(port.getQosColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testQosUuid)))
- .add(op.comment("Port: Mutating " + testPortUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Qos, Port and Interface: Delete operation results");
- }
-
- @Test
- public void testQos () throws ExecutionException, InterruptedException {
- testBridgeUuid = bridgeInsert();
- qosInsert();
- qosDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- public void queueInsert() throws InterruptedException, ExecutionException {
- /**
- * This is an arbitrary String that is a placeholder for
- * the future UUID generated by the OVSDB Server in the
- * future transaction. While it is possible to generate
- * ones own UUID for the transaction it is not recommended
- * since it wouldn't add any conceivable value.
- */
- String queueUuidStr = "queueUuidStr";
- Long dscpVal = Long.valueOf(4);
- Queue queue = getClient().createTypedRowWrapper(Queue.class);
- // Example of explicit ImmutableSet/Map Attribute declaration
- ImmutableSet<Long> dscp = ImmutableSet.of(dscpVal);
- ImmutableMap<String, String> externalIds = ImmutableMap.of("little", "coat");
- // Example of condensing the attributes bindings in one line
- queue.setOtherConfig(ImmutableMap.of("war", "onfun"));
- // Bind the Attributes to the transaction. These values end up in columns.
- queue.setExternalIds(externalIds);
- queue.setDscp(dscp);
- // Get the parent Qos table row UUID to insert the queue.
- Qos qos = getClient().getTypedRowWrapper(Qos.class, null);
- // Queue key that is mapped to the queue record/value/ofp_action_enqueue
- Long queueKey = 0L;
- // Reference the Port row to insert the Queue with UID or Port name
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(queue.getSchema())
- .withId(queueUuidStr)
- .value(queue.getDscpColumn())
- .value(queue.getExternalIdsColumn())
- .value(queue.getOtherConfigColumn()))
- .add(op.comment("Queue: Inserting " + queueUuidStr))
- .add(op.mutate(qos.getSchema())
- .addMutation(qos.getQueuesColumn().getSchema(), Mutator.INSERT,
- ImmutableMap.of(queueKey, new UUID(queueUuidStr)))
- .where(qos.getUuidColumn().getSchema().opEqual(testQosUuid))
- .build())
- .add(op.comment("Qos: Mutating " + testQosUuid));
-
- // The transaction index for the Queue insert is used to store the Queue UUID
- int insertQueueOperationIndex = 0;
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Insert and Mutate operation results for Queue");
- testQueueUuid = operationResults.get(insertQueueOperationIndex).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testQueueUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row queueRow = getTableCache().get(queue.getSchema().getName()).get(testQueueUuid);
- Queue monitoredQueue = getClient().getTypedRowWrapper(Queue.class, queueRow);
- assertEquals(queue.getExternalIdsColumn().getData(), monitoredQueue.getExternalIdsColumn().getData());
- }
-
- private void queueDelete () throws ExecutionException, InterruptedException {
- Queue queue = getClient().getTypedRowWrapper(Queue.class, null);
- Qos qos = getClient().getTypedRowWrapper(Qos.class, null);
- DatabaseSchema dbSchema = getClient().getSchema(OPEN_VSWITCH_SCHEMA).get();
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(dbSchema)
- .add(op.delete(queue.getSchema())
- .where(queue.getUuidColumn().getSchema().opEqual(testQueueUuid))
- .build())
- .add(op.comment("Queue: Deleting " + testQueueUuid))
- .add(op.mutate(qos.getSchema()) // Delete a queue column in the Qos table
- .addMutation(qos.getQueuesColumn().getSchema(), Mutator.DELETE,
- Maps.newHashMap(ImmutableMap.of(0L,testQueueUuid))))
- .add(op.comment("Queue: Mutating " + testQueueUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Queue: Delete operation results");
- }
-
- @Test
- public void testQueue () throws ExecutionException, InterruptedException {
- testBridgeUuid = bridgeInsert();
- qosInsert();
- queueInsert();
- queueDelete();
- qosDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- public void sFlowInsert () throws ExecutionException, InterruptedException {
- String sFlowUuidStr = "testSFlow";
- String sFlowTarget = "172.16.20.200:6343";
- Long header = 128L;
- Long obsPointId = 358L;
- Long polling =10L;
- String agent = "172.16.20.210";
- Long sampling = 64L;
- SFlow sFlow = getClient().createTypedRowWrapper(SFlow.class);
- sFlow.setTargets(ImmutableSet.of(sFlowTarget));
- sFlow.setHeader(ImmutableSet.of(header));
- sFlow.setPolling(ImmutableSet.of(obsPointId));
- sFlow.setPolling(ImmutableSet.of(polling));
- sFlow.setAgent(ImmutableSet.of(agent));
- sFlow.setSampling(ImmutableSet.of(sampling));
- sFlow.setExternalIds(ImmutableMap.of("kit", "tah"));
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(sFlow.getSchema())
- .withId(sFlowUuidStr)
- .value(sFlow.getTargetsColumn())
- .value(sFlow.getHeaderColumn())
- .value(sFlow.getPollingColumn())
- .value(sFlow.getAgentColumn())
- .value(sFlow.getSamplingColumn())
- .value(sFlow.getExternalIdsColumn()))
- .add(op.comment("sFlow: Inserting " + sFlowUuidStr))
- .add(op.mutate(bridge.getSchema())
- .addMutation(bridge.getSflowColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(sFlowUuidStr)))
- .where(bridge.getNameColumn().getSchema().opEqual(TEST_BRIDGE_NAME))
- .build())
- .add(op.comment("Bridge: Mutating " + TEST_BRIDGE_NAME));
- int insertSFlowOperationIndex = 0;
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Insert and Mutate operation results for sFlow");
- testSFlowUuid = operationResults.get(insertSFlowOperationIndex).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testSFlowUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row sFlowRow = getTableCache().get(sFlow.getSchema().getName()).get(testSFlowUuid);
- Queue monitoredSFlow = getClient().getTypedRowWrapper(Queue.class, sFlowRow);
- assertEquals(sFlow.getExternalIdsColumn().getData(), monitoredSFlow.getExternalIdsColumn().getData());
- }
-
- private void sFlowDelete () throws ExecutionException, InterruptedException {
- SFlow sFlow = getClient().getTypedRowWrapper(SFlow.class, null);
- Bridge bridge = getClient().getTypedRowWrapper(Bridge.class, null);
- DatabaseSchema dbSchema = getClient().getSchema(OPEN_VSWITCH_SCHEMA).get();
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(dbSchema)
- .add(op.delete(sFlow.getSchema())
- .where(sFlow.getUuidColumn().getSchema().opEqual(testSFlowUuid))
- .build())
- .add(op.comment("SFlow: Deleting " + testSFlowUuid))
- .add(op.mutate(bridge.getSchema()) // Delete an sflow column in the Bridge table
- .addMutation(bridge.getSflowColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testSFlowUuid)))
- .add(op.comment("Bridge: Mutating " + testSFlowUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "Queue: Delete operation results");
- }
-
- @Test
- public void testSFlow () throws ExecutionException, InterruptedException {
- testBridgeUuid = bridgeInsert();
- sFlowInsert();
- sFlowDelete();
- bridgeDelete(testBridgeUuid);
- }
-
- public void sslInsert () throws ExecutionException, InterruptedException {
-
- String sslUuidStr = "sslUuidName";
- String caCert = "PARC";
- String certificate = "01101110 01100101 01110010 01100100";
- String privateKey = "SSL_Table_Test_Secret";
- ImmutableMap<String, String> externalIds = ImmutableMap.of("roomba", "powered");
-
- SSL ssl = getClient().createTypedRowWrapper(SSL.class);
- ssl.setCaCert(caCert);
- ssl.setCertificate(certificate);
- ssl.setPrivateKey(privateKey);
- ssl.setExternalIds(externalIds);
- // Get the parent OVS table UUID in it's single row
- UUID openVSwitchRowUuid = getOpenVSwitchTableUuid(getClient(), getTableCache());
- OpenVSwitch openVSwitch = getClient().getTypedRowWrapper(OpenVSwitch.class, null);
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.insert(ssl.getSchema())
- .withId(sslUuidStr)
- .value(ssl.getCertificateColumn())
- .value(ssl.getPrivateKeyColumn())
- .value(ssl.getCaCertColumn())
- .value(ssl.getExternalIdsColumn()))
- .add(op.comment("SSL: Inserting " + sslUuidStr))
- .add(op.mutate(openVSwitch.getSchema())
- .addMutation(openVSwitch.getSslColumn().getSchema(), Mutator.INSERT,
- Sets.newHashSet(new UUID(sslUuidStr)))
- .where(openVSwitch.getUuidColumn().getSchema().opEqual(openVSwitchRowUuid))
- .build())
- .add(op.comment("Open_vSwitch: Mutating " + sslUuidStr));
-
- // The transaction index for the SSL insert is used to store the SSL UUID
- int insertSslOperationIndex = 0;
- List<OperationResult> operationResults = executeTransaction(transactionBuilder,
- "Insert and Mutate operation results for SSL");
- testSslUuid = operationResults.get(insertSslOperationIndex).getUuid();
- assertNotNull(ASSERT_TRANS_UUID, testSslUuid);
-
- // Verify that the local cache was updated with the remote changes
- Row sslRow = getTableCache().get(ssl.getSchema().getName()).get(testSslUuid);
- SSL monitoredSsl = getClient().getTypedRowWrapper(SSL.class, sslRow);
- assertEquals(ssl.getExternalIdsColumn().getData(), monitoredSsl.getExternalIdsColumn().getData());
- }
-
- public void sslDelete () throws ExecutionException, InterruptedException {
- SSL ssl = getClient().getTypedRowWrapper(SSL.class, null);
- OpenVSwitch openVSwitch = getClient().getTypedRowWrapper(OpenVSwitch.class, null);
-
- TransactionBuilder transactionBuilder = getClient().transactBuilder(getDbSchema())
- .add(op.delete(ssl.getSchema())
- .where(ssl.getUuidColumn().getSchema().opEqual(testSslUuid))
- .build())
- .add(op.comment("SSL: Deleting " + testSslUuid))
- .add(op.mutate(openVSwitch.getSchema())
- .addMutation(openVSwitch.getSslColumn().getSchema(), Mutator.DELETE,
- Sets.newHashSet(testSslUuid)))
- .add(op.comment("Open_vSwitch: Mutating " + testSslUuid))
- .add(op.commit(true));
-
- executeTransaction(transactionBuilder, "SSL delete operation results");
- }
-
- @Test
- public void testSsl () throws ExecutionException, InterruptedException {
- sslInsert();
- sslDelete();
- }
-
- @Test
- public void testTyperUtilsSpecialMethodsToString () {
- Bridge bridge = getClient().createTypedRowWrapper(Bridge.class);
- assertNotNull(bridge);
- bridge.setName(TEST_BRIDGE_NAME);
- bridge.setStatus(ImmutableMap.of("key", "value"));
- bridge.setFloodVlans(Sets.newHashSet(34L));
- assertNotNull(bridge.toString());
-
- Bridge nullRowBridge = getClient().getTypedRowWrapper(Bridge.class, null);
- assertNotNull(nullRowBridge.toString());
- }
-
- @Test
- public void testTyperUtilsSpecialMethodsEquals () {
- Bridge bridge = getClient().createTypedRowWrapper(Bridge.class);
- assertNotNull(bridge);
- bridge.setName(TEST_BRIDGE_NAME);
- bridge.setStatus(ImmutableMap.of("key", "value"));
- bridge.setFloodVlans(Sets.newHashSet(34L));
-
- assertTrue("Equals check on same Bridge object", bridge.equals(bridge));
-
- Bridge bridge2 = getClient().createTypedRowWrapper(Bridge.class);
- assertNotNull(bridge2);
- bridge2.setName(bridge.getName());
- bridge2.setStatus(bridge.getStatusColumn().getData());
- bridge2.setFloodVlans(bridge.getFloodVlansColumn().getData());
-
- assertTrue("Equals check for different Bridge objects with same content", bridge.equals(bridge2));
-
- bridge2.setStpEnable(true);
- assertFalse("Equals check for different Bridge objects with different content", bridge.equals(bridge2));
-
- Port port = getClient().createTypedRowWrapper(Port.class);
- port.setName(bridge.getName());
- assertFalse("Equals check for a Bridge object and Port Object", bridge.equals(port));
- assertFalse("Equals check for a Typed Proxy object and non-proxy object", port.equals("String"));
-
- Bridge nullRowBridge = getClient().getTypedRowWrapper(Bridge.class, null);
- assertTrue("Equals check on Bridge object with null Row", nullRowBridge.equals(nullRowBridge));
- }
-
- @Test
- public void testTyperUtilsSpecialMethodsHashCode () {
- Bridge bridge = getClient().createTypedRowWrapper(Bridge.class);
-
- assertNotNull(bridge);
- bridge.setName(TEST_BRIDGE_NAME);
- bridge.setStatus(ImmutableMap.of("key", "value"));
- bridge.setFloodVlans(Sets.newHashSet(34L));
-
- assertNotSame(bridge.hashCode(), 0);
- Bridge nullRowBridge = getClient().getTypedRowWrapper(Bridge.class, null);
- assertSame(nullRowBridge.hashCode(), 0);
- }
-}
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>karaf</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<name>${project.artifactId}</name>
<prerequisites>
<maven>3.1.1</maven>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
<extensions>true</extensions>
<configuration>
<instructions>
- <Export-Package></Export-Package>
<Import-Package>org.opendaylight.controller.sal.utils,
org.opendaylight.controller.northbound.commons,
org.opendaylight.controller.northbound.commons.exception,
org.slf4j,
org.apache.catalina.filters,
!org.codehaus.enunciate.jaxrs,*</Import-Package>
- <Export-Package></Export-Package>
+ <Export-Package/>
<Web-ContextPath>/ovsdb/nb</Web-ContextPath>
<Jaxrs-Resources>,${classes;ANNOTATION;javax.ws.rs.Path}</Jaxrs-Resources>
</instructions>
import org.opendaylight.ovsdb.lib.notation.Row;
import org.opendaylight.ovsdb.lib.schema.DatabaseSchema;
import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
public class OvsdbRow {
+ private static final Logger LOG = LoggerFactory.getLogger(OvsdbRow.class);
private static final String PARENTUUID = "parent_uuid";
private static final String PARENTTABLE = "parent_table";
private static final String PARENTCOLUMN = "parent_column";
if (rowNode == null) {
return null;
}
- for(Iterator<String> fieldNames = rowNode.fieldNames(); fieldNames.hasNext();) {
+ Iterator<String> fieldNames = rowNode.fieldNames();
+ if (fieldNames.hasNext()) {
String tableName = fieldNames.next();
- Row<GenericTableSchema> row = null;
try {
- row = getRow(client, dbName, tableName, rowNode.get(tableName));
+ Row<GenericTableSchema> row = getRow(client, dbName, tableName, rowNode.get(tableName));
+ return new OvsdbRow(parentTable, parentUuid, parentColumn, tableName, row);
} catch (InterruptedException | ExecutionException | IOException e) {
- e.printStackTrace();
+ LOG.error("Error retrieving the row for {}", tableName, e);
return null;
}
- return new OvsdbRow(parentTable, parentUuid, parentColumn, tableName, row);
}
return null;
}
- public static Row<GenericTableSchema> getRow(OvsdbClient client, String dbName, String tableName, JsonNode rowJson) throws InterruptedException, ExecutionException, JsonParseException, IOException {
+ public static Row<GenericTableSchema> getRow(OvsdbClient client, String dbName, String tableName, JsonNode rowJson) throws InterruptedException, ExecutionException, IOException {
DatabaseSchema dbSchema = client.getSchema(dbName).get();
GenericTableSchema schema = dbSchema.table(tableName, GenericTableSchema.class);
return schema.createRow((ObjectNode)rowJson);
<parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>config-parent</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <artifactId>mdsal-it-parent</artifactId>
+ <version>1.3.0-SNAPSHOT</version>
<relativePath/>
</parent>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>openstack.net-virt-it</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>jar</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>features-ovsdb</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
+++ /dev/null
-/*
- * Copyright (C) 2015 Red Hat, Inc. and others
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.ovsdb.openstack.netvirt.it;
-
-import static org.ops4j.pax.exam.CoreOptions.maven;
-import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.editConfigurationFilePut;
-import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.features;
-import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.karafDistributionConfiguration;
-import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.keepRuntimeFolder;
-
-import com.google.common.collect.ObjectArrays;
-import java.io.File;
-import java.lang.management.ManagementFactory;
-import java.util.Calendar;
-import javax.management.InstanceNotFoundException;
-import org.junit.Rule;
-import org.junit.internal.AssumptionViolatedException;
-import org.junit.rules.TestRule;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
-import org.opendaylight.controller.config.api.ConfigRegistry;
-import org.opendaylight.controller.config.util.ConfigRegistryJMXClient;
-import org.ops4j.pax.exam.Configuration;
-import org.ops4j.pax.exam.Option;
-import org.ops4j.pax.exam.karaf.options.LogLevelOption.LogLevel;
-import org.ops4j.pax.exam.options.MavenArtifactUrlReference;
-import org.ops4j.pax.exam.options.MavenUrlReference;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-//import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.features;
-
-/**
- * @author Sam Hague (shague@redhat.com)
- */
-public abstract class AbstractConfigTestBase {
- private static final Logger LOG = LoggerFactory.getLogger(AbstractConfigTestBase.class);
-
- /*
- * Wait up to 10s for our configured module to come up
- */
- private static final int MODULE_TIMEOUT = 10000;
- private static int configTimes = 0;
-
- public abstract String getModuleName();
-
- public abstract String getInstanceName();
-
- public abstract MavenUrlReference getFeatureRepo();
-
- public abstract String getFeatureName();
-
- public Option[] getLoggingOptions() {
- Option[] options = new Option[] {
- editConfigurationFilePut(NetvirtITConstants.ORG_OPS4J_PAX_LOGGING_CFG,
- logConfiguration(AbstractConfigTestBase.class),
- LogLevel.INFO.name())
- };
- return options;
- }
-
- public String logConfiguration(Class<?> klazz) {
- return "log4j.logger." + klazz.getPackage().getName();
- }
-
- public Option[] getFeaturesOptions() {
- return new Option[]{};
- }
-
- public Option[] getPropertiesOptions() {
- return new Option[]{};
- }
-
- public MavenArtifactUrlReference getKarafDistro() {
- MavenArtifactUrlReference karafUrl;
- /*karafUrl = maven()
- .groupId("org.opendaylight.controller")
- .artifactId("opendaylight-karaf-empty")
- .version("1.5.0-SNAPSHOT")
- .type("zip");*/
-
- karafUrl = maven()
- .groupId("org.opendaylight.ovsdb")
- .artifactId("karaf")
- .versionAsInProject()
- .type("zip");
-
- return karafUrl;
- }
-
- @Configuration
- public Option[] config() {
- LOG.info("Calling config, configTimes: {}", configTimes);
- configTimes++;
- Option[] options = new Option[] {
- //KarafDistributionOption.debugConfiguration("5005", true),
- karafDistributionConfiguration()
- .frameworkUrl(getKarafDistro())
- .unpackDirectory(new File("target/exam"))
- .useDeployFolder(false),
- keepRuntimeFolder(),
- //features(getFeatureRepo() , getFeatureName())
- };
- options = ObjectArrays.concat(options, getFeaturesOptions(), Option.class);
- options = ObjectArrays.concat(options, getLoggingOptions(), Option.class);
- options = ObjectArrays.concat(options, getPropertiesOptions(), Option.class);
- return options;
- }
-
- public void setup() throws Exception {
- LOG.info("Module: {} Instance: {} attempting to configure.",
- getModuleName(),getInstanceName());
- Calendar start = Calendar.getInstance();
- ConfigRegistry configRegistryClient = new ConfigRegistryJMXClient(ManagementFactory
- .getPlatformMBeanServer());
- for (int timer = 0;timer < MODULE_TIMEOUT;timer++) {
- try {
- configRegistryClient.lookupConfigBean(getModuleName(), getInstanceName());
- Thread.sleep(100);
- } catch (InstanceNotFoundException e) {
- if (timer >= MODULE_TIMEOUT) {
- throw e;
- }
- } catch (InterruptedException e) {
- LOG.error("Exception: ",e);
- }
- }
- Calendar stop = Calendar.getInstance();
- LOG.info("Module: {} Instance: {} configured after {} ms",
- getModuleName(),getInstanceName(),
- stop.getTimeInMillis() - start.getTimeInMillis());
- }
-
- @Rule
- public TestRule watcher = new TestWatcher() {
- @Override
- protected void starting(Description description) {
- LOG.info("TestWatcher: Starting test:\n{}", description.getDisplayName());
- }
-
- @Override
- protected void finished(Description description) {
- LOG.info("TestWatcher: Finished test:\n{}", description.getDisplayName());
- }
-
- @Override
- protected void succeeded(Description description) {
- LOG.info("TestWatcher: Test succeeded:\n{}", description.getDisplayName());
- }
-
- @Override
- protected void failed(Throwable ex, Description description) {
- LOG.info("TestWatcher: Test failed:\n{} ", description.getDisplayName(), ex);
- }
-
- @Override
- protected void skipped(AssumptionViolatedException ex, Description description) {
- LOG.info("TestWatcher: Test skipped:\n{} ", description.getDisplayName(), ex);
- }
- };
-}
+++ /dev/null
-/*
- * Copyright (C) 2015 Red Hat, Inc. and others
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.ovsdb.openstack.netvirt.it;
-
-import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.editConfigurationFilePut;
-
-import com.google.common.collect.ObjectArrays;
-import java.util.Calendar;
-import javax.inject.Inject;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
-import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
-import org.ops4j.pax.exam.Option;
-import org.ops4j.pax.exam.karaf.options.LogLevelOption.LogLevel;
-import org.ops4j.pax.exam.util.Filter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractMdsalTestBase extends AbstractConfigTestBase implements BindingAwareProvider {
-
- private static final Logger LOG = LoggerFactory.getLogger(AbstractMdsalTestBase.class);
- private static final int REGISTRATION_TIMEOUT = 10000;
- @Inject @Filter(timeout = 120000)
- private BindingAwareBroker broker;
- private ProviderContext session = null;
-
- public ProviderContext getSession() {
- return session;
- }
-
- @Override
- public void onSessionInitiated(ProviderContext session) {
- LOG.info("Session Initiated: {}",session);
- this.session = session;
- }
-
- @Override
- public void setup() throws Exception {
- super.setup();
- Calendar start = Calendar.getInstance();
- broker.registerProvider(this);
- for (int timer = 0;timer < REGISTRATION_TIMEOUT;timer++) {
- if (session != null) {
- Calendar stop = Calendar.getInstance();
- LOG.info("Registered with the MD-SAL after {} ms",
- stop.getTimeInMillis() - start.getTimeInMillis());
- return;
- } else {
- Thread.sleep(1);
- }
- }
- throw new RuntimeException("Session not initiated after " + REGISTRATION_TIMEOUT + " ms");
- }
-
- @Override
- public Option[] getLoggingOptions() {
- Option[] options = new Option[] {
- editConfigurationFilePut(NetvirtITConstants.ORG_OPS4J_PAX_LOGGING_CFG,
- logConfiguration(AbstractMdsalTestBase.class),
- LogLevel.INFO.name()),
- };
- options = ObjectArrays.concat(options, super.getLoggingOptions(), Option.class);
- return options;
- }
-
-}
package org.opendaylight.ovsdb.openstack.netvirt.it;
import static org.junit.Assert.fail;
+import static org.ops4j.pax.exam.CoreOptions.composite;
import static org.ops4j.pax.exam.CoreOptions.maven;
+import static org.ops4j.pax.exam.CoreOptions.when;
import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.editConfigurationFilePut;
+import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.features;
+import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.karafDistributionConfiguration;
+import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.keepRuntimeFolder;
import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.Lists;
import com.google.common.collect.ObjectArrays;
+
+import java.io.File;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.UnknownHostException;
import org.junit.runner.RunWith;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.mdsal.it.base.AbstractMdsalTestBase;
import org.opendaylight.ovsdb.openstack.netvirt.api.Southbound;
import org.opendaylight.ovsdb.utils.servicehelper.ServiceHelper;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;
import org.ops4j.pax.exam.Configuration;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.junit.PaxExam;
+import org.ops4j.pax.exam.karaf.options.KarafDistributionOption;
import org.ops4j.pax.exam.karaf.options.LogLevelOption;
import org.ops4j.pax.exam.options.MavenUrlReference;
import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
private static final String NETVIRT = "org.opendaylight.ovsdb.openstack.net-virt";
private static final String NETVIRTPROVIDERS = "org.opendaylight.ovsdb.openstack.net-virt-providers";
+ // TODO Constants copied from AbstractConfigTestBase, need to be removed (see TODO below)
+ private static final String PAX_EXAM_UNPACK_DIRECTORY = "target/exam";
+ private static final String KARAF_DEBUG_PORT = "5005";
+ private static final String KARAF_DEBUG_PROP = "karaf.debug";
+ private static final String KEEP_UNPACK_DIRECTORY_PROP = "karaf.keep.unpack";
+
@Inject
private BundleContext bundleContext;
@Configuration
public Option[] config() {
- return super.config();
+ // TODO Figure out how to use the parent Karaf setup, then just use super.config()
+ Option[] options = new Option[] {
+ when(Boolean.getBoolean(KARAF_DEBUG_PROP))
+ .useOptions(KarafDistributionOption.debugConfiguration(KARAF_DEBUG_PORT, true)),
+ karafDistributionConfiguration().frameworkUrl(getKarafDistro())
+ .unpackDirectory(new File(PAX_EXAM_UNPACK_DIRECTORY))
+ .useDeployFolder(false),
+ when(Boolean.getBoolean(KEEP_UNPACK_DIRECTORY_PROP)).useOptions(keepRuntimeFolder()),
+ // Works only if we don't specify the feature repo and name
+ getLoggingOption()};
+ Option[] propertyOptions = getPropertiesOptions();
+ Option[] combinedOptions = new Option[options.length + propertyOptions.length];
+ System.arraycopy(options, 0, combinedOptions, 0, options.length);
+ System.arraycopy(propertyOptions, 0, combinedOptions, options.length, propertyOptions.length);
+ return combinedOptions;
+ }
+
+ @Override
+ public String getKarafDistro() {
+ return maven()
+ .groupId("org.opendaylight.ovsdb")
+ .artifactId("karaf")
+ .versionAsInProject()
+ .type("zip")
+ .getURL();
}
@Override
}
@Override
- public Option[] getFeaturesOptions() {
- return new Option[]{};
- }
-
- @Override
- public Option[] getLoggingOptions() {
- Option[] options;
-
- options = new Option[] {
- editConfigurationFilePut(NetvirtITConstants.ORG_OPS4J_PAX_LOGGING_CFG,
- "log4j.logger.org.opendaylight.ovsdb",
- LogLevelOption.LogLevel.DEBUG.name()),
- editConfigurationFilePut(NetvirtITConstants.ORG_OPS4J_PAX_LOGGING_CFG,
- "log4j.logger.org.opendaylight.ovsdb.lib",
- LogLevelOption.LogLevel.INFO.name()),
+ public Option getLoggingOption() {
+ return composite(
+ editConfigurationFilePut(NetvirtITConstants.ORG_OPS4J_PAX_LOGGING_CFG,
+ "log4j.logger.org.opendaylight.ovsdb",
+ LogLevelOption.LogLevel.DEBUG.name()),
+ editConfigurationFilePut(NetvirtITConstants.ORG_OPS4J_PAX_LOGGING_CFG,
+ "log4j.logger.org.opendaylight.ovsdb.lib",
+ LogLevelOption.LogLevel.INFO.name()),
+ super.getLoggingOption());
/*editConfigurationFilePut(NetvirtITConstants.ORG_OPS4J_PAX_LOGGING_CFG,
"log4j.logger.org.opendaylight.ovsdb.openstack.net-virt",
LogLevelOption.LogLevel.DEBUG.name())*/
- };
-
- options = ObjectArrays.concat(options, super.getLoggingOptions(), Option.class);
- return options;
}
- @Override
- public Option[] getPropertiesOptions() {
+ private Option[] getPropertiesOptions() {
Properties props = new Properties(System.getProperties());
String addressStr = props.getProperty(NetvirtITConstants.SERVER_IPADDRESS,
NetvirtITConstants.DEFAULT_SERVER_IPADDRESS);
}
@Before
- public void setUp() throws InterruptedException {
+ @Override
+ public void setup() throws InterruptedException {
if (setup.get()) {
LOG.info("Skipping setUp, already initialized");
return;
}
public static InstanceIdentifier<Node> createInstanceIdentifier(NodeId nodeId) {
- InstanceIdentifier<Node> nodePath = InstanceIdentifier
+ return InstanceIdentifier
.create(NetworkTopology.class)
.child(Topology.class, new TopologyKey(SouthboundConstants.OVSDB_TOPOLOGY_ID))
.child(Node.class,new NodeKey(nodeId));
- return nodePath;
}
public static InstanceIdentifier<Node> createInstanceIdentifier(ConnectionInfo key,OvsdbBridgeName bridgeName) {
String uriString = SouthboundConstants.OVSDB_URI_PREFIX + "://"
+ new String(ip.getValue()) + ":" + port.getValue();
Uri uri = new Uri(uriString);
- NodeId nodeId = new NodeId(uri);
- return nodeId;
+ return new NodeId(uri);
}
public static InetAddress createInetAddress(IpAddress ip) throws UnknownHostException {
}
public static String createDatapathType(OvsdbBridgeAugmentation mdsalbridge) {
- String datapathtype = new String(SouthboundConstants.DATAPATH_TYPE_MAP.get(DatapathTypeSystem.class));
+ String datapathtype = SouthboundConstants.DATAPATH_TYPE_MAP.get(DatapathTypeSystem.class);
if (mdsalbridge.getDatapathType() != null) {
if (SouthboundConstants.DATAPATH_TYPE_MAP.get(mdsalbridge.getDatapathType()) != null) {
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>openstack.net-virt-providers</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
<networkconfig.neutron.version>0.6.0-SNAPSHOT</networkconfig.neutron.version>
<openflowjava-nicira.version>0.2.0-SNAPSHOT</openflowjava-nicira.version>
<openflowplugin.version>0.2.0-SNAPSHOT</openflowplugin.version>
- <ovsdb.utils.config.version>1.2.0-SNAPSHOT</ovsdb.utils.config.version>
- <ovsdb.utils.mdsal.openflow.version>1.2.0-SNAPSHOT</ovsdb.utils.mdsal.openflow.version>
- <ovsdb.utils.servicehelper.version>1.2.0-SNAPSHOT</ovsdb.utils.servicehelper.version>
+ <ovsdb.utils.config.version>1.2.1-SNAPSHOT</ovsdb.utils.config.version>
+ <ovsdb.utils.mdsal.openflow.version>1.2.1-SNAPSHOT</ovsdb.utils.mdsal.openflow.version>
+ <ovsdb.utils.servicehelper.version>1.2.1-SNAPSHOT</ovsdb.utils.servicehelper.version>
<powermock.version>1.5.2</powermock.version>
<sonar-jacoco-listeners.version>2.4</sonar-jacoco-listeners.version>
- <liblldp.version>0.9.1-SNAPSHOT</liblldp.version>
+ <liblldp.version>0.10.0-SNAPSHOT</liblldp.version>
<root.directory>${env.PWD}</root.directory>
<sonar.jacoco.itReportPath>${root.directory}/target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
</properties>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-artifacts</artifactId>
+ <version>2.0.0-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>mdsal-model-artifacts</artifactId>
+ <version>0.8.0-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+
<dependencies>
<dependency>
<groupId>org.osgi</groupId>
<version>${ovsdb.utils.mdsal.openflow.version}</version>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.mdsal</groupId>
<artifactId>yang-binding</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-inet-types</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-yang-types</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>opendaylight-l2-types</artifactId>
</dependency>
<dependency>
<artifactId>concepts</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-topology</artifactId>
</dependency>
<dependency>
package org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13;
+import java.net.InetAddress;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.neutron.spi.NeutronNetwork;
import org.opendaylight.neutron.spi.NeutronPort;
+import org.opendaylight.neutron.spi.NeutronSecurityGroup;
import org.opendaylight.neutron.spi.Neutron_IPs;
import org.opendaylight.ovsdb.openstack.netvirt.MdsalHelper;
import org.opendaylight.ovsdb.openstack.netvirt.NetworkHandler;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.OvsdbTerminationPointAugmentation;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.OvsdbTerminationPointAugmentation;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.CheckedFuture;
-import java.net.InetAddress;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-
/**
* Open vSwitch OpenFlow 1.3 Networking Provider for OpenStack Neutron
return networkType.equalsIgnoreCase(NetworkHandler.NETWORK_TYPE_VLAN);
}
- private void programLocalRules (String networkType, String segmentationId, Node node,
+ private void programLocalRules(String networkType, String segmentationId, Node node,
OvsdbTerminationPointAugmentation intf) {
LOG.debug("programLocalRules: node: {}, intf: {}, networkType: {}, segmentationId: {}",
node.getNodeId(), intf.getName(), networkType, segmentationId);
long localPort = southbound.getOFPort(intf);
if (localPort == 0) {
- LOG.info("programLocalRules: could not find ofPort for Port {} on Node {}", intf.getName(), node.getNodeId());
+ LOG.info("programLocalRules: could not find ofPort for Port {} on Node {}",
+ intf.getName(), node.getNodeId());
return;
}
LOG.debug("Program local vlan rules for interface {}", intf.getName());
programLocalVlanRules(node, dpid, segmentationId, attachedMac, localPort);
}
- if ((isTunnel(networkType)|| isVlan(networkType))) {
- LOG.debug("programLocalRules: Program fixed security group rules for interface {}", intf.getName());
- // Get the DHCP port for the subnet to which the interface belongs to.
- NeutronPort dhcpPort = securityServicesManager.getDHCPServerPort(intf);
- if (null != dhcpPort) {
- boolean isComputePort =securityServicesManager.isComputePort(intf);
- boolean isLastPortinBridge = securityServicesManager.isLastPortinBridge(node, intf);
- boolean isLastPortinSubnet =false;
- List<Neutron_IPs> srcAddressList = null;
- if(isComputePort) {
- isLastPortinSubnet = securityServicesManager.isLastPortinSubnet(node, intf);
- srcAddressList = securityServicesManager.getIpAddress(node, intf);
- if (null == srcAddressList) {
- LOG.warn("programLocalRules: No Ip address assigned {}", intf);
- return;
- }
- }
- ingressAclProvider.programFixedSecurityACL(dpid,segmentationId, dhcpPort.getMacAddress(), localPort,
- isLastPortinSubnet,isComputePort, true);
- egressAclProvider.programFixedSecurityACL(dpid, segmentationId, attachedMac, localPort,
- srcAddressList, isLastPortinBridge, isComputePort,true);
- } else {
- LOG.warn("programLocalRules: No DCHP port seen in network of {}", intf);
- }
+ if ((isTunnel(networkType) || isVlan(networkType))) {
+ programLocalSecurityGroupRules(attachedMac, node, intf, dpid, localPort, segmentationId, true);
}
- /* If the network type is tunnel based (VXLAN/GRRE/etc) with Neutron Port Security ACLs */
- /* TODO SB_MIGRATION */
- /*if ((networkType.equalsIgnoreCase(NetworkHandler.NETWORK_TYPE_GRE) || networkType.equalsIgnoreCase
- (NetworkHandler.NETWORK_TYPE_VXLAN)) && securityServicesManager.isPortSecurityReady(intf)) {
- LOG.debug("Neutron port has a Port Security Group");
- // Retrieve the security group UUID from the Neutron Port
- NeutronSecurityGroup securityGroupInPort = securityServicesManager.getSecurityGroupInPort(intf);
- LOG.debug("Program Local rules for networkType: {} does contain a Port Security Group: {} " +
- "to be installed on DPID: {}", networkType, securityGroupInPort, dpid);
- ingressAclProvider.programPortSecurityACL(dpid, segmentationId, attachedMac, localPort,
- securityGroupInPort);
- egressAclProvider.programPortSecurityACL(dpid, segmentationId, attachedMac, localPort,
- securityGroupInPort);
- }*/
if (isTunnel(networkType)) {
LOG.debug("Program local bridge rules for interface {}, "
+ "dpid: {}, segmentationId: {}, attachedMac: {}, localPort: {}",
}
}
- private void removeLocalRules (String networkType, String segmentationId, Node node,
+ private void removeLocalRules(String networkType, String segmentationId, Node node,
OvsdbTerminationPointAugmentation intf) {
LOG.debug("removeLocalRules: node: {}, intf: {}, networkType: {}, segmentationId: {}",
node.getNodeId(), intf.getName(), networkType, segmentationId);
LOG.debug("Remove local bridge rules for interface {}", intf.getName());
removeLocalBridgeRules(node, dpid, segmentationId, attachedMac, localPort);
}
- if (isTunnel(networkType)|| isVlan(networkType)) {
- LOG.debug("removeLocalRules: Remove fixed security group rules for interface {}", intf.getName());
- NeutronPort dhcpPort = securityServicesManager.getDHCPServerPort(intf);
- if (null != dhcpPort) {
- List<Neutron_IPs> srcAddressList = securityServicesManager.getIpAddress(node, intf);
- if (null == srcAddressList) {
- LOG.warn("removeLocalRules: No Ip address assigned {}", intf);
- return;
- }
- boolean isLastPortinBridge = securityServicesManager.isLastPortinBridge(node, intf);
- boolean isComputePort =securityServicesManager.isComputePort(intf);
- boolean isLastPortinSubnet =false;
- if (isComputePort)
- {
- isLastPortinSubnet = securityServicesManager.isLastPortinSubnet(node, intf);
- }
- ingressAclProvider.programFixedSecurityACL(dpid, segmentationId, dhcpPort.getMacAddress(), localPort,
- isLastPortinSubnet, isComputePort, false);
- egressAclProvider.programFixedSecurityACL(dpid, segmentationId, attachedMac, localPort,
- srcAddressList, isLastPortinBridge, isComputePort, false);
- }else{
- LOG.warn("removeLocalRules: No DCHP port seen in network of {}", intf);
- }
+ if (isTunnel(networkType) || isVlan(networkType)) {
+ programLocalSecurityGroupRules(attachedMac, node, intf, dpid, localPort, segmentationId, false);
}
} catch (Exception e) {
LOG.error("Exception in removing Local Rules for " + intf + " on " + node, e);
}
}
+ private void programLocalSecurityGroupRules(String attachedMac, Node node, OvsdbTerminationPointAugmentation intf,
+ Long dpid,long localPort, String segmentationId,
+ boolean write) {
+
+ LOG.debug("programLocalRules: Program fixed security group rules for interface {}", intf.getName());
+ NeutronPort dhcpPort = securityServicesManager.getDhcpServerPort(intf);
+ boolean isComputePort = false;
+ boolean isLastPortinBridge = false;
+ boolean isLastPortinSubnet = false;
+ List<Neutron_IPs> srcAddressList = null;
+ if (null != dhcpPort) {
+ isComputePort = securityServicesManager.isComputePort(intf);
+ isLastPortinBridge = securityServicesManager.isLastPortinBridge(node, intf);
+ isLastPortinSubnet = false;
+ if (isComputePort) {
+ isLastPortinSubnet = securityServicesManager.isLastPortinSubnet(node, intf);
+ srcAddressList = securityServicesManager.getIpAddressList(node, intf);
+ if (null == srcAddressList) {
+ LOG.warn("programLocalRules: No Ip address assigned {}", intf);
+ return;
+ }
+ }
+ ingressAclProvider.programFixedSecurityAcl(dpid, segmentationId, dhcpPort.getMacAddress(), localPort,
+ isLastPortinSubnet, isComputePort, write);
+ egressAclProvider.programFixedSecurityAcl(dpid, segmentationId, attachedMac, localPort,
+ srcAddressList, isLastPortinBridge, isComputePort,write);
+ /* If the network type is tunnel based (VXLAN/GRE/etc) with Neutron Port Security ACLs */
+ /* TODO SB_MIGRATION */
+
+ LOG.debug("Neutron port has a Port Security Group");
+ // Retrieve the security group from the Neutron Port and apply the rules
+ if (securityServicesManager.isPortSecurityReady(intf)) {
+ //Associate the security group flows.
+ List<NeutronSecurityGroup> securityGroupListInPort = securityServicesManager
+ .getSecurityGroupInPortList(intf);
+ for (NeutronSecurityGroup securityGroupInPort:securityGroupListInPort) {
+ ingressAclProvider.programPortSecurityAcl(dpid, segmentationId, attachedMac, localPort,
+ securityGroupInPort,srcAddressList, write);
+ egressAclProvider.programPortSecurityAcl(dpid, segmentationId, attachedMac, localPort,
+ securityGroupInPort,srcAddressList, write);
+ }
+ }
+ } else {
+ LOG.warn("programLocalRules: No DCHP port seen in network of {}", intf);
+ }
+ }
@Override
public boolean handleInterfaceUpdate(NeutronNetwork network, Node srcNode,
OvsdbTerminationPointAugmentation intf) {
}
}
LOG.debug("createOutputGroupInstructions: addNew {}", addNew);
- if (addNew) {
+ if (addNew && !buckets.getBucket().isEmpty()) {
/* the new output action is not in the bucket, add to bucket */
- if (!buckets.getBucket().isEmpty()) {
- Bucket bucket = buckets.getBucket().get(0);
- List<Action> bucketActionList = Lists.newArrayList();
- bucketActionList.addAll(bucket.getAction());
- /* set order for new action and add to action list */
- ab.setOrder(bucketActionList.size());
- ab.setKey(new ActionKey(bucketActionList.size()));
- bucketActionList.add(ab.build());
-
- /* set bucket and buckets list. Reset groupBuilder with new buckets.*/
- BucketsBuilder bucketsBuilder = new BucketsBuilder();
- List<Bucket> bucketList = Lists.newArrayList();
- BucketBuilder bucketBuilder = new BucketBuilder();
- bucketBuilder.setBucketId(new BucketId((long) 1));
- bucketBuilder.setKey(new BucketKey(new BucketId((long) 1)));
- bucketBuilder.setAction(bucketActionList);
- bucketList.add(bucketBuilder.build());
- bucketsBuilder.setBucket(bucketList);
- groupBuilder.setBuckets(bucketsBuilder.build());
- LOG.debug("createOutputGroupInstructions: bucketList {}", bucketList);
- }
+ Bucket bucket = buckets.getBucket().get(0);
+ List<Action> bucketActionList = Lists.newArrayList();
+ bucketActionList.addAll(bucket.getAction());
+ /* set order for new action and add to action list */
+ ab.setOrder(bucketActionList.size());
+ ab.setKey(new ActionKey(bucketActionList.size()));
+ bucketActionList.add(ab.build());
+
+ /* set bucket and buckets list. Reset groupBuilder with new buckets.*/
+ BucketsBuilder bucketsBuilder = new BucketsBuilder();
+ List<Bucket> bucketList = Lists.newArrayList();
+ BucketBuilder bucketBuilder = new BucketBuilder();
+ bucketBuilder.setBucketId(new BucketId((long) 1));
+ bucketBuilder.setKey(new BucketKey(new BucketId((long) 1)));
+ bucketBuilder.setAction(bucketActionList);
+ bucketList.add(bucketBuilder.build());
+ bucketsBuilder.setBucket(bucketList);
+ groupBuilder.setBuckets(bucketsBuilder.build());
+ LOG.debug("createOutputGroupInstructions: bucketList {}", bucketList);
}
} else {
/* create group */
package org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
-import org.opendaylight.ovsdb.openstack.netvirt.api.Southbound;
+
import org.opendaylight.ovsdb.openstack.netvirt.api.Action;
import org.opendaylight.ovsdb.openstack.netvirt.api.NodeCacheListener;
import org.opendaylight.ovsdb.openstack.netvirt.api.NodeCacheManager;
+import org.opendaylight.ovsdb.openstack.netvirt.api.Southbound;
import org.opendaylight.ovsdb.openstack.netvirt.providers.ConfigInterface;
import org.opendaylight.ovsdb.utils.servicehelper.ServiceHelper;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
public class PipelineOrchestratorImpl implements ConfigInterface, NodeCacheListener, PipelineOrchestrator {
private static final Logger LOG = LoggerFactory.getLogger(PipelineOrchestratorImpl.class);
private List<Service> staticPipeline = Lists.newArrayList(
return serviceRegistry.get(service);
}
- public void start() {
+ public final void start() {
eventHandler.submit(new Runnable() {
@Override
public void run() {
try {
while (true) {
Node node = queue.take();
- /*
- * Since we are hooking on OpendaylightInventoryListener and as observed in
- * Bug 1997 multiple Threads trying to write to a same table at the same time
- * causes programming issues. Hence delaying the programming by a second to
- * avoid the clash. This hack/workaround should be removed once Bug 1997 is resolved.
- */
LOG.info(">>>>> dequeue: {}", node);
- Thread.sleep(1000);
for (Service service : staticPipeline) {
AbstractServiceInstance serviceInstance = getServiceInstance(service);
- //LOG.info("pipeline: {} - {}", service, serviceInstance);
- if (serviceInstance != null) {
- if (southbound.getBridge(node) != null) {
- serviceInstance.programDefaultPipelineRule(node);
- }
+ if (serviceInstance != null && southbound.getBridge(node) != null) {
+ serviceInstance.programDefaultPipelineRule(node);
}
}
}
package org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.services;
import java.math.BigInteger;
+import java.net.Inet6Address;
import java.net.InetAddress;
import java.util.List;
//import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.l2.types.rev130827.EtherType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;
public class ArpResponderService extends AbstractServiceInstance implements ArpProvider, ConfigInterface {
+ private static final Logger LOG = LoggerFactory.getLogger(ArpResponderService.class);
+
public ArpResponderService() {
super(Service.ARP_RESPONDER);
}
}
}
+ if (ipAddress instanceof Inet6Address) {
+ // WORKAROUND: For now ipv6 is not supported
+ // TODO: implement ipv6 case
+ LOG.debug("ipv6 address case is not implemented yet. dpid {} segmentationId {} macAddressStr {} ipAddress {} action {}",
+ dpid, segmentationId, macAddressStr, ipAddress, action);
+ return new Status(StatusCode.NOTIMPLEMENTED);
+ }
+
MatchUtils.createEtherTypeMatch(matchBuilder, new EtherType(Constants.ARP_ETHERTYPE));
MatchUtils.createArpDstIpv4Match(matchBuilder, MatchUtils.iPv4PrefixFromIPv4Address(ipAddress.getHostAddress()));
import org.opendaylight.neutron.spi.Neutron_IPs;
import org.opendaylight.ovsdb.openstack.netvirt.api.Constants;
import org.opendaylight.ovsdb.openstack.netvirt.api.EgressAclProvider;
+import org.opendaylight.ovsdb.openstack.netvirt.api.SecurityServicesManager;
import org.opendaylight.ovsdb.openstack.netvirt.providers.ConfigInterface;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.AbstractServiceInstance;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.Service;
import org.opendaylight.ovsdb.utils.mdsal.openflow.InstructionUtils;
import org.opendaylight.ovsdb.utils.mdsal.openflow.MatchUtils;
+import org.opendaylight.ovsdb.utils.servicehelper.ServiceHelper;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Prefix;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.PortNumber;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
public class EgressAclService extends AbstractServiceInstance implements EgressAclProvider, ConfigInterface {
private static final Logger LOG = LoggerFactory.getLogger(EgressAclService.class);
- final int DHCP_SOURCE_PORT = 67;
- final int DHCP_DESTINATION_PORT = 68;
- final String HOST_MASK = "/32";
+ private volatile SecurityServicesManager securityServicesManager;
+ private static final int DHCP_SOURCE_PORT = 67;
+ private static final int DHCP_DESTINATION_PORT = 68;
+ private static final String HOST_MASK = "/32";
public EgressAclService() {
super(Service.EGRESS_ACL);
}
@Override
- public void programPortSecurityACL(Long dpid, String segmentationId, String attachedMac, long localPort,
- NeutronSecurityGroup securityGroup) {
+ public void programPortSecurityAcl(Long dpid, String segmentationId, String attachedMac, long localPort,
+ NeutronSecurityGroup securityGroup,
+ List<Neutron_IPs> srcAddressList, boolean write) {
+
+ LOG.trace("programPortSecurityAcl: neutronSecurityGroup: {} ", securityGroup);
+ if (securityGroup == null || securityGroup.getSecurityRules() == null) {
+ return;
+ }
- LOG.trace("programLocalBridgeRulesWithSec neutronSecurityGroup: {} ", securityGroup);
List<NeutronSecurityRule> portSecurityList = securityGroup.getSecurityRules();
/* Iterate over the Port Security Rules in the Port Security Group bound to the port*/
for (NeutronSecurityRule portSecurityRule : portSecurityList) {
/**
- * Neutron Port Security ACL "egress" and "IPv4"
- *
+ * Neutron Port Security Acl "egress" and "IPv4"
* Check that the base conditions for flow based Port Security are true:
* Port Security Rule Direction ("egress") and Protocol ("IPv4")
* Neutron defines the direction "ingress" as the vSwitch to the VM as defined in:
* http://docs.openstack.org/api/openstack-network/2.0/content/security_groups.html
*
*/
- if (portSecurityRule.getSecurityRuleEthertype().equalsIgnoreCase("IPv4") &&
+ if (portSecurityRule.getSecurityRuleEthertype().equals("IPv4")
+ && portSecurityRule.getSecurityRuleDirection().equals("egress")) {
+ LOG.debug("programPortSecurityAcl: Acl Rule matching IPv4 and egress is: {} ", portSecurityRule);
+ if (null == portSecurityRule.getSecurityRuleProtocol()) {
+ /* TODO Rework on the priority values */
+ egressAclIPv4(dpid, segmentationId, attachedMac,
+ write, Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ } else if (null != portSecurityRule.getSecurityRemoteGroupID()) {
+ //Remote Security group is selected
+ List<Neutron_IPs> remoteSrcAddressList = securityServicesManager
+ .getVmListForSecurityGroup(srcAddressList,portSecurityRule.getSecurityRemoteGroupID());
+ if (null != remoteSrcAddressList) {
+ for (Neutron_IPs vmIp :remoteSrcAddressList ) {
+ switch (portSecurityRule.getSecurityRuleProtocol()) {
+ case MatchUtils.TCP:
+ egressAclTcp(dpid, segmentationId, attachedMac,
+ portSecurityRule,vmIp.getIpAddress(), write,
+ Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ break;
+ case MatchUtils.UDP:
+ egressAclUdp(dpid, segmentationId, attachedMac,
+ portSecurityRule,vmIp.getIpAddress(), write,
+ Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ break;
+ default:
+ LOG.error("programPortSecurityAcl: Protocol not supported: {}", portSecurityRule);
+ break;
+ }
+ }
+ }
+ } else {
+ //CIDR is selected
+ switch (portSecurityRule.getSecurityRuleProtocol()) {
+ case MatchUtils.TCP:
+ egressAclTcp(dpid, segmentationId, attachedMac,
+ portSecurityRule, null, write, Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ break;
+ case MatchUtils.UDP:
+ egressAclUdp(dpid, segmentationId, attachedMac,
+ portSecurityRule, null, write, Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ break;
+ default:
+ LOG.error("programPortSecurityAcl: Protocol not supported: {}", portSecurityRule);
+ }
+ }
+ }
+ /*
+ * Code is refactored to handle all the protocols. More
+ * protocols will be added incrementally
+ * TODO Connection tracking will be used to track active TCP connections This code
+ * may be reused then.
+ */
+ /* if (portSecurityRule.getSecurityRuleEthertype().equalsIgnoreCase("IPv4") &&
portSecurityRule.getSecurityRuleDirection().equalsIgnoreCase("egress")) {
LOG.debug("Egress IPV4 ACL Port Security Rule: {} ", portSecurityRule);
// ToDo: Implement Port Range
- /**
- * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (True)
- */
+ *//**
+ * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (True)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
continue;
}
- /**
- * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (True)
- */
+ *//**
+ * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (True)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
continue;
}
- /**
- * TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
- */
+ *//**
+ * TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
portSecurityRule.getSecurityRuleRemoteIpPrefix(), Constants.PROTO_PREFIX_MATCH_PRIORITY);
continue;
}
- /**
- * TCP Proto (False), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
- */
+ *//**
+ * TCP Proto (False), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
portSecurityRule.getSecurityRuleRemoteIpPrefix(), Constants.PREFIX_MATCH_PRIORITY);
continue;
}
- /**
+ *//**
* TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (False)
- */
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
Constants.PROTO_PORT_MATCH_PRIORITY);
continue;
}
- /**
- * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (False)
- */
+ *//**
+ * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (False)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
portSecurityRule.getSecurityRulePortMin(), Constants.PROTO_PORT_MATCH_PRIORITY);
continue;
}
- /**
- * TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (False or 0.0.0.0/0)
- */
+ *//**
+ * TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (False or 0.0.0.0/0)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
continue;
}
LOG.debug("ACL Match combination not found for rule: {}", portSecurityRule);
- }
+ }*/
}
}
@Override
- public void programFixedSecurityACL(Long dpid, String segmentationId, String attachedMac,
- long localPort, List<Neutron_IPs> srcAddressList, boolean isLastPortinBridge, boolean isComputePort ,boolean write) {
+ public void programFixedSecurityAcl(Long dpid, String segmentationId, String attachedMac,
+ long localPort, List<Neutron_IPs> srcAddressList,
+ boolean isLastPortinBridge, boolean isComputePort ,boolean write) {
// If it is the only port in the bridge add the rule to allow any DHCP client traffic
if (isLastPortinBridge) {
- egressACLDHCPAllowClientTrafficFromVm(dpid, write, Constants.PROTO_DHCP_CLIENT_TRAFFIC_MATCH_PRIORITY);
+ egressAclDhcpAllowClientTrafficFromVm(dpid, write, Constants.PROTO_DHCP_CLIENT_TRAFFIC_MATCH_PRIORITY);
}
- if(isComputePort) {
- // add rule to drop the DHCP server traffic originating from the vm.
- egressACLDHCPDropServerTrafficfromVM(dpid, localPort, write, Constants.PROTO_DHCP_CLIENT_SPOOF_MATCH_PRIORITY_DROP);
+ if (isComputePort) {
+ // add rule to drop the DHCP server traffic originating from the vm.
+ egressAclDhcpDropServerTrafficfromVm(dpid, localPort, write,
+ Constants.PROTO_DHCP_CLIENT_SPOOF_MATCH_PRIORITY_DROP);
//Adds rule to check legitimate ip/mac pair for each packet from the vm
- for(Neutron_IPs srcAddress : srcAddressList) {
+ for (Neutron_IPs srcAddress : srcAddressList) {
String addressWithPrefix = srcAddress.getIpAddress() + HOST_MASK;
- egressACLAllowTrafficFromVmIpMacPair(dpid, localPort, attachedMac, addressWithPrefix, Constants.PROTO_VM_IP_MAC_MATCH_PRIORITY,write);
+ egressAclAllowTrafficFromVmIpMacPair(dpid, localPort, attachedMac, addressWithPrefix,
+ Constants.PROTO_VM_IP_MAC_MATCH_PRIORITY,write);
}
}
}
+ /**
+ * Allows IPv4 packet egress from the src mac address.
+ * @param dpidLong the dpid
+ * @param segmentationId the segmentation id
+ * @param srcMac the src mac address
+ * @param write add or remove
+ * @param protoPortMatchPriority the protocol match priority.
+ */
+ private void egressAclIPv4(Long dpidLong, String segmentationId, String srcMac,
+ boolean write, Integer protoPortMatchPriority ) {
+ String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
+ MatchBuilder matchBuilder = new MatchBuilder();
+ NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
+ String flowId = "Egress_IP" + segmentationId + "_" + srcMac + "_Permit_";
+ matchBuilder = MatchUtils.createEtherMatchWithType(matchBuilder,srcMac,null);
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
+ }
+
+ /**
+ * Creates a egress match with src macaddress. If dest address is specified
+ * destination specific match will be created. Otherwise a match with a
+ * CIDR will be created.
+ * @param dpidLong the dpid
+ * @param segmentationId the segmentation id
+ * @param srcMac the source mac address.
+ * @param portSecurityRule the security rule in the SG
+ * @param dstAddress the destination IP address
+ * @param write add or delete
+ * @param protoPortMatchPriority the protocol match priority
+ */
+ private void egressAclTcp(Long dpidLong, String segmentationId, String srcMac,
+ NeutronSecurityRule portSecurityRule, String dstAddress,
+ boolean write, Integer protoPortMatchPriority) {
+ MatchBuilder matchBuilder = new MatchBuilder();
+ String flowId = "Egress_Custom_Tcp" + segmentationId + "_" + srcMac + "_";
+ matchBuilder = MatchUtils.createEtherMatchWithType(matchBuilder,srcMac,null);
+ if (portSecurityRule.getSecurityRulePortMin().equals(portSecurityRule.getSecurityRulePortMax())) {
+ flowId = flowId + portSecurityRule.getSecurityRulePortMin();
+ matchBuilder = MatchUtils.addLayer4Match(matchBuilder, MatchUtils.TCP_SHORT, 0,
+ portSecurityRule.getSecurityRulePortMin());
+ } else {
+ /*TODO TCP PortRange Match*/
+
+ }
+
+ if (null != dstAddress) {
+ flowId = flowId + dstAddress;
+ matchBuilder = MatchUtils.addRemoteIpPrefix(matchBuilder,null,
+ MatchUtils.iPv4PrefixFromIPv4Address(dstAddress));
+
+ } else if (null != portSecurityRule.getSecurityRuleRemoteIpPrefix()) {
+ flowId = flowId + portSecurityRule.getSecurityRuleRemoteIpPrefix();
+ matchBuilder = MatchUtils.addRemoteIpPrefix(matchBuilder,null,
+ new Ipv4Prefix(portSecurityRule
+ .getSecurityRuleRemoteIpPrefix()));
+ }
+ flowId = flowId + "_Permit_";
+ String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
+ NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
+
+ }
+ /**
+ * Creates a egress match with src macaddress. If dest address is specified
+ * destination specific match will be created. Otherwise a match with a
+ * CIDR will be created.
+ * @param dpidLong the dpid
+ * @param segmentationId the segmentation id
+ * @param srcMac the source mac address.
+ * @param portSecurityRule the security rule in the SG
+ * @param dstAddress the destination IP address
+ * @param write add or delete
+ * @param protoPortMatchPriority the protocol match priority
+ */
+ private void egressAclUdp(Long dpidLong, String segmentationId, String srcMac,
+ NeutronSecurityRule portSecurityRule, String dstAddress,
+ boolean write, Integer protoPortMatchPriority) {
+
+ MatchBuilder matchBuilder = new MatchBuilder();
+ String flowId = "Egress_UDP" + segmentationId + "_" + srcMac + "_";
+ matchBuilder = MatchUtils.createEtherMatchWithType(matchBuilder,srcMac,null);
+ if (portSecurityRule.getSecurityRulePortMin().equals(portSecurityRule.getSecurityRulePortMax())) {
+ flowId = flowId + portSecurityRule.getSecurityRulePortMin();
+ matchBuilder = MatchUtils.addLayer4Match(matchBuilder, MatchUtils.UDP_SHORT, 0,
+ portSecurityRule.getSecurityRulePortMin());
+ } else {
+ /*TODO UDP PortRange Match*/
+
+ }
+
+ if (null != dstAddress) {
+ flowId = flowId + dstAddress;
+ matchBuilder = MatchUtils.addRemoteIpPrefix(matchBuilder,null,
+ MatchUtils.iPv4PrefixFromIPv4Address(dstAddress));
+
+ } else if (null != portSecurityRule.getSecurityRuleRemoteIpPrefix()) {
+ flowId = flowId + portSecurityRule.getSecurityRuleRemoteIpPrefix();
+ matchBuilder = MatchUtils.addRemoteIpPrefix(matchBuilder, null,
+ new Ipv4Prefix(portSecurityRule
+ .getSecurityRuleRemoteIpPrefix()));
+ }
+ flowId = flowId + "_Permit_";
+ String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
+ NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
+
+ }
public void egressACLDefaultTcpDrop(Long dpidLong, String segmentationId, String attachedMac,
int priority, boolean write) {
Ipv4Prefix srcIpPrefix = new Ipv4Prefix(securityRuleIpPrefix);
flowBuilder.setMatch(MatchUtils
- .createSmacTcpSynDstIpPrefixTcpPort(matchBuilder, new MacAddress(attachedMac),
- tcpPort, Constants.TCP_SYN, segmentationId, srcIpPrefix).build());
+ .createSmacTcpSynDstIpPrefixTcpPort(matchBuilder, new MacAddress(attachedMac),
+ tcpPort, Constants.TCP_SYN, segmentationId, srcIpPrefix).build());
LOG.debug(" MatchBuilder contains: {}", flowBuilder.getMatch());
String flowId = "UcastEgress_" + segmentationId + "_" + attachedMac +
- securityRulePortMin + securityRuleIpPrefix;
+ securityRulePortMin + securityRuleIpPrefix;
// Add Flow Attributes
flowBuilder.setId(new FlowId(flowId));
FlowKey key = new FlowKey(new FlowId(flowId));
FlowBuilder flowBuilder = new FlowBuilder();
flowBuilder.setMatch(MatchUtils
- .createDmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, null).build());
+ .createDmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, null).build());
flowBuilder.setMatch(MatchUtils
- .createTunnelIDMatch(matchBuilder, new BigInteger(segmentationId)).build());
+ .createTunnelIDMatch(matchBuilder, new BigInteger(segmentationId)).build());
LOG.debug("MatchBuilder contains: {}", flowBuilder.getMatch());
String flowId = "EgressAllProto_" + segmentationId + "_" +
- attachedMac + "_AllowEgressTCPSyn_" + securityRuleProtcol;
+ attachedMac + "_AllowEgressTCPSyn_" + securityRuleProtcol;
// Add Flow Attributes
flowBuilder.setId(new FlowId(flowId));
FlowKey key = new FlowKey(new FlowId(flowId));
FlowBuilder flowBuilder = new FlowBuilder();
flowBuilder.setMatch(MatchUtils.createTunnelIDMatch(matchBuilder, new BigInteger(segmentationId))
- .build());
+ .build());
if (securityRuleIpPrefix != null) {
Ipv4Prefix srcIpPrefix = new Ipv4Prefix(securityRuleIpPrefix);
flowBuilder.setMatch(MatchUtils
- .createSmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, srcIpPrefix)
- .build());
+ .createSmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, srcIpPrefix)
+ .build());
} else {
flowBuilder.setMatch(MatchUtils
- .createSmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, null)
- .build());
+ .createSmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, null)
+ .build());
}
LOG.debug("MatchBuilder contains: {}", flowBuilder.getMatch());
String flowId = "Egress_Proto_ACL" + segmentationId + "_" +
- attachedMac + "_Permit_" + securityRuleIpPrefix;
+ attachedMac + "_Permit_" + securityRuleIpPrefix;
// Add Flow Attributes
flowBuilder.setId(new FlowId(flowId));
FlowKey key = new FlowKey(new FlowId(flowId));
}
/**
- * Adds flow to allow any DHCP client traffic
+ * Adds flow to allow any DHCP client traffic.
*
* @param dpidLong the dpid
* @param write whether to write or delete the flow
* @param protoPortMatchPriority the priority
*/
- public void egressACLDHCPAllowClientTrafficFromVm(Long dpidLong,
- boolean write, Integer protoPortMatchPriority) {
+ private void egressAclDhcpAllowClientTrafficFromVm(Long dpidLong,
+ boolean write, Integer protoPortMatchPriority) {
String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
MatchBuilder matchBuilder = new MatchBuilder();
NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
- FlowBuilder flowBuilder = new FlowBuilder();
- flowBuilder.setMatch(MatchUtils.createDHCPMatch(matchBuilder, DHCP_DESTINATION_PORT, DHCP_SOURCE_PORT).build());
- LOG.debug("egressACLDHCPAllowClientTrafficFromVm: MatchBuilder contains: {}", flowBuilder.getMatch());
+ MatchUtils.createDhcpMatch(matchBuilder, DHCP_DESTINATION_PORT, DHCP_SOURCE_PORT).build();
+ LOG.debug("egressAclDHCPAllowClientTrafficFromVm: MatchBuilder contains: {}", matchBuilder);
String flowId = "Egress_DHCP_Client" + "_Permit_";
- // Add Flow Attributes
- flowBuilder.setId(new FlowId(flowId));
- FlowKey key = new FlowKey(new FlowId(flowId));
- flowBuilder.setStrict(false);
- flowBuilder.setPriority(protoPortMatchPriority);
- flowBuilder.setBarrier(true);
- flowBuilder.setTableId(this.getTable());
- flowBuilder.setKey(key);
- flowBuilder.setFlowName(flowId);
- flowBuilder.setHardTimeout(0);
- flowBuilder.setIdleTimeout(0);
-
- if (write) {
- // Instantiate the Builders for the OF Actions and Instructions
- InstructionsBuilder isb = new InstructionsBuilder();
- List<Instruction> instructionsList = Lists.newArrayList();
-
- InstructionBuilder ib = this.getMutablePipelineInstructionBuilder();
- ib.setOrder(0);
- ib.setKey(new InstructionKey(0));
- instructionsList.add(ib.build());
- isb.setInstruction(instructionsList);
-
- LOG.debug("egressACLDHCPAllowClientTrafficFromVm: Instructions contain: {}", ib.getInstruction());
- // Add InstructionsBuilder to FlowBuilder
- flowBuilder.setInstructions(isb.build());
- writeFlow(flowBuilder, nodeBuilder);
- } else {
- removeFlow(flowBuilder, nodeBuilder);
- }
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
}
/**
* @param write is write or delete
* @param protoPortMatchPriority the priority
*/
- public void egressACLDHCPDropServerTrafficfromVM(Long dpidLong, long localPort,
- boolean write, Integer protoPortMatchPriority) {
+ private void egressAclDhcpDropServerTrafficfromVm(Long dpidLong, long localPort,
+ boolean write, Integer protoPortMatchPriority) {
- String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
MatchBuilder matchBuilder = new MatchBuilder();
- NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
- FlowBuilder flowBuilder = new FlowBuilder();
+ //FlowBuilder flowBuilder = new FlowBuilder();
MatchUtils.createInPortMatch(matchBuilder, dpidLong, localPort);
- flowBuilder.setMatch(MatchUtils.createDHCPMatch(matchBuilder, DHCP_SOURCE_PORT, DHCP_DESTINATION_PORT).build());
-
- LOG.debug("egressACLDHCPDropServerTrafficfromVM: MatchBuilder contains: {}", flowBuilder.getMatch());
+ MatchUtils.createDhcpMatch(matchBuilder, DHCP_SOURCE_PORT, DHCP_DESTINATION_PORT).build();
+ LOG.debug("egressAclDHCPDropServerTrafficfromVM: MatchBuilder contains: {}", matchBuilder);
String flowId = "Egress_DHCP_Server" + "_" + localPort + "_DROP_";
- // Add Flow Attributes
- flowBuilder.setId(new FlowId(flowId));
- FlowKey key = new FlowKey(new FlowId(flowId));
- flowBuilder.setStrict(false);
- flowBuilder.setPriority(protoPortMatchPriority);
- flowBuilder.setBarrier(true);
- flowBuilder.setTableId(this.getTable());
- flowBuilder.setKey(key);
- flowBuilder.setFlowName(flowId);
- flowBuilder.setHardTimeout(0);
- flowBuilder.setIdleTimeout(0);
-
- if (write) {
- // Instantiate the Builders for the OF Actions and Instructions
- InstructionBuilder ib = new InstructionBuilder();
- InstructionsBuilder isb = new InstructionsBuilder();
- List<Instruction> instructionsList = Lists.newArrayList();
-
- InstructionUtils.createDropInstructions(ib);
- ib.setOrder(0);
- ib.setKey(new InstructionKey(0));
- instructionsList.add(ib.build());
- isb.setInstruction(instructionsList);
+ String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
+ NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, true);
- LOG.debug("egressACLDHCPDropServerTrafficfromVM: Instructions contain: {}", ib.getInstruction());
- // Add InstructionsBuilder to FlowBuilder
- flowBuilder.setInstructions(isb.build());
- writeFlow(flowBuilder, nodeBuilder);
- } else {
- removeFlow(flowBuilder, nodeBuilder);
- }
}
/**
* @param protoPortMatchPriority the priority
* @param write is write or delete
*/
- public void egressACLAllowTrafficFromVmIpMacPair(Long dpidLong, long localPort,
- String attachedMac, String srcIp, Integer protoPortMatchPriority, boolean write) {
-
- String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
+ private void egressAclAllowTrafficFromVmIpMacPair(Long dpidLong, long localPort,
+ String attachedMac, String srcIp,
+ Integer protoPortMatchPriority, boolean write) {
MatchBuilder matchBuilder = new MatchBuilder();
+ MatchUtils.createSrcL3Ipv4MatchWithMac(matchBuilder, new Ipv4Prefix(srcIp),new MacAddress(attachedMac));
+ MatchUtils.createInPortMatch(matchBuilder, dpidLong, localPort);
+ LOG.debug("egressAclAllowTrafficFromVmIpMacPair: MatchBuilder contains: {}", matchBuilder);
+ String flowId = "Egress_Allow_VM_IP_MAC" + "_" + localPort + attachedMac + "_Permit_";
+ String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
+
+ }
+
+ /**
+ * Add or remove flow to the node.
+ *
+ * @param flowId the the flow id
+ * @param nodeBuilder the node builder
+ * @param matchBuilder the matchbuilder
+ * @param protoPortMatchPriority the protocol priority
+ * @param write whether it is a write
+ * @param drop whether it is a drop or forward
+ */
+ private void syncFlow(String flowId, NodeBuilder nodeBuilder,
+ MatchBuilder matchBuilder,Integer protoPortMatchPriority,
+ boolean write,boolean drop) {
FlowBuilder flowBuilder = new FlowBuilder();
- MatchUtils.createSrcL3IPv4MatchWithMac(matchBuilder, new Ipv4Prefix(srcIp),new MacAddress(attachedMac));
- MatchUtils.createInPortMatch(matchBuilder, dpidLong, localPort);
flowBuilder.setMatch(matchBuilder.build());
-
- LOG.debug("egressACLAllowTrafficFromVmIpMacPair: MatchBuilder contains: {}", flowBuilder.getMatch());
- String flowId = "Egress_Allow_VM_IP_MAC" + "_" + localPort + attachedMac + "_Permit_";
- // Add Flow Attributes
flowBuilder.setId(new FlowId(flowId));
FlowKey key = new FlowKey(new FlowId(flowId));
flowBuilder.setStrict(false);
if (write) {
// Instantiate the Builders for the OF Actions and Instructions
- InstructionsBuilder isb = new InstructionsBuilder();
- List<Instruction> instructionsList = Lists.newArrayList();
InstructionBuilder ib = this.getMutablePipelineInstructionBuilder();
+ if (drop) {
+ InstructionUtils.createDropInstructions(ib);
+ }
ib.setOrder(0);
ib.setKey(new InstructionKey(0));
+ InstructionsBuilder isb = new InstructionsBuilder();
+ List<Instruction> instructionsList = Lists.newArrayList();
instructionsList.add(ib.build());
isb.setInstruction(instructionsList);
-
- LOG.debug("egressACLAllowTrafficFromVmIpMacPair: Instructions contain: {}", ib.getInstruction());
- // Add InstructionsBuilder to FlowBuilder
flowBuilder.setInstructions(isb.build());
writeFlow(flowBuilder, nodeBuilder);
} else {
removeFlow(flowBuilder, nodeBuilder);
}
+
}
+
+
+
@Override
public void setDependencies(BundleContext bundleContext, ServiceReference serviceReference) {
super.setDependencies(bundleContext.getServiceReference(EgressAclProvider.class.getName()), this);
+ securityServicesManager =
+ (SecurityServicesManager) ServiceHelper.getGlobalInstance(SecurityServicesManager.class, this);
}
@Override
import org.opendaylight.neutron.spi.NeutronSecurityGroup;
import org.opendaylight.neutron.spi.NeutronSecurityRule;
+import org.opendaylight.neutron.spi.Neutron_IPs;
import org.opendaylight.ovsdb.openstack.netvirt.api.Constants;
import org.opendaylight.ovsdb.openstack.netvirt.api.IngressAclProvider;
+import org.opendaylight.ovsdb.openstack.netvirt.api.SecurityServicesManager;
import org.opendaylight.ovsdb.openstack.netvirt.providers.ConfigInterface;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.AbstractServiceInstance;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.Service;
import org.opendaylight.ovsdb.utils.mdsal.openflow.InstructionUtils;
import org.opendaylight.ovsdb.utils.mdsal.openflow.MatchUtils;
+import org.opendaylight.ovsdb.utils.servicehelper.ServiceHelper;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Prefix;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.PortNumber;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
public class IngressAclService extends AbstractServiceInstance implements IngressAclProvider, ConfigInterface {
- static final Logger LOG = LoggerFactory.getLogger(IngressAclService.class);
+ private static final Logger LOG = LoggerFactory.getLogger(IngressAclService.class);
+ private volatile SecurityServicesManager securityServicesManager;
public IngressAclService() {
super(Service.INGRESS_ACL);
}
@Override
- public void programPortSecurityACL(Long dpid, String segmentationId, String attachedMac,
- long localPort, NeutronSecurityGroup securityGroup) {
+ public void programPortSecurityAcl(Long dpid, String segmentationId, String attachedMac,
+ long localPort, NeutronSecurityGroup securityGroup,
+ List<Neutron_IPs> srcAddressList, boolean write) {
LOG.trace("programLocalBridgeRulesWithSec neutronSecurityGroup: {} ", securityGroup);
+ if (securityGroup == null || securityGroup.getSecurityRules() == null) {
+ return;
+ }
+
List<NeutronSecurityRule> portSecurityList = securityGroup.getSecurityRules();
/* Iterate over the Port Security Rules in the Port Security Group bound to the port*/
for (NeutronSecurityRule portSecurityRule : portSecurityList) {
/**
- * Neutron Port Security ACL "ingress" and "IPv4"
- *
+ * Neutron Port Security Acl "ingress" and "IPv4"
* Check that the base conditions for flow based Port Security are true:
* Port Security Rule Direction ("ingress") and Protocol ("IPv4")
* Neutron defines the direction "ingress" as the vSwitch to the VM as defined in:
* http://docs.openstack.org/api/openstack-network/2.0/content/security_groups.html
*
*/
- if (portSecurityRule.getSecurityRuleEthertype().equalsIgnoreCase("IPv4") &&
- portSecurityRule.getSecurityRuleDirection().equalsIgnoreCase("ingress")) {
- LOG.debug("ACL Rule matching IPv4 and ingress is: {} ", portSecurityRule);
+
+ if ("IPv4".equals(portSecurityRule.getSecurityRuleEthertype())
+ && "ingress".equals(portSecurityRule.getSecurityRuleDirection())) {
+ LOG.debug("Acl Rule matching IPv4 and ingress is: {} ", portSecurityRule);
+ if (null == portSecurityRule.getSecurityRuleProtocol()) {
+ ingressAclIPv4(dpid, segmentationId, attachedMac,
+ write, Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ } else if (null != portSecurityRule.getSecurityRemoteGroupID()) {
+ //Remote Security group is selected
+ List<Neutron_IPs> remoteSrcAddressList = securityServicesManager
+ .getVmListForSecurityGroup(srcAddressList,portSecurityRule.getSecurityRemoteGroupID());
+ if (null != remoteSrcAddressList) {
+ for (Neutron_IPs vmIp :remoteSrcAddressList ) {
+ switch (portSecurityRule.getSecurityRuleProtocol()) {
+ case MatchUtils.TCP:
+ ingressAclTcp(dpid, segmentationId, attachedMac, portSecurityRule,vmIp.getIpAddress(),
+ write, Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ break;
+ case MatchUtils.UDP:
+ ingressAclUdp(dpid, segmentationId, attachedMac, portSecurityRule,vmIp.getIpAddress(),
+ write, Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ break;
+ default:
+ LOG.error("programPortSecurityAcl: Protocol not supported", portSecurityRule);
+ break;
+ }
+ }
+ }
+ } else {
+ //CIDR is selected
+ switch (portSecurityRule.getSecurityRuleProtocol()) {
+ case MatchUtils.TCP:
+ ingressAclTcp(dpid, segmentationId, attachedMac,
+ portSecurityRule, null, write, Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ break;
+ case MatchUtils.UDP:
+ ingressAclUdp(dpid, segmentationId, attachedMac,
+ portSecurityRule, null, write, Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
+ break;
+ default:
+ LOG.error("programPortSecurityAcl: Protocol not supported", portSecurityRule);
+ }
+ }
+
/**
* TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (True)
- */
+ * TODO Some part of the code will be used when conntrack is supported
+
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
/**
* TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (True)
*/
- if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
+ /*if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
(!String.valueOf(portSecurityRule.getSecurityRuleRemoteIpPrefix()).equalsIgnoreCase("null") &&
portSecurityRule.getSecurityRuleRemoteIpPrefix(), Constants.PROTO_PORT_PREFIX_MATCH_PRIORITY);
continue;
}
- /**
+ *//**
* TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
- */
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
portSecurityRule.getSecurityRuleRemoteIpPrefix(), Constants.PROTO_PREFIX_MATCH_PRIORITY);
continue;
}
- /**
- * TCP Proto (False), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
- */
+ *//**
+ * TCP Proto (False), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
portSecurityRule.getSecurityRuleRemoteIpPrefix(), Constants.PREFIX_MATCH_PRIORITY);
continue;
}
- /**
- * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (False)
- */
+ *//**
+ * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (False)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
Constants.PREFIX_PORT_MATCH_PRIORITY_DROP);
continue;
}
- /**
- * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (False)
- */
+ *//**
+ * TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (False)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
!String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
portSecurityRule.getSecurityRulePortMin(), Constants.PROTO_PORT_MATCH_PRIORITY);
continue;
}
- /**
- * TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (False or 0.0.0.0/0)
- */
+ *//**
+ * TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (False or 0.0.0.0/0)
+ *//*
if (String.valueOf(portSecurityRule.getSecurityRuleProtocol()).equalsIgnoreCase("tcp") &&
String.valueOf(portSecurityRule.getSecurityRulePortMin()).equalsIgnoreCase("null") &&
String.valueOf(portSecurityRule.getSecurityRulePortMax()).equalsIgnoreCase("null") &&
handleIngressAllowProto(dpid, segmentationId, attachedMac, true,
portSecurityRule.getSecurityRuleProtocol(), Constants.PROTO_MATCH_PRIORITY);
continue;
- }
- LOG.debug("Ingress ACL Match combination not found for rule: {}", portSecurityRule);
+ }*/
+ LOG.debug("Ingress Acl Match combination not found for rule: {}", portSecurityRule);
}
}
}
@Override
- public void programFixedSecurityACL(Long dpid, String segmentationId, String dhcpMacAddress,
- long localPort, boolean isLastPortinSubnet, boolean isComputePort, boolean write){
- //If this port is the only port in the compute node add the DHCP server rule.
+ public void programFixedSecurityAcl(Long dpid, String segmentationId, String dhcpMacAddress,
+ long localPort, boolean isLastPortinSubnet,
+ boolean isComputePort, boolean write) {
+ //If this port is the only port in the compute node add the DHCP server rule.
if (isLastPortinSubnet && isComputePort ) {
- ingressACLDHCPAllowServerTraffic(dpid, segmentationId,dhcpMacAddress, write,Constants.PROTO_DHCP_SERVER_MATCH_PRIORITY);
+ ingressAclDhcpAllowServerTraffic(dpid, segmentationId,dhcpMacAddress,
+ write,Constants.PROTO_DHCP_SERVER_MATCH_PRIORITY);
}
}
+ /**
+ * Allows IPv4 packet ingress to the destination mac address.
+ * @param dpidLong the dpid
+ * @param segmentationId the segmentation id
+ * @param dstMac the destination mac address
+ * @param write add or remove
+ * @param protoPortMatchPriority the protocol match priority.
+ */
+ private void ingressAclIPv4(Long dpidLong, String segmentationId, String dstMac,
+ boolean write, Integer protoPortMatchPriority ) {
+ String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
+ MatchBuilder matchBuilder = new MatchBuilder();
+ NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
+ String flowId = "Ingress_IP" + segmentationId + "_" + dstMac + "_Permit_";
+ matchBuilder = MatchUtils.createEtherMatchWithType(matchBuilder,null,dstMac);
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
+
+ }
+ /**
+ * Creates an ingress match to the dst mac address. If src address is specified
+ * source specific match will be created. Otherwise a match with a CIDR will
+ * be created.
+ * @param dpidLong the dpid
+ * @param segmentationId the segmentation id
+ * @param dstMac the destination mac address.
+ * @param portSecurityRule the security rule in the SG
+ * @param srcAddress the source IP address
+ * @param write add or delete
+ * @param protoPortMatchPriority the protocol match priority
+ */
+ private void ingressAclTcp(Long dpidLong, String segmentationId, String dstMac,
+ NeutronSecurityRule portSecurityRule, String srcAddress, boolean write,
+ Integer protoPortMatchPriority ) {
+
+ MatchBuilder matchBuilder = new MatchBuilder();
+ FlowBuilder flowBuilder = new FlowBuilder();
+ String flowId = "Ingress_Custom_Tcp" + segmentationId + "_" + dstMac + "_";
+ matchBuilder = MatchUtils.createEtherMatchWithType(matchBuilder,null,dstMac);
+ if (portSecurityRule.getSecurityRulePortMin().equals(portSecurityRule.getSecurityRulePortMax())) {
+ flowId = flowId + portSecurityRule.getSecurityRulePortMin();
+ matchBuilder = MatchUtils.addLayer4Match(matchBuilder, MatchUtils.TCP_SHORT, 0,
+ portSecurityRule.getSecurityRulePortMin());
+ } else {
+ /*TODO TCP PortRange Match*/
+
+ }
+
+ if (null != srcAddress) {
+ flowId = flowId + srcAddress;
+ matchBuilder = MatchUtils.addRemoteIpPrefix(matchBuilder,
+ MatchUtils.iPv4PrefixFromIPv4Address(srcAddress),null);
+
+ } else if (null != portSecurityRule.getSecurityRuleRemoteIpPrefix()) {
+ flowId = flowId + portSecurityRule.getSecurityRuleRemoteIpPrefix();
+ matchBuilder = MatchUtils.addRemoteIpPrefix(matchBuilder,
+ new Ipv4Prefix(portSecurityRule
+ .getSecurityRuleRemoteIpPrefix()),null);
+ }
+ String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
+ NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
+ flowId = flowId + "_Permit_";
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
+
+ }
+
+ /**
+ * Creates an ingress match to the dst mac address. If src address is specified
+ * source specific match will be created. Otherwise a match with a CIDR will
+ * be created.
+ * @param dpidLong the dpid
+ * @param segmentationId the segmentation id
+ * @param dstMac the destination mac address.
+ * @param portSecurityRule the security rule in the SG
+ * @param srcAddress the source IP address
+ * @param write add or delete
+ * @param protoPortMatchPriority the protocol match priority
+ */
+ private void ingressAclUdp(Long dpidLong, String segmentationId, String dstMac,
+ NeutronSecurityRule portSecurityRule, String srcAddress,
+ boolean write, Integer protoPortMatchPriority ) {
+ MatchBuilder matchBuilder = new MatchBuilder();
+ String flowId = "ingressAclUDP" + segmentationId + "_" + dstMac + "_";
+ matchBuilder = MatchUtils.createEtherMatchWithType(matchBuilder,null,dstMac);
+ if (portSecurityRule.getSecurityRulePortMin().equals(portSecurityRule.getSecurityRulePortMax())) {
+ flowId = flowId + portSecurityRule.getSecurityRulePortMin();
+ matchBuilder = MatchUtils.addLayer4Match(matchBuilder, MatchUtils.UDP_SHORT, 0,
+ portSecurityRule.getSecurityRulePortMin());
+ } else {
+ /*TODO UDP PortRange Match*/
+
+ }
+
+ if (null != srcAddress) {
+ flowId = flowId + srcAddress;
+ matchBuilder = MatchUtils.addRemoteIpPrefix(matchBuilder,
+ MatchUtils.iPv4PrefixFromIPv4Address(srcAddress), null);
+
+ } else if (null != portSecurityRule.getSecurityRuleRemoteIpPrefix()) {
+ flowId = flowId + portSecurityRule.getSecurityRuleRemoteIpPrefix();
+ matchBuilder = MatchUtils.addRemoteIpPrefix(matchBuilder,
+ new Ipv4Prefix(portSecurityRule.getSecurityRuleRemoteIpPrefix()),null);
+ }
+ String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
+ NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
+ flowId = flowId + "_Permit_";
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
+
+ }
+
public void ingressACLTcpSyn(Long dpidLong, String segmentationId, String attachedMac, boolean write,
- Integer securityRulePortMin, Integer protoPortMatchPriority) {
+ Integer securityRulePortMin, Integer protoPortMatchPriority) {
String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
PortNumber tcpPort = new PortNumber(securityRulePortMin);
}
public void ingressACLTcpPortWithPrefix(Long dpidLong, String segmentationId, String attachedMac,
- boolean write, Integer securityRulePortMin, String securityRuleIpPrefix,
- Integer protoPortPrefixMatchPriority) {
+ boolean write, Integer securityRulePortMin, String securityRuleIpPrefix,
+ Integer protoPortPrefixMatchPriority) {
String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
PortNumber tcpPort = new PortNumber(securityRulePortMin);
Ipv4Prefix srcIpPrefix = new Ipv4Prefix(securityRuleIpPrefix);
flowBuilder.setMatch(MatchUtils
- .createDmacTcpSynDstIpPrefixTcpPort(matchBuilder, new MacAddress(attachedMac),
- tcpPort, Constants.TCP_SYN, segmentationId, srcIpPrefix).build());
+ .createDmacTcpSynDstIpPrefixTcpPort(matchBuilder, new MacAddress(attachedMac),
+ tcpPort, Constants.TCP_SYN, segmentationId, srcIpPrefix).build());
LOG.debug(" MatchBuilder contains: {}", flowBuilder.getMatch());
String flowId = "UcastOut2_" + segmentationId + "_" + attachedMac +
}
public void handleIngressAllowProto(Long dpidLong, String segmentationId, String attachedMac, boolean write,
- String securityRuleProtcol, Integer protoMatchPriority) {
+ String securityRuleProtcol, Integer protoMatchPriority) {
String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
FlowBuilder flowBuilder = new FlowBuilder();
flowBuilder.setMatch(MatchUtils
- .createDmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, null).build());
+ .createDmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, null).build());
flowBuilder.setMatch(MatchUtils
- .createTunnelIDMatch(matchBuilder, new BigInteger(segmentationId)).build());
+ .createTunnelIDMatch(matchBuilder, new BigInteger(segmentationId)).build());
LOG.debug("MatchBuilder contains: {}", flowBuilder.getMatch());
String flowId = "UcastOut_" + segmentationId + "_" +
public void ingressACLDefaultTcpDrop(Long dpidLong, String segmentationId, String attachedMac,
- int priority, boolean write) {
+ int priority, boolean write) {
String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
MatchBuilder matchBuilder = new MatchBuilder();
FlowBuilder flowBuilder = new FlowBuilder();
flowBuilder.setMatch(MatchUtils.createDmacTcpPortWithFlagMatch(matchBuilder,
- attachedMac, Constants.TCP_SYN, segmentationId).build());
+ attachedMac, Constants.TCP_SYN, segmentationId).build());
LOG.debug("MatchBuilder contains: {}", flowBuilder.getMatch());
String flowId = "PortSec_TCP_Syn_Default_Drop_" + segmentationId + "_" + attachedMac;
}
public void ingressACLPermitAllProto(Long dpidLong, String segmentationId, String attachedMac,
- boolean write, String securityRuleIpPrefix, Integer protoPortMatchPriority) {
-
+ boolean write, String securityRuleIpPrefix, Integer protoPortMatchPriority) {
String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
MatchBuilder matchBuilder = new MatchBuilder();
NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
FlowBuilder flowBuilder = new FlowBuilder();
flowBuilder.setMatch(MatchUtils.createTunnelIDMatch(matchBuilder, new BigInteger(segmentationId))
- .build());
+ .build());
if (securityRuleIpPrefix != null) {
Ipv4Prefix srcIpPrefix = new Ipv4Prefix(securityRuleIpPrefix);
flowBuilder.setMatch(MatchUtils
- .createDmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, srcIpPrefix)
- .build());
+ .createDmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, srcIpPrefix)
+ .build());
} else {
flowBuilder.setMatch(MatchUtils
- .createDmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, null)
- .build());
+ .createDmacIpTcpSynMatch(matchBuilder, new MacAddress(attachedMac), null, null)
+ .build());
}
LOG.debug("MatchBuilder contains: {}", flowBuilder.getMatch());
* @param write is write or delete
* @param protoPortMatchPriority the priority
*/
- private void ingressACLDHCPAllowServerTraffic(Long dpidLong, String segmentationId, String dhcpMacAddress,
- boolean write, Integer protoPortMatchPriority) {
+ private void ingressAclDhcpAllowServerTraffic(Long dpidLong, String segmentationId, String dhcpMacAddress,
+ boolean write, Integer protoPortMatchPriority) {
String nodeName = Constants.OPENFLOW_NODE_PREFIX + dpidLong;
MatchBuilder matchBuilder = new MatchBuilder();
NodeBuilder nodeBuilder = createNodeBuilder(nodeName);
- FlowBuilder flowBuilder = new FlowBuilder();
-
- flowBuilder.setMatch(MatchUtils.createDHCPServerMatch(matchBuilder, dhcpMacAddress, 67, 68).build());
- LOG.debug("ingressACLDHCPAllowServerTraffic: MatchBuilder contains: {}", flowBuilder.getMatch());
+ MatchUtils.createDhcpServerMatch(matchBuilder, dhcpMacAddress, 67, 68).build();
+ LOG.debug("ingressAclDHCPAllowServerTraffic: MatchBuilder contains: {}", matchBuilder);
String flowId = "Ingress_DHCP_Server" + segmentationId + "_" + dhcpMacAddress + "_Permit_";
- // Add Flow Attributes
+ syncFlow(flowId, nodeBuilder, matchBuilder, protoPortMatchPriority, write, false);
+ }
+
+ /**
+ * Add or remove flow to the node.
+ *
+ * @param flowId the flow id
+ * @param nodeBuilder the node builder
+ * @param matchBuilder the matchbuilder
+ * @param protoPortMatchPriority the protocol priority
+ * @param write whether it is a write
+ * @param drop whether it is a drop or forward
+ */
+ private void syncFlow(String flowId, NodeBuilder nodeBuilder,
+ MatchBuilder matchBuilder,Integer protoPortMatchPriority,
+ boolean write,boolean drop) {
+ FlowBuilder flowBuilder = new FlowBuilder();
+ flowBuilder.setMatch(matchBuilder.build());
flowBuilder.setId(new FlowId(flowId));
FlowKey key = new FlowKey(new FlowId(flowId));
flowBuilder.setStrict(false);
if (write) {
// Instantiate the Builders for the OF Actions and Instructions
- InstructionsBuilder isb = new InstructionsBuilder();
- List<Instruction> instructionsList = Lists.newArrayList();
-
InstructionBuilder ib = this.getMutablePipelineInstructionBuilder();
+ if (drop) {
+ InstructionUtils.createDropInstructions(ib);
+ }
ib.setOrder(0);
+ InstructionsBuilder isb = new InstructionsBuilder();
+ List<Instruction> instructionsList = Lists.newArrayList();
ib.setKey(new InstructionKey(0));
instructionsList.add(ib.build());
isb.setInstruction(instructionsList);
-
- LOG.debug("Instructions contain: {}", ib.getInstruction());
- // Add InstructionsBuilder to FlowBuilder
flowBuilder.setInstructions(isb.build());
writeFlow(flowBuilder, nodeBuilder);
} else {
removeFlow(flowBuilder, nodeBuilder);
}
+
}
+
@Override
public void setDependencies(BundleContext bundleContext, ServiceReference serviceReference) {
super.setDependencies(bundleContext.getServiceReference(IngressAclProvider.class.getName()), this);
+ securityServicesManager =
+ (SecurityServicesManager) ServiceHelper.getGlobalInstance(SecurityServicesManager.class, this);
}
@Override
package org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.services;
import java.math.BigInteger;
+import java.net.Inet6Address;
import java.net.InetAddress;
import java.util.List;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.InstructionBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.instruction.list.InstructionKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;
public class L3ForwardingService extends AbstractServiceInstance implements L3ForwardingProvider, ConfigInterface {
+ private static final Logger LOG = LoggerFactory.getLogger(L3ForwardingService.class);
+
public L3ForwardingService() {
super(Service.L3_FORWARDING);
}
List<Instruction> instructions = Lists.newArrayList();
InstructionBuilder ib = new InstructionBuilder();
+ if (ipAddress instanceof Inet6Address) {
+ // WORKAROUND: For now ipv6 is not supported
+ // TODO: implement ipv6 case
+ LOG.debug("ipv6 address is not implemented yet. dpid {} segmentationId {} ipAddress {} macAddress {} Action {}",
+ dpid, segmentationId, ipAddress, macAddress, action);
+ return new Status(StatusCode.NOTIMPLEMENTED);
+ }
+
MatchUtils.createTunnelIDMatch(matchBuilder, new BigInteger(segmentationId));
MatchUtils.createDstL3IPv4Match(matchBuilder, MatchUtils.iPv4PrefixFromIPv4Address(ipAddress.getHostAddress()));
// TODO: implement ipv6 case
LOG.debug("ipv6 address is not implemented yet. address {}",
address);
- new Status(StatusCode.NOTIMPLEMENTED);
+ return new Status(StatusCode.NOTIMPLEMENTED);
}
final String prefixString = address.getHostAddress() + "/" + mask;
MatchUtils.createDstL3IPv4Match(matchBuilder, new Ipv4Prefix(prefixString));
}
private NodeConnectorId createNodeConnectorId(String connectorId, NodeId nodeId) {
- StringBuilder stringId = new StringBuilder(nodeId.getValue()).append(":").append(connectorId);
- return new NodeConnectorId(stringId.toString());
+ return new NodeConnectorId(nodeId.getValue() + ":" + connectorId);
}
/**
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.Nullable;
-
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
import org.opendaylight.openflowplugin.api.OFConstants;
import org.opendaylight.ovsdb.openstack.netvirt.api.GatewayMacResolver;
import org.opendaylight.ovsdb.openstack.netvirt.providers.NetvirtProvidersProvider;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.AbstractServiceInstance;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.Service;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Address;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
private SalFlowService flowService;
private final AtomicLong flowCookie = new AtomicLong();
private final ConcurrentMap<Ipv4Address, ArpResolverMetadata> gatewayToArpMetadataMap =
- new ConcurrentHashMap<Ipv4Address, ArpResolverMetadata>();
- private final int ARP_WATCH_BROTHERS = 10;
- private final int WAIT_CYCLES = 3;
- private final int PER_CYCLE_WAIT_DURATION = 1000;
- private final int REFRESH_INTERVAL = 10;
+ new ConcurrentHashMap<>();
+ private static final int ARP_WATCH_BROTHERS = 10;
+ private static final int WAIT_CYCLES = 3;
+ private static final int PER_CYCLE_WAIT_DURATION = 1000;
+ private static final int REFRESH_INTERVAL = 10;
private final ListeningExecutorService arpWatcherWall = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(ARP_WATCH_BROTHERS));
private final ScheduledExecutorService gatewayMacRefresherPool = Executors.newScheduledThreadPool(1);
private final ScheduledExecutorService refreshRequester = Executors.newSingleThreadScheduledExecutor();
* @param sourceMacAddress Source Mac address for the ARP request packet
* @param periodicRefresh Enable/Disable periodic refresh of the Gateway Mac address
* NOTE:Periodic refresh is not supported yet.
- * @param gatewayIp Resolve MAC address of this Gateway Ip
* @return Future object
*/
@Override
});
}
- private static @Nullable Ipv4Address getIPv4Addresses(IpAddress ipAddress) {
- if (ipAddress.getIpv4Address() == null) {
- return null;
- }
- return ipAddress.getIpv4Address();
- }
-
private Flow createArpReplyToControllerFlow(final ArpMessageAddress senderAddress, final Ipv4Address ipForRequestedMac) {
checkNotNull(senderAddress);
checkNotNull(ipForRequestedMac);
arpFlow.setMatch(match);
arpFlow.setInstructions(new InstructionsBuilder().setInstruction(
ImmutableList.of(SEND_TO_CONTROLLER_INSTRUCTION)).build());
- arpFlow.setId(createFlowId(senderAddress, ipForRequestedMac));
+ arpFlow.setId(createFlowId(ipForRequestedMac));
return arpFlow.build();
}
- private FlowId createFlowId(ArpMessageAddress senderAddress, Ipv4Address ipForRequestedMac) {
+ private FlowId createFlowId(Ipv4Address ipForRequestedMac) {
String flowId = ARP_REPLY_TO_CONTROLLER_FLOW_NAME + "|" + ipForRequestedMac.getValue();
return new FlowId(flowId);
}
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
+import org.mockito.internal.matchers.Equality;
import org.mockito.runners.MockitoJUnitRunner;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.neutron.spi.NeutronSecurityGroup;
import org.opendaylight.neutron.spi.NeutronSecurityRule;
+import org.opendaylight.neutron.spi.Neutron_IPs;
+import org.opendaylight.ovsdb.openstack.netvirt.api.Constants;
+import org.opendaylight.ovsdb.openstack.netvirt.api.SecurityServicesManager;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.PipelineOrchestrator;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.Service;
+import org.opendaylight.ovsdb.utils.mdsal.openflow.MatchUtils;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
@InjectMocks private EgressAclService egressAclService = new EgressAclService();
@Spy private EgressAclService egressAclServiceSpy;
+ @Spy private MatchUtils matchUtil;
@Mock private DataBroker dataBroker;
@Mock private PipelineOrchestrator orchestrator;
@Mock private NeutronSecurityGroup securityGroup;
@Mock private NeutronSecurityRule portSecurityRule;
+ @Mock private SecurityServicesManager securityServices;
+
+ private Neutron_IPs neutron_ip_src;
+ private Neutron_IPs neutron_ip_dest_1;
+ private Neutron_IPs neutron_ip_dest_2;
+ private List<Neutron_IPs> neutronSrcIpList = new ArrayList<Neutron_IPs>();
+ private List<Neutron_IPs> neutronDestIpList = new ArrayList<Neutron_IPs>();
private static final String HOST_ADDRESS = "127.0.0.1/32";
private static final String MAC_ADDRESS = "87:1D:5E:02:40:B7";
+ private static final String SRC_IP = "192.168.0.1";
+ private static final String DEST_IP_1 = "192.169.0.1";
+ private static final String DEST_IP_2 = "192.169.0.2";
+ private static final String DEST_IP_1_WITH_MASK = "192.169.0.1/32";
+ private static final String DEST_IP_2_WITH_MASK = "192.169.0.2/32";
+ private static final String SECURITY_GROUP_UUID = "85cc3048-abc3-43cc-89b3-377341426ac5";
@Before
public void setUp() {
when(orchestrator.getNextServiceInPipeline(any(Service.class))).thenReturn(Service.ARP_RESPONDER);
portSecurityRule = mock(NeutronSecurityRule.class);
+
when(portSecurityRule.getSecurityRuleEthertype()).thenReturn("IPv4");
when(portSecurityRule.getSecurityRuleDirection()).thenReturn("egress");
List<NeutronSecurityRule> portSecurityList = new ArrayList<NeutronSecurityRule>();
portSecurityList.add(portSecurityRule);
+ neutron_ip_src = new Neutron_IPs();
+ neutron_ip_src.setIpAddress(SRC_IP);
+ neutronSrcIpList.add(neutron_ip_src);
+
+ neutron_ip_dest_1 = new Neutron_IPs();
+ neutron_ip_dest_1.setIpAddress(DEST_IP_1);
+ neutronDestIpList.add(neutron_ip_dest_1);
+
+ neutron_ip_dest_2 = new Neutron_IPs();
+ neutron_ip_dest_2.setIpAddress(DEST_IP_2);
+ neutronDestIpList.add(neutron_ip_dest_2);
+
when(securityGroup.getSecurityRules()).thenReturn(portSecurityList);
+ when(securityServices.getVmListForSecurityGroup(neutronSrcIpList, SECURITY_GROUP_UUID)).thenReturn(neutronDestIpList);
}
/**
* Rule 1: TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (True)
*/
- @Test
+ /*@Test
public void testProgramPortSecurityACLRule1() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
when(portSecurityRule.getSecurityRulePortMax()).thenReturn(1);
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 2: TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (True)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule2() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 3: TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule3() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 4: TCP Proto (False), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule4() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn(null);
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 5: TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (False)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule5() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 6: TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (False)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule6() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 7: TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (False or 0.0.0.0/0)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule7() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(writeTransaction, times(1)).submit();
verify(commitFuture, times(1)).get();
}
-
+*/
/**
* Test method {@link EgressAclService#egressACLDefaultTcpDrop(Long, String, String, int, boolean)}
*/
verify(commitFuture, times(2)).get(); // 1 + 1 above
}
+ /**
+ * Test IPv4 add test case.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddIpv4() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn(null);
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(null);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(null);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn(null);
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(2)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test IPv4 remove test case.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveIpv4() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn(null);
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(null);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(null);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn(null);
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+ verify(writeTransaction, times(1)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test TCP add with port no and CIDR selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddTcp1() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(2)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test TCP remove with port no and CIDR selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveTcp1() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(1)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test TCP add with port no and remote SG selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddTcp2() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+ when(portSecurityRule.getSecurityRemoteGroupID()).thenReturn("85cc3048-abc3-43cc-89b3-377341426ac5");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(4)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(2)).submit();
+ verify(commitFuture, times(2)).get();
+ }
+
+ /**
+ * Test TCP remove with port no and remote SG selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveTcp2() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+ when(portSecurityRule.getSecurityRemoteGroupID()).thenReturn("85cc3048-abc3-43cc-89b3-377341426ac5");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(2)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(2)).submit();
+ verify(commitFuture, times(2)).get();
+ }
+
+ /**
+ * Test UDP add with port no and CIDR selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddUdp1() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("udp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(2)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+     * Test UDP remove with port no and CIDR selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveUdp1() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("udp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(1)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test UDP add with port no and remote SG selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddUdp2() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("udp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+ when(portSecurityRule.getSecurityRemoteGroupID()).thenReturn("85cc3048-abc3-43cc-89b3-377341426ac5");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(4)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(2)).submit();
+ verify(commitFuture, times(2)).get();
+ }
+
+ /**
+     * Test UDP remove with port no and remote SG selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveUdp2() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("udp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+ when(portSecurityRule.getSecurityRemoteGroupID()).thenReturn("85cc3048-abc3-43cc-89b3-377341426ac5");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(2)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(2)).submit();
+ verify(commitFuture, times(2)).get();
+ }
+
+ /**
+ * Test IPv4 invalid ether type test case.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleInvalidEther() throws Exception {
+ when(portSecurityRule.getSecurityRuleEthertype()).thenReturn("IPV6");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(0)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(0)).submit();
+ verify(commitFuture, times(0)).get();
+ }
+
+ /**
+ * Test IPv4 invalid direction type test case.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleInvalidDirection() throws Exception {
+ when(portSecurityRule.getSecurityRuleDirection()).thenReturn("ingress");
+
+ egressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(0)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(0)).submit();
+ verify(commitFuture, times(0)).get();
+ }
+
+ /**
+ * Test With isLastPortInBridge false isComputeNode false
+ */
+ @Test
+ public void testProgramFixedSecurityACLAdd1() throws Exception {
+ egressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, neutronDestIpList, false, false, true);
+
+ verify(writeTransaction, times(0)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(0)).submit();
+ verify(commitFuture, times(0)).get();
+ }
+ /**
+ * Test With isLastPortInBridge false isComputeNode false
+ */
+ @Test
+ public void testProgramFixedSecurityACLRemove1() throws Exception {
+
+ egressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, neutronDestIpList, false, false, false);
+
+ verify(writeTransaction, times(0)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(0)).submit();
+ verify(commitFuture, times(0)).get();
+ }
+
+ /**
+ * Test With isLastPortInBridge false isComputeNode true
+ */
+ @Test
+ public void testProgramFixedSecurityACLAdd2() throws Exception {
+
+ egressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, neutronDestIpList, false, true, true);
+
+ verify(writeTransaction, times(6)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(3)).submit();
+ verify(commitFuture, times(3)).get();
+ }
+
+ /**
+ * Test With isLastPortInBridge false isComputeNode true
+ */
+ @Test
+ public void testProgramFixedSecurityACLRemove2() throws Exception {
+
+ egressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, neutronDestIpList, false, true, false);
+
+ verify(writeTransaction, times(3)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(3)).submit();
+ verify(commitFuture, times(3)).get();
+ }
+
+ /**
+ * Test With isLastPortInBridge true isComputeNode false
+ */
+ @Test
+ public void testProgramFixedSecurityACLAdd3() throws Exception {
+
+ egressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, neutronDestIpList, true, false, true);
+
+ verify(writeTransaction, times(2)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test With isLastPortInBridge true isComputeNode false
+ */
+ @Test
+ public void testProgramFixedSecurityACLRemove3() throws Exception {
+
+ egressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, neutronDestIpList, true, false, false);
+
+ verify(writeTransaction, times(1)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test With isLastPortInBridge true isComputeNode true
+ */
+ @Test
+ public void testProgramFixedSecurityACLAdd4() throws Exception {
+
+ egressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, neutronDestIpList, true, true, true);
+
+ verify(writeTransaction, times(8)).put(any(LogicalDatastoreType.class),
+ any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(4)).submit();
+ verify(commitFuture, times(4)).get();
+ }
+
+ /**
+ * Test With isLastPortInBridge true isComputeNode true
+ */
+ @Test
+ public void testProgramFixedSecurityACLRemove4() throws Exception {
+
+ egressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, neutronDestIpList, true, true, false);
+
+ verify(writeTransaction, times(4)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(4)).submit();
+ verify(commitFuture, times(4)).get();
+ }
+
/**
* Test method {@link EgressAclService#egressACLTcpPortWithPrefix(Long, String, String, boolean, Integer, String, Integer)}
*/
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.neutron.spi.NeutronSecurityGroup;
import org.opendaylight.neutron.spi.NeutronSecurityRule;
+import org.opendaylight.neutron.spi.Neutron_IPs;
+import org.opendaylight.ovsdb.openstack.netvirt.api.Constants;
+import org.opendaylight.ovsdb.openstack.netvirt.api.SecurityServicesManager;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.PipelineOrchestrator;
import org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.Service;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
@Mock private NeutronSecurityGroup securityGroup;
@Mock private NeutronSecurityRule portSecurityRule;
+ @Mock private SecurityServicesManager securityServices;
+
+ private List<Neutron_IPs> neutronSrcIpList = new ArrayList<Neutron_IPs>();
+ private List<Neutron_IPs> neutronDestIpList = new ArrayList<Neutron_IPs>();
+ private Neutron_IPs neutron_ip_src;
+ private Neutron_IPs neutron_ip_dest_1;
+ private Neutron_IPs neutron_ip_dest_2;
private static final String SEGMENTATION_ID = "2";
private static final int PRIORITY = 1;
private static final String HOST_ADDRESS = "127.0.0.1/32";
private static final String MAC_ADDRESS = "87:1D:5E:02:40:B8";
+ private static final String SRC_IP = "192.168.0.1";
+ private static final String DEST_IP_1 = "192.169.0.1";
+ private static final String DEST_IP_2 = "192.169.0.2";
+ private static final String SECURITY_GROUP_UUID = "85cc3048-abc3-43cc-89b3-377341426ac5";
@Before
public void setUp() {
List<NeutronSecurityRule> portSecurityList = new ArrayList<NeutronSecurityRule>();
portSecurityList.add(portSecurityRule);
+ neutron_ip_src = new Neutron_IPs();
+ neutron_ip_src.setIpAddress(SRC_IP);
+ neutronSrcIpList.add(neutron_ip_src);
+
+ neutron_ip_dest_1 = new Neutron_IPs();
+ neutron_ip_dest_1.setIpAddress(DEST_IP_1);
+ neutronDestIpList.add(neutron_ip_dest_1);
+
+ neutron_ip_dest_2 = new Neutron_IPs();
+ neutron_ip_dest_2.setIpAddress(DEST_IP_2);
+ neutronDestIpList.add(neutron_ip_dest_2);
+
+
when(securityGroup.getSecurityRules()).thenReturn(portSecurityList);
+ when(securityServices.getVmListForSecurityGroup
+ (neutronSrcIpList, SECURITY_GROUP_UUID)).thenReturn(neutronDestIpList);
}
- /**
+ /* *//**
* Rule 1: TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (True)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule1() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
}
- /**
+ *//**
* Rule 2: TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (True)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule2() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 3: TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule3() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 4: TCP Proto (False), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (True)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule4() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn(null);
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 5: TCP Proto (True), TCP Port Minimum (True), TCP Port Max (True), IP Prefix (False)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule5() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 6: TCP Proto (True), TCP Port Minimum (True), TCP Port Max (False), IP Prefix (False)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule6() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(commitFuture, times(2)).get();
}
- /**
+ *//**
* Rule 7: TCP Proto (True), TCP Port Minimum (False), TCP Port Max (False), IP Prefix (False or 0.0.0.0/0)
- */
+ *//*
@Test
public void testProgramPortSecurityACLRule7() throws Exception {
when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
verify(writeTransaction, times(1)).submit();
verify(commitFuture, times(1)).get();
}
+*/
+ /**
+ * Test IPv4 add test case.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddIpv4() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn(null);
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(null);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(null);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn(null);
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(2)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test IPv4 remove test case.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveIpv4() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn(null);
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(null);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(null);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn(null);
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(1)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test TCP add with port no and CIDR selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddTcp1() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(2)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test TCP remove with port no and CIDR selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveTcp1() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(1)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test TCP add with port no and remote SG selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddTcp2() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+ when(portSecurityRule.getSecurityRemoteGroupID()).thenReturn("85cc3048-abc3-43cc-89b3-377341426ac5");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(4)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(2)).submit();
+ verify(commitFuture, times(2)).get();
+ }
+
+ /**
+ * Test TCP remove with port no and remote SG selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveTcp2() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("tcp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+ when(portSecurityRule.getSecurityRemoteGroupID()).thenReturn("85cc3048-abc3-43cc-89b3-377341426ac5");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(2)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(2)).submit();
+ verify(commitFuture, times(2)).get();
+ }
+
+ /**
+ * Test UDP add with port no and CIDR selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddUdp1() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("udp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(2)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+     * Test UDP remove with port no and CIDR selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveUdp1() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("udp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(1)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(1)).submit();
+ verify(commitFuture, times(1)).get();
+ }
+
+ /**
+ * Test UDP add with port no and remote SG selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleAddUdp2() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("udp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+ when(portSecurityRule.getSecurityRemoteGroupID()).thenReturn("85cc3048-abc3-43cc-89b3-377341426ac5");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,true);
+
+ verify(writeTransaction, times(4)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(2)).submit();
+ verify(commitFuture, times(2)).get();
+ }
+
+ /**
+     * Test UDP remove with port no and remote SG selected.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleRemoveUdp2() throws Exception {
+ when(portSecurityRule.getSecurityRuleProtocol()).thenReturn("udp");
+ when(portSecurityRule.getSecurityRulePortMax()).thenReturn(50);
+ when(portSecurityRule.getSecurityRulePortMin()).thenReturn(50);
+ when(portSecurityRule.getSecurityRuleRemoteIpPrefix()).thenReturn("0.0.0.0/24");
+ when(portSecurityRule.getSecurityRemoteGroupID()).thenReturn("85cc3048-abc3-43cc-89b3-377341426ac5");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(2)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(2)).submit();
+ verify(commitFuture, times(2)).get();
+ }
+
+ /**
+ * Test IPv4 invalid ether type test case.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleInvalidEther() throws Exception {
+ when(portSecurityRule.getSecurityRuleEthertype()).thenReturn("IPV6");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(0)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(0)).submit();
+ verify(commitFuture, times(0)).get();
+ }
+
+ /**
+ * Test IPv4 invalid direction type test case.
+ */
+ @Test
+ public void testProgramPortSecurityACLRuleInvalidDirection() throws Exception {
+ when(portSecurityRule.getSecurityRuleDirection()).thenReturn("edgress");
+
+ ingressAclServiceSpy.programPortSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 124, securityGroup,neutronSrcIpList,false);
+
+ verify(writeTransaction, times(0)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(0)).submit();
+ verify(commitFuture, times(0)).get();
+ }
+
+ /**
+ * Test With isLastPortInBridge false isComputeNode false
+ */
+ @Test
+ public void testProgramFixedSecurityACLAdd1() throws Exception {
+ ingressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, false, false, true);
+
+ verify(writeTransaction, times(0)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(Node.class), eq(true));
+ verify(writeTransaction, times(0)).submit();
+ verify(commitFuture, times(0)).get();
+ }
+ /**
+ * Test With isLastPortInBridge false isComputeNode false
+ */
+ @Test
+ public void testProgramFixedSecurityACLRemove1() throws Exception {
+
+ ingressAclServiceSpy.programFixedSecurityAcl(Long.valueOf(1554), "2", MAC_ADDRESS, 1, false, false, false);
+
+ verify(writeTransaction, times(0)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+ verify(writeTransaction, times(0)).submit();
+ verify(commitFuture, times(0)).get();
+ }
/**
* Test method {@link IgressAclService#egressACLDefaultTcpDrop(Long, String, String, int, boolean)}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>binding-parent</artifactId>
+ <version>0.8.0-SNAPSHOT</version>
+ <relativePath/>
+ </parent>
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.ovsdb</groupId>
+ <artifactId>openstack.net-virt-sfc-api</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>ietf-yang-types-20130715</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>yang-ext</artifactId>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+module ietf-acl {
+ yang-version 1;
+
+ namespace "urn:ietf:params:xml:ns:yang:ietf-acl";
+
+ prefix acl;
+
+ import ietf-yang-types {
+ prefix "ietf";
+ }
+
+ import packet-fields {
+ prefix "packet-fields";
+ }
+
+ organization
+ "IETF NETMOD (NETCONF Data Modeling Language) Working Group";
+
+ contact
+ "WG Web: http://tools.ietf.org/wg/netmod/
+ WG List: netmod@ietf.org
+
+ WG Chair: Juergen Schoenwaelder
+ j.schoenwaelder@jacobs-university.de
+
+ WG Chair: Tom Nadeau
+ tnadeau@lucidvision.com
+
+ Editor: Dean Bogdanovic
+ deanb@juniper.net
+
+ Editor: Kiran Agrahara Sreenivasa
+ kkoushik@brocade.com
+
+ Editor: Lisa Huang
+ yihuan@cisco.com
+
+ Editor: Dana Blair
+ dblair@cisco.com";
+
+ description
+    "This YANG module defines a component describing the
+ configuration of Access Control Lists (ACLs).";
+
+ revision 2014-10-10 {
+ description "Creating base model for netmod.";
+ reference
+ "RFC 6020: YANG - A Data Modeling Language for the
+ Network Configuration Protocol (NETCONF)";
+ }
+
+ identity acl-base {
+ description "Base acl type for all ACL type identifiers.";
+ }
+
+ identity ip-acl {
+ base "acl:acl-base";
+ description "layer 3 ACL type";
+ }
+ identity eth-acl {
+ base "acl:acl-base";
+ description "layer 2 ACL type";
+ }
+
+ typedef acl-type {
+ type identityref {
+ base "acl-base";
+ }
+ description
+ "This type is used to refer to an Access Control List
+ (ACL) type";
+ }
+
+ typedef acl-ref {
+ type leafref {
+ path "/acl:access-lists/acl:access-list/acl:acl-name";
+ }
+ description "This type is used by data models that
+        need to reference an acl";
+ }
+
+ container access-lists {
+ description
+ "Access control lists.";
+
+ list access-list {
+ key acl-name;
+ description "
+ An access list (acl) is an ordered list of
+ access list entries (ace). Each ace has a
+ sequence number to define the order, list
+ of match criteria, and a list of actions.
+ Since there are several kinds of acls
+ implementeded with different attributes for
+ each and different for each vendor, this
+ model accommodates customizing acls for
+ each kind and for each vendor.
+ ";
+
+ leaf acl-name {
+ type string;
+ description "The name of access-list.
+ A device MAY restrict the length and value of
+ this name, possibly space and special
+ characters are not allowed.";
+ }
+
+ leaf acl-type {
+ type acl-type;
+ description "Type of ACL";
+ }
+
+ container acl-oper-data {
+ config false;
+
+ description "Overall ACL operational data";
+ leaf match-counter {
+ type ietf:counter64;
+ description "Total match count for ACL";
+ }
+
+ leaf-list targets {
+ type string;
+ description "List of targets where ACL is applied";
+ }
+ }
+
+ container access-list-entries {
+ description "The access-list-entries container contains
+ a list of access-list-entry(ACE).";
+
+ list access-list-entry {
+ key rule-name;
+ ordered-by user;
+
+ description "List of access list entries(ACE)";
+ leaf rule-name {
+ type string;
+ description "Entry name.";
+ }
+
+ container matches {
+ description "Define match criteria";
+ choice ace-type {
+ description "Type of ace.";
+ case ace-ip {
+ uses packet-fields:acl-ip-header-fields;
+ choice ace-ip-version {
+ description "Choice of IP version.";
+ case ace-ipv4 {
+ uses packet-fields:acl-ipv4-header-fields;
+ }
+ case ace-ipv6 {
+ uses packet-fields:acl-ipv6-header-fields;
+ }
+ }
+ }
+ case ace-eth {
+ uses packet-fields:acl-eth-header-fields;
+ }
+ }
+ uses packet-fields:metadata;
+ }
+
+ container actions {
+ description "Define action criteria";
+ choice packet-handling {
+ default deny;
+
+ description "Packet handling action.";
+ case deny {
+ leaf deny {
+ type empty;
+ description "Deny action.";
+ }
+ }
+ case permit {
+ leaf permit {
+ type empty;
+ description "Permit action.";
+ }
+ }
+ }
+ }
+
+ container ace-oper-data {
+ config false;
+
+ description "Per ace operational data";
+ leaf match-counter {
+ type ietf:counter64;
+ description "Number of matches for an ace";
+ }
+ }
+ }
+ }
+ }
+ }
+}
--- /dev/null
+module netvirt-sfc-acl {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:netvirt:sfc:acl";
+ prefix "acl";
+
+ import ietf-acl { prefix ietf-acl;}
+ import yang-ext { prefix ext; }
+
+ revision "2015-01-05" {
+ description "Initial revision of netvirt extensions to ietf-acl model";
+ }
+
+ // TODO: Add choice for Neutron and add fields there instead of at the root of matches
+ augment "/ietf-acl:access-lists/ietf-acl:access-list/ietf-acl:access-list-entries" +
+ "/ietf-acl:access-list-entry/ietf-acl:matches" {
+ description "Neutron network uuid";
+ leaf network-uuid {
+ type string;
+ }
+ }
+}
--- /dev/null
+module netvirt-sfc-classifier {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:netvirt:sfc:classifier";
+ prefix "classifier";
+
+ import ietf-inet-types { prefix inet;}
+ import ietf-yang-types { prefix yang;}
+
+ revision "2015-01-05" {
+ description "Initial revision of netvirt classifier model";
+ }
+
+ container classifiers {
+ description "Classifier container which represents the ACL being applied,
+ attachment point and the associated chain";
+
+ list classifier {
+ description "A list of SFC classifiers";
+ key "name";
+ leaf name {
+ type string;
+ }
+ leaf acl {
+ type string;
+ }
+ container sffs {
+ list sff {
+ description "The classifier will be attached to these SFFs";
+ key "name";
+ leaf name {
+ type string;
+ }
+ }
+ }
+ }
+ }
+}
--- /dev/null
+module netvirt-sfc {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:netvirt:sfc";
+ prefix "netvirt-sfc";
+
+ revision "2015-01-05" {
+ description "Initial revision of the netvirt sfc model";
+ }
+
+ container sfc {
+ leaf name {
+ type string;
+ }
+ }
+}
--- /dev/null
+module packet-fields {
+ yang-version 1;
+
+ namespace "urn:ietf:params:xml:ns:yang:packet-fields";
+
+ prefix packet-fields;
+
+ import ietf-inet-types {
+ prefix "inet";
+ }
+
+ import ietf-yang-types {
+ prefix "yang";
+ }
+
+ revision 2014-06-25 {
+ description "Initial version of packet fields used by access-lists";
+ }
+
+ grouping acl-transport-header-fields {
+ description "Transport header fields";
+
+ container source-port-range {
+ description "inclusive range of source ports";
+ leaf lower-port {
+ mandatory true;
+ type inet:port-number;
+ }
+ leaf upper-port {
+ type inet:port-number;
+ }
+ }
+
+ container destination-port-range {
+ description "inclusive range of destination ports";
+ leaf lower-port {
+ mandatory true;
+ type inet:port-number;
+ }
+ leaf upper-port {
+ type inet:port-number;
+ }
+ }
+ }
+
+ grouping acl-ip-header-fields {
+ description "Header fields common to ipv4 and ipv6";
+
+ uses acl-transport-header-fields;
+
+ leaf dscp {
+ type inet:dscp;
+ }
+
+ leaf ip-protocol {
+ type uint8;
+ }
+
+ }
+
+ grouping acl-ipv4-header-fields {
+ description "fields in IPv4 header";
+
+ leaf destination-ipv4-address {
+ type inet:ipv4-prefix;
+ }
+
+ leaf source-ipv4-address {
+ type inet:ipv4-prefix;
+ }
+
+ }
+
+ grouping acl-ipv6-header-fields {
+ description "fields in IPv6 header";
+
+ leaf destination-ipv6-address {
+ type inet:ipv6-prefix;
+ }
+
+ leaf source-ipv6-address {
+ type inet:ipv6-prefix;
+ }
+
+ leaf flow-label {
+ type inet:ipv6-flow-label;
+ }
+
+ }
+
+ grouping acl-eth-header-fields {
+ description "fields in ethernet header";
+
+ leaf destination-mac-address {
+ type yang:mac-address;
+ }
+
+ leaf destination-mac-address-mask {
+ type yang:mac-address;
+ }
+
+ leaf source-mac-address {
+ type yang:mac-address;
+ }
+
+ leaf source-mac-address-mask {
+ type yang:mac-address;
+ }
+ }
+
+ grouping timerange {
+ description "Define time range entries to restrict
+ the access. The time range is identified by a name
+ and then referenced by a function, so that those
+ time restrictions are imposed on the function itself.";
+
+ container absolute {
+ description
+ "Absolute time and date that
+ the associated function starts
+ going into effect.";
+
+ leaf start {
+ type yang:date-and-time;
+ description
+ "Start time and date";
+ }
+ leaf end {
+ type yang:date-and-time;
+ description "Absolute end time and date";
+ }
+ leaf active {
+ type boolean;
+ default "true";
+ description
+ "Specify the associated function
+ active or inactive state when
+ starts going into effect";
+ }
+ } // container absolute
+ } //grouping timerange
+
+ grouping metadata {
+ description "Fields associated with a packet but not in the header";
+
+ leaf input-interface {
+ description "Packet was received on this interface";
+ type string;
+ }
+ uses timerange;
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.ovsdb</groupId>
+ <artifactId>openstack.net-virt-sfc-artifacts</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <packaging>pom</packaging>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-features</artifactId>
+ <version>${project.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html INTERNAL
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <groupId>org.opendaylight.odlparent</groupId>
+ <artifactId>features-parent</artifactId>
+ <version>1.6.0-SNAPSHOT</version>
+ <relativePath/>
+ </parent>
+ <groupId>org.opendaylight.ovsdb</groupId>
+ <artifactId>openstack.net-virt-sfc-features</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <name>${project.artifactId}</name>
+ <modelVersion>4.0.0</modelVersion>
+ <prerequisites>
+ <maven>3.1.1</maven>
+ </prerequisites>
+ <properties>
+ <mdsal.model.version>0.8.0-SNAPSHOT</mdsal.model.version>
+ <mdsal.version>1.3.0-SNAPSHOT</mdsal.version>
+ <restconf.version>1.3.0-SNAPSHOT</restconf.version>
+ <yangtools.version>0.8.0-SNAPSHOT</yangtools.version>
+ <dlux.version>0.3.0-SNAPSHOT</dlux.version>
+ <configfile.directory>etc/opendaylight/karaf</configfile.directory>
+ </properties>
+ <dependencyManagement>
+ <dependencies>
+ <!-- project specific dependencies -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>mdsal-artifacts</artifactId>
+ <version>${mdsal.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.netconf</groupId>
+ <artifactId>restconf-artifacts</artifactId>
+ <version>${restconf.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>features-yangtools</artifactId>
+ <classifier>features</classifier>
+ <version>${yangtools.version}</version>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>features-mdsal-model</artifactId>
+ <version>${mdsal.model.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-mdsal</artifactId>
+ <classifier>features</classifier>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.netconf</groupId>
+ <artifactId>features-restconf</artifactId>
+ <classifier>features</classifier>
+ <version>${restconf.version}</version>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.dlux</groupId>
+ <artifactId>features-dlux</artifactId>
+ <classifier>features</classifier>
+ <version>${dlux.version}</version>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-impl</artifactId>
+ <version>${project.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ </dependencies>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<features name="odl-ovsdb-sfc-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.mdsal.model/features-mdsal-model/${mdsal.model.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.netconf/features-restconf/${restconf.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.dlux/features-dlux/${dlux.version}/xml/features</repository>
+ <feature name='odl-ovsdb-sfc-api' version='${project.version}' description='OpenDaylight :: ovsdb-sfc :: api'>
+ <feature version='${mdsal.model.version}'>odl-mdsal-models</feature>
+ <bundle>mvn:org.opendaylight.ovsdb/openstack.net-virt-sfc-api/${project.version}</bundle>
+ </feature>
+ <feature name='odl-ovsdb-sfc' version='${project.version}' description='OpenDaylight :: ovsdb-sfc'>
+ <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-ovsdb-sfc-api</feature>
+ <bundle>mvn:org.opendaylight.ovsdb/openstack.net-virt-sfc-impl/${project.version}</bundle>
+ <configfile finalname="${configfile.directory}/openstack.net-virt-sfc.xml">mvn:org.opendaylight.ovsdb/openstack.net-virt-sfc-impl/${project.version}/xml/config</configfile>
+ </feature>
+ <feature name='odl-ovsdb-sfc-rest' version='${project.version}' description='OpenDaylight :: ovsdb-sfc :: REST'>
+ <feature version="${project.version}">odl-ovsdb-sfc</feature>
+ <feature version="${restconf.version}">odl-restconf</feature>
+ </feature>
+ <feature name='odl-ovsdb-sfc-ui' version='${project.version}' description='OpenDaylight :: ovsdb-sfc :: UI'>
+ <feature version="${project.version}">odl-ovsdb-sfc-rest</feature>
+ <feature version="${restconf.version}">odl-mdsal-apidocs</feature>
+ <feature version="${mdsal.version}">odl-mdsal-xsql</feature>
+ <feature version="${dlux.version}">odl-dlux-yangui</feature>
+ </feature>
+
+</features>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-parent</artifactId>
+ <version>0.4.0-SNAPSHOT</version>
+ <relativePath/>
+ </parent>
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.ovsdb</groupId>
+ <artifactId>openstack.net-virt-sfc-impl</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <packaging>bundle</packaging>
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <!-- Testing Dependencies -->
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-all</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<snapshot>
+ <required-capabilities>
+ <capability>urn:opendaylight:params:xml:ns:yang:netvirt:sfc?module=netvirt-sfc&amp;revision=2014-12-10</capability>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
+ </required-capabilities>
+ <configuration>
+
+ <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+ <module>
+ <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:netvirt:sfc">prefix:netvirt-sfc</type>
+ <name>netvirt-sfc-default</name>
+ <broker>
+ <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-broker-osgi-registry</type>
+ <name>binding-osgi-broker</name>
+ </broker>
+ </module>
+ </modules>
+ </data>
+ </configuration>
+</snapshot>
--- /dev/null
+/*
+ * Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.ovsdb.openstack.netvirt.sfc;
+
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
+import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class NetvirtSfcProvider implements BindingAwareProvider, AutoCloseable {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NetvirtSfcProvider.class);
+
+ @Override
+ public void onSessionInitiated(ProviderContext session) {
+ LOG.info("NetvirtSfcProvider Session Initiated");
+ }
+
+ @Override
+ public void close() throws Exception {
+ LOG.info("NetvirtSfcProvider Closed");
+ }
+
+}
--- /dev/null
+package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.netvirt.sfc.rev141210;
+
+import org.opendaylight.ovsdb.openstack.netvirt.sfc.NetvirtSfcProvider;
+
+public class NetvirtSfcModule extends org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.netvirt.sfc.rev141210.AbstractNetvirtSfcModule {
+ public NetvirtSfcModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+ super(identifier, dependencyResolver);
+ }
+
+ public NetvirtSfcModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.netvirt.sfc.rev141210.NetvirtSfcModule oldModule, java.lang.AutoCloseable oldInstance) {
+ super(identifier, dependencyResolver, oldModule, oldInstance);
+ }
+
+ @Override
+ public void customValidation() {
+ // add custom validation form module attributes here.
+ }
+
+ @Override
+ public java.lang.AutoCloseable createInstance() {
+ NetvirtSfcProvider provider = new NetvirtSfcProvider();
+ getBrokerDependency().registerProvider(provider);
+ return provider;
+ }
+
+}
--- /dev/null
+/*
+* Generated file
+*
+* Generated from: yang module name: netvirt-sfc yang module local name: netvirt-sfc
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Wed Sep 23 15:18:24 EDT 2015
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.netvirt.sfc.rev141210;
+public class NetvirtSfcModuleFactory extends org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.netvirt.sfc.rev141210.AbstractNetvirtSfcModuleFactory {
+
+}
--- /dev/null
+module netvirt-sfc {
+ yang-version 1;
+ namespace "urn:opendaylight:params:xml:ns:yang:netvirt:sfc";
+ prefix "netvirt-sfc";
+
+ import config { prefix config; revision-date 2013-04-05; }
+ import opendaylight-md-sal-binding { prefix md-sal-binding; revision-date 2013-10-28;}
+
+ description
+ "Service definition for netvirt sfc project";
+
+ revision "2014-12-10" {
+ description
+ "Initial revision";
+ }
+
+ identity netvirt-sfc {
+ base config:module-type;
+ config:java-name-prefix NetvirtSfc;
+ }
+
+ augment "/config:modules/config:module/config:configuration" {
+ case netvirt-sfc {
+ when "/config:modules/config:module/config:type = 'netvirt-sfc'";
+ container broker {
+ uses config:service-ref {
+ refine type {
+ mandatory true;
+ config:required-identity md-sal-binding:binding-broker-osgi-registry;
+ }
+ }
+ }
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.netvirt.sfc.rev141210;
+
+import org.junit.Test;
+
+public class NetvirtSfcModuleFactoryTest {
+ @Test
+ public void testFactoryConstructor() {
+ // ensure no exceptions on construction
+ new NetvirtSfcModuleFactory();
+ }
+}
--- /dev/null
+/*
+ * Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.netvirt.sfc.rev141210;
+
+import org.junit.Test;
+import org.opendaylight.controller.config.api.DependencyResolver;
+import org.opendaylight.controller.config.api.JmxAttribute;
+import org.opendaylight.controller.config.api.ModuleIdentifier;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.ovsdb.openstack.netvirt.sfc.NetvirtSfcProvider;
+
+import javax.management.ObjectName;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class NetvirtSfcModuleTest {
+ @Test
+ public void testCustomValidation() {
+ NetvirtSfcModule module = new NetvirtSfcModule(mock(ModuleIdentifier.class), mock(DependencyResolver.class));
+
+ // ensure no exceptions on validation
+ // currently this method is empty
+ module.customValidation();
+ }
+
+ @Test
+ public void testCreateInstance() throws Exception {
+ // configure mocks
+ DependencyResolver dependencyResolver = mock(DependencyResolver.class);
+ BindingAwareBroker broker = mock(BindingAwareBroker.class);
+ when(dependencyResolver.resolveInstance(eq(BindingAwareBroker.class), any(ObjectName.class), any(JmxAttribute.class))).thenReturn(broker);
+
+ // create instance of module with injected mocks
+ NetvirtSfcModule module = new NetvirtSfcModule(mock(ModuleIdentifier.class), dependencyResolver);
+
+ // getInstance calls resolveInstance to get the broker dependency and then calls createInstance
+ AutoCloseable closeable = module.getInstance();
+
+ // verify that the module registered the returned provider with the broker
+ verify(broker).registerProvider((NetvirtSfcProvider)closeable);
+
+ // ensure no exceptions on close
+ closeable.close();
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>mdsal-it-parent</artifactId>
+ <version>1.3.0-SNAPSHOT</version>
+ <relativePath/>
+ </parent>
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.ovsdb</groupId>
+ <artifactId>openstack.net-virt-sfc-it</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <packaging>bundle</packaging>
+
+ <properties>
+ <skipITs>false</skipITs>
+ <karaf.distro.groupId>org.opendaylight.ovsdb</karaf.distro.groupId>
+ <karaf.distro.artifactId>openstack.net-virt-sfc-karaf</karaf.distro.artifactId>
+ <karaf.distro.version>1.0.0-SNAPSHOT</karaf.distro.version>
+ <karaf.distro.type>zip</karaf.distro.type>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-features</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ </dependencies>
+
+</project>
--- /dev/null
+/*
+ * Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.ovsdb.openstack.netvirt.sfc;
+
+import static org.ops4j.pax.exam.CoreOptions.composite;
+import static org.ops4j.pax.exam.CoreOptions.maven;
+import static org.ops4j.pax.exam.karaf.options.KarafDistributionOption.editConfigurationFilePut;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.opendaylight.controller.mdsal.it.base.AbstractMdsalTestBase;
+import org.ops4j.pax.exam.Option;
+import org.ops4j.pax.exam.junit.PaxExam;
+import org.ops4j.pax.exam.karaf.options.LogLevelOption.LogLevel;
+import org.ops4j.pax.exam.options.MavenUrlReference;
+import org.ops4j.pax.exam.spi.reactors.ExamReactorStrategy;
+import org.ops4j.pax.exam.spi.reactors.PerClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@RunWith(PaxExam.class)
+@ExamReactorStrategy(PerClass.class)
+public class NetvirtSfcIT extends AbstractMdsalTestBase {
+ private static final Logger LOG = LoggerFactory.getLogger(NetvirtSfcIT.class);
+
+ @Override
+ public String getModuleName() {
+ return "netvirt-sfc";
+ }
+
+ @Override
+ public String getInstanceName() {
+ return "netvirt-sfc-default";
+ }
+
+ @Override
+ public MavenUrlReference getFeatureRepo() {
+ return maven()
+ .groupId("org.opendaylight.ovsdb")
+ .artifactId("openstack.net-virt-sfc-features")
+ .classifier("features")
+ .type("xml")
+ .versionAsInProject();
+ }
+
+ @Override
+ public String getFeatureName() {
+ return "odl-ovsdb-sfc-ui";
+ }
+
+ @Override
+ public Option getLoggingOption() {
+ Option option = editConfigurationFilePut(ORG_OPS4J_PAX_LOGGING_CFG,
+ logConfiguration(NetvirtSfcIT.class),
+ LogLevel.INFO.name());
+ option = composite(option, super.getLoggingOption());
+ return option;
+ }
+
+ @Test
+ public void testnetvirtsfcFeatureLoad() {
+ Assert.assertTrue(true);
+ }
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html INTERNAL
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>karaf-parent</artifactId>
+ <version>1.6.0-SNAPSHOT</version>
+ <relativePath/>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.ovsdb</groupId>
+ <artifactId>openstack.net-virt-sfc-karaf</artifactId>
+ <version>1.0.0-SNAPSHOT</version>
+ <name>${project.artifactId}</name>
+ <prerequisites>
+ <maven>3.1.1</maven>
+ </prerequisites>
+ <properties>
+ <karaf.localFeature>odl-ovsdb-sfc-ui</karaf.localFeature>
+ </properties>
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-artifacts</artifactId>
+ <version>${project.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+ <dependencies>
+ <dependency>
+ <!-- scope is compile so all features (there is only one) are installed
+ into startup.properties and the feature repo itself is not installed -->
+ <groupId>org.apache.karaf.features</groupId>
+ <artifactId>framework</artifactId>
+ <type>kar</type>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>openstack.net-virt-sfc-features</artifactId>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ </dependencies>
+ <!-- DO NOT install or deploy the karaf artifact -->
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright © 2015 Red Hat, Inc. and others. All rights reserved.
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html INTERNAL
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <parent>
+ <groupId>org.opendaylight.odlparent</groupId>
+ <artifactId>odlparent</artifactId>
+ <version>1.6.0-SNAPSHOT</version>
+ <relativePath/>
+ </parent>
+
+ <groupId>org.opendaylight.ovsdb</groupId>
+ <artifactId>openstack.net-virt-sfc-aggregator</artifactId>
+ <version>1.2.1-SNAPSHOT</version>
+ <packaging>pom</packaging>
+ <modelVersion>4.0.0</modelVersion>
+ <prerequisites>
+ <maven>3.1.1</maven>
+ </prerequisites>
+ <modules>
+ <module>api</module>
+ <module>impl</module>
+ <module>karaf</module>
+ <module>features</module>
+ <module>artifacts</module>
+ <module>it</module>
+ </modules>
+ <!-- DO NOT install or deploy the repo root pom as it's only needed to initiate a build -->
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-deploy-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-install-plugin</artifactId>
+ <configuration>
+ <skip>true</skip>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/ovsdb.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/ovsdb.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
+ </scm>
+</project>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>openstack.net-virt</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
<properties>
<networkconfig.neutron.version>0.6.0-SNAPSHOT</networkconfig.neutron.version>
- <ovsdb.utils.config.version>1.2.0-SNAPSHOT</ovsdb.utils.config.version>
- <ovsdb.utils.servicehelper.version>1.2.0-SNAPSHOT</ovsdb.utils.servicehelper.version>
+ <ovsdb.utils.config.version>1.2.1-SNAPSHOT</ovsdb.utils.config.version>
+ <ovsdb.utils.servicehelper.version>1.2.1-SNAPSHOT</ovsdb.utils.servicehelper.version>
<powermock.version>1.5.2</powermock.version>
<sonar-jacoco-listeners.version>2.4</sonar-jacoco-listeners.version>
<root.directory>${env.PWD}</root.directory>
<sonar.jacoco.itReportPath>${root.directory}/target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
</properties>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-artifacts</artifactId>
+ <version>2.0.0-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>mdsal-model-artifacts</artifactId>
+ <version>0.8.0-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+
<dependencies>
<dependency>
<groupId>org.apache.commons</groupId>
<dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>southbound-api</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>concepts</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-inet-types</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-topology</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>opendaylight-l2-types</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-yang-types-20130715</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-yang-types</artifactId>
</dependency>
<dependency>
}
public static InstanceIdentifier<Topology> createInstanceIdentifier() {
- InstanceIdentifier<Topology> path = InstanceIdentifier
+ return InstanceIdentifier
.create(NetworkTopology.class)
.child(Topology.class, new TopologyKey(OVSDB_TOPOLOGY_ID));
- return path;
}
public static InstanceIdentifier<Node> createInstanceIdentifier(NodeId nodeId) {
- InstanceIdentifier<Node> nodePath = InstanceIdentifier
+ return InstanceIdentifier
.create(NetworkTopology.class)
.child(Topology.class, new TopologyKey(OVSDB_TOPOLOGY_ID))
.child(Node.class,new NodeKey(nodeId));
- return nodePath;
}
public static InstanceIdentifier<Node> createInstanceIdentifier(NodeKey ovsdbNodeKey, String bridgeName) {
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
+
package org.opendaylight.ovsdb.openstack.netvirt.api;
import java.util.List;
import org.opendaylight.neutron.spi.Neutron_IPs;
/**
- * This interface allows egress Port Security flows to be written to devices
+ * This interface allows egress Port Security flows to be written to devices.
*/
public interface EgressAclProvider {
* @param attachedMac the attached mac
* @param localPort the local port
* @param securityGroup the security group
+ * @param srcAddressList the src address associated with the vm port
+ * @param write is this flow write or delete
*/
- void programPortSecurityACL(Long dpid, String segmentationId, String attachedMac,
- long localPort, NeutronSecurityGroup securityGroup);
+ void programPortSecurityAcl(Long dpid, String segmentationId, String attachedMac,
+ long localPort, NeutronSecurityGroup securityGroup,
+ List<Neutron_IPs> srcAddressList, boolean write);
/**
* Program fixed egress ACL rules that will be associated with the VM port when a vm is spawned.
*
* @param isComputePort indicates whether this port is a compute port or not
* @param write is this flow writing or deleting
*/
- void programFixedSecurityACL(Long dpid, String segmentationId, String attachedMac,
- long localPort, List<Neutron_IPs> srcAddressList, boolean isLastPortinBridge, boolean isComputePort, boolean write);
- void programFixedSecurityAcl(Long dpid, String segmentationId, String attachedMac, long localPort,
+ List<Neutron_IPs> srcAddressList, boolean isLastPortinBridge,
+ boolean isComputePort, boolean write);
}
+
package org.opendaylight.ovsdb.openstack.netvirt.api;
+import java.util.List;
+
import org.opendaylight.neutron.spi.NeutronSecurityGroup;
+import org.opendaylight.neutron.spi.Neutron_IPs;
/**
- * This interface allows ingress Port Security flows to be written to devices
+ * This interface allows ingress Port Security flows to be written to devices.
*/
public interface IngressAclProvider {
* @param attachedMac the attached mac
* @param localPort the local port
* @param securityGroup the security group
+ * @param srcAddressList the src address associated with the vm port
+ * @param write is this flow write or delete
*/
- void programPortSecurityACL(Long dpid, String segmentationId, String attachedMac,
- long localPort, NeutronSecurityGroup securityGroup);
+ void programPortSecurityAcl(Long dpid, String segmentationId, String attachedMac,
+ long localPort, NeutronSecurityGroup securityGroup,
+ List<Neutron_IPs> srcAddressList, boolean write);
/**
* Program fixed ingress ACL rules that will be associated with the VM port when a vm is spawned.
* *
* @param isComputePort indicates whether this port is a compute port or not
* @param write is this flow writing or deleting
*/
- void programFixedSecurityACL(Long dpid, String segmentationId,
- String attachedMac, long localPort, boolean isLastPortinSubnet, boolean isComputePort, boolean write);
+ void programFixedSecurityAcl(Long dpid, String segmentationId, String attachedMac, long localPort,
+ boolean isLastPortinSubnet, boolean isComputePort, boolean write);
}
+
import org.opendaylight.neutron.spi.NeutronPort;
import org.opendaylight.neutron.spi.NeutronSecurityGroup;
import org.opendaylight.neutron.spi.Neutron_IPs;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.*;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.OvsdbTerminationPointAugmentation;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
/**
- * Open vSwitch isolates Tenant Networks using VLANs on the Integration Bridge
+ * Open vSwitch isolates Tenant Networks using VLANs on the Integration Bridge.
* This class manages the provisioning of these VLANs
*/
public interface SecurityServicesManager {
* Gets security group in port.
*
* @param intf the intf
- * @return the security group in port
+ * @return the list of security group in port, returns empty list if no group associated.
*/
- NeutronSecurityGroup getSecurityGroupInPort(OvsdbTerminationPointAugmentation intf);
- /**
+ List<NeutronSecurityGroup> getSecurityGroupInPortList(OvsdbTerminationPointAugmentation intf);
+ /**
* Gets the DHCP server port corresponding to a network.
*
* @param intf the intf
- * @return the security group in port
+ * @return the dhcp server port
*/
- NeutronPort getDHCPServerPort(OvsdbTerminationPointAugmentation intf);
+ NeutronPort getDhcpServerPort(OvsdbTerminationPointAugmentation intf);
/**
* Is the port a compute port.
*
* @param intf the intf
- * @return the security group in port
+ * @return whether it is a compute port or not
*/
boolean isComputePort(OvsdbTerminationPointAugmentation intf);
/**
* Is this the last port in the subnet to which interface belongs to.
- *
+ * @param node The node to which the intf is connected.
* @param intf the intf
- * @return the security group in port
+ * @return whether last port in the subnet
*/
boolean isLastPortinSubnet(Node node, OvsdbTerminationPointAugmentation intf);
/**
* Is this the last port in the bridge to which interface belongs to.
- *
+ * @param node The node to which the intf is connected.
* @param intf the intf
- * @return the security group in port
+ * @return whether last port in bridge
*/
boolean isLastPortinBridge(Node node, OvsdbTerminationPointAugmentation intf);
/**
- * Returns the list of ip adddress assigned to the interface.
- *
+ * Returns the list of ip address assigned to the interface.
+ * @param node The node to which the intf is connected.
* @param intf the intf
- * @return the security group in port
+ * @return the list of ip address associated with the vm
+ */
+ List<Neutron_IPs> getIpAddressList(Node node, OvsdbTerminationPointAugmentation intf);
+ /**
+ * Get the list of vm belonging to a security group.
+ * @param srcAddressList the address list of the connected vm.
+ * @param securityGroupUuid the UUID of the remote security group.
+ * @return the list of all vm belonging to the security group UUID passed.
*/
- List<Neutron_IPs> getIpAddress(Node node, OvsdbTerminationPointAugmentation intf);
-}
\ No newline at end of file
+ List<Neutron_IPs> getVmListForSecurityGroup(List<Neutron_IPs> srcAddressList,
+ String securityGroupUuid);
+}
import java.util.List;
import java.util.Map;
+
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.OvsdbBridgeAugmentation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.OvsdbNodeAugmentation;
Long getOFPort(OvsdbTerminationPointAugmentation port);
Long getOFPort(Node bridgeNode, String portName);
DataBroker getDatabroker();
+ OvsdbBridgeAugmentation getBridgeFromConfig(Node ovsdbNode, String bridgeName);
}
Preconditions.checkNotNull(portNameExt);
if (southbound.isBridgeOnOvsdbNode(ovsdbNode, brExt)) {
- //this would look better if used a method like isNetworkPatchCreated()
- if (isPortOnBridge(bridgeNode, portNameInt)) {
- Node extBridgeNode = southbound.readBridgeNode(ovsdbNode, brExt);
- if (isPortOnBridge(extBridgeNode, portNameExt)) {
- ready = true;
- } else {
- LOG.trace("isNodeL3Ready: node: {}, {} missing",
- bridgeNode, portNameExt);
- }
- } else {
- LOG.trace("isNodeL3Ready: node: {}, {} missing",
- bridgeNode, portNameInt);
- }
+ ready = isNetworkPatchCreated(bridgeNode, southbound.readBridgeNode(ovsdbNode, brExt));
} else {
LOG.trace("isNodeL3Ready: node: {}, {} missing",
bridgeNode, brExt);
/**
* Returns true if a patch port exists between the Integration Bridge and Network Bridge
*/
- private boolean isNetworkPatchCreated(Node node, Node intBridge, Node netBridge) {
+ private boolean isNetworkPatchCreated(Node intBridge, Node netBridge) {
Preconditions.checkNotNull(configurationService);
boolean isPatchCreated = false;
/**
* Creates the Integration Bridge
*/
- private boolean createIntegrationBridge(Node ovsdbNode) throws Exception {
+ private boolean createIntegrationBridge(Node ovsdbNode) {
Preconditions.checkNotNull(configurationService);
if (!addBridge(ovsdbNode, configurationService.getIntegrationBridgeName())) {
return true;
}
- private boolean createExternalBridge(Node ovsdbNode) throws Exception {
+ private boolean createExternalBridge(Node ovsdbNode) {
Preconditions.checkNotNull(configurationService);
if (!addBridge(ovsdbNode, configurationService.getExternalBridgeName())) {
Interface br-int
type: internal
*/
- private boolean createBridges(Node bridgeNode, Node ovsdbNode, NeutronNetwork network) throws Exception {
+ private boolean createBridges(Node bridgeNode, Node ovsdbNode, NeutronNetwork network) {
Preconditions.checkNotNull(configurationService);
Preconditions.checkNotNull(networkingProviderManager);
/**
* Add a Port to a Bridge
*/
- private boolean addPortToBridge (Node node, String bridgeName, String portName) throws Exception {
+ private boolean addPortToBridge (Node node, String bridgeName, String portName) {
boolean rv = true;
if (southbound.extractTerminationPointAugmentation(node, portName) == null) {
/**
* Add a Patch Port to a Bridge
*/
- private boolean addPatchPort (Node node, String bridgeName, String portName, String peerPortName) throws Exception {
+ private boolean addPatchPort (Node node, String bridgeName, String portName, String peerPortName) {
boolean rv = true;
if (southbound.extractTerminationPointAugmentation(node, portName) == null) {
/**
* Add Bridge to a Node
*/
- private boolean addBridge(Node ovsdbNode, String bridgeName) throws Exception {
+ private boolean addBridge(Node ovsdbNode, String bridgeName) {
boolean rv = true;
- if (!southbound.isBridgeOnOvsdbNode(ovsdbNode, bridgeName)) {
+ if ((!southbound.isBridgeOnOvsdbNode(ovsdbNode, bridgeName)) ||
+ (southbound.getBridgeFromConfig(ovsdbNode, bridgeName) == null)) {
rv = southbound.addBridge(ovsdbNode, bridgeName, getControllerTarget(ovsdbNode));
}
return rv;
}
private String getControllerIPAddress() {
- InetAddress controllerIP = null;
-
String addressString = ConfigProperties.getProperty(this.getClass(), "ovsdb.controller.address");
if (addressString != null) {
try {
- controllerIP = InetAddress.getByName(addressString);
- if (controllerIP != null) {
+ if (InetAddress.getByName(addressString) != null) {
return addressString;
}
} catch (UnknownHostException e) {
addressString = ConfigProperties.getProperty(this.getClass(), "of.address");
if (addressString != null) {
try {
- controllerIP = InetAddress.getByName(addressString);
- if (controllerIP != null) {
+ if (InetAddress.getByName(addressString) != null) {
return addressString;
}
} catch (UnknownHostException e) {
private volatile GatewayMacResolver gatewayMacResolver;
private class FloatIpData {
- private final Long dpid; // br-int of node where floating ip is associated with tenant port
- private final Long ofPort; // patch port in br-int used to reach br-ex
- private final String segId; // segmentation id of the net where fixed ip is instantiated
- private final String macAddress; // mac address assigned to neutron port of floating ip
+ // br-int of node where floating ip is associated with tenant port
+ private final Long dpid;
+ // patch port in br-int used to reach br-ex
+ private final Long ofPort;
+ // segmentation id of the net where fixed ip is instantiated
+ private final String segId;
+ // mac address assigned to neutron port of floating ip
+ private final String macAddress;
private final String floatingIpAddress;
- private final String fixedIpAddress; // ip address given to tenant vm
+ // ip address given to tenant vm
+ private final String fixedIpAddress;
private final String neutronRouterMac;
FloatIpData(final Long dpid, final Long ofPort, final String segId, final String macAddress,
}else{
NeutronNetwork externalNetwork = neutronNetworkCache.getNetwork(neutronPort.getNetworkUUID());
- if(externalNetwork != null){
- if(externalNetwork.isRouterExternal()){
- final NeutronSubnet externalSubnet = getExternalNetworkSubnet(neutronPort);
- // TODO support IPv6
- if (externalSubnet != null &&
+ if (externalNetwork != null && externalNetwork.isRouterExternal()) {
+ final NeutronSubnet externalSubnet = getExternalNetworkSubnet(neutronPort);
+ // TODO support IPv6
+ if (externalSubnet != null &&
externalSubnet.getIpVersion() == 4) {
- gatewayMacResolver.stopPeriodicRefresh(new Ipv4Address(externalSubnet.getGatewayIP()));
- }
+ gatewayMacResolver.stopPeriodicRefresh(new Ipv4Address(externalSubnet.getGatewayIP()));
}
}
}
for (Neutron_IPs neutronIP : neutronPort.getFixedIPs()) {
NeutronRouter_Interface neutronRouterInterface =
new NeutronRouter_Interface(neutronIP.getSubnetUUID(), neutronPort.getPortUUID());
- neutronRouterInterface.setID(neutronIP.getSubnetUUID()); // id of router interface to be same as subnet
+ // id of router interface to be same as subnet
+ neutronRouterInterface.setID(neutronIP.getSubnetUUID());
neutronRouterInterface.setTenantID(neutronPort.getTenantID());
this.handleNeutronRouterInterfaceEvent(null /*neutronRouter*/, neutronRouterInterface, action);
// need to do this check here because a router interface is not added to a node until tenant becomes needed
// there.
//
- if (!isDelete) {
- if (neutronPort.getFixedIPs() != null) {
- for (Neutron_IPs neutronIP : neutronPort.getFixedIPs()) {
- NeutronRouter_Interface neutronRouterInterface =
+ if (!isDelete && neutronPort.getFixedIPs() != null) {
+ for (Neutron_IPs neutronIP : neutronPort.getFixedIPs()) {
+ NeutronRouter_Interface neutronRouterInterface =
subnetIdToRouterInterfaceCache.get(neutronIP.getSubnetUUID());
- if (neutronRouterInterface != null) {
- this.handleNeutronRouterInterfaceEvent(null /*neutronRouter*/, neutronRouterInterface, action);
- }
- }
- }
- }
+ if (neutronRouterInterface != null) {
+ this.handleNeutronRouterInterfaceEvent(null /*neutronRouter*/, neutronRouterInterface, action);
+ }
+ }
+ }
this.updateL3ForNeutronPort(neutronPort, isDelete);
}
}
// this.programFlowsForFloatingIP(neutronFloatingIP, action == Action.DELETE);
if (action != Action.DELETE) {
- programFlowsForFloatingIPArpAdd(neutronFloatingIP); // must be first, as it updates floatIpDataMapCache
+ // must be first, as it updates floatIpDataMapCache
+ programFlowsForFloatingIPArpAdd(neutronFloatingIP);
programFlowsForFloatingIPInbound(neutronFloatingIP, Action.ADD);
programFlowsForFloatingIPOutbound(neutronFloatingIP, Action.ADD);
programFlowsForFloatingIPOutbound(neutronFloatingIP, Action.DELETE);
programFlowsForFloatingIPInbound(neutronFloatingIP, Action.DELETE);
- programFlowsForFloatingIPArpDelete(neutronFloatingIP.getID()); // must be last, as it updates floatIpDataMapCache
+ // must be last, as it updates floatIpDataMapCache
+ programFlowsForFloatingIPArpDelete(neutronFloatingIP.getID());
}
}
// will look at desired action for node.
final String cacheKey = node.getNodeId().getValue() + ":" + providerSegmentationId + ":" + ipStr;
- final Boolean isProgrammed = l3ForwardingCache.contains(cacheKey);
+ final boolean isProgrammed = l3ForwardingCache.contains(cacheKey);
- if (actionForNode == Action.DELETE && isProgrammed == Boolean.FALSE) {
+ if (actionForNode == Action.DELETE && !isProgrammed) {
LOG.trace("programL3ForwardingStage1 for node {} providerId {} mac {} ip {} action {} is already done",
node.getNodeId().getValue(), providerSegmentationId, macAddress, ipStr, actionForNode);
return;
}
- if (actionForNode == Action.ADD && isProgrammed == Boolean.TRUE) {
+ if (actionForNode == Action.ADD && isProgrammed) {
LOG.trace("programL3ForwardingStage1 for node {} providerId {} mac {} ip {} action {} is already done",
node.getNodeId().getValue(), providerSegmentationId, macAddress, ipStr, actionForNode);
return;
final String cacheKey = node.getNodeId().getValue() + ":" +
sourceSegmentationId + ":" + destinationSegmentationId + ":" +
ipStr + "/" + Integer.toString(mask);
- final Boolean isProgrammed = routerInterfacesCache.contains(cacheKey);
+ final boolean isProgrammed = routerInterfacesCache.contains(cacheKey);
- if (actionForNode == Action.DELETE && isProgrammed == Boolean.FALSE) {
+ if (actionForNode == Action.DELETE && !isProgrammed) {
LOG.trace("programRouterInterfaceStage1 for node {} sourceSegId {} destSegId {} mac {} ip {} mask {}" +
" action {} is already done",
node.getNodeId().getValue(), sourceSegmentationId, destinationSegmentationId,
macAddress, ipStr, mask, actionForNode);
return;
}
- if (actionForNode == Action.ADD && isProgrammed == Boolean.TRUE) {
+ if (actionForNode == Action.ADD && isProgrammed) {
LOG.trace("programRouterInterfaceStage1 for node {} sourceSegId {} destSegId {} mac {} ip {} mask {}" +
" action {} is already done",
node.getNodeId().getValue(), sourceSegmentationId, destinationSegmentationId,
// will look at desired action for node.
//
final String cacheKey = dpid + ":" + segOrOfPort + ":" + ipStr;
- final Boolean isProgrammed = staticArpEntryCache.contains(cacheKey);
+ final boolean isProgrammed = staticArpEntryCache.contains(cacheKey);
- if (action == Action.DELETE && isProgrammed == Boolean.FALSE) {
+ if (action == Action.DELETE && !isProgrammed) {
LOG.trace("programStaticArpStage1 dpid {} segOrOfPort {} mac {} ip {} action {} is already done",
dpid, segOrOfPort, macAddress, ipStr, action);
return true;
}
- if (action == Action.ADD && isProgrammed == Boolean.TRUE) {
+ if (action == Action.ADD && isProgrammed) {
LOG.trace("programStaticArpStage1 dpid {} segOrOfPort {} mac {} ip {} action {} is already done",
dpid, segOrOfPort, macAddress, ipStr, action);
return true;
// will look at desired action for node.
//
final String cacheKey = dpid + ":" + inboundOFPort + ":" + providerSegmentationId + ":" + matchAddress;
- final Boolean isProgrammed = inboundIpRewriteCache.contains(cacheKey);
+ final boolean isProgrammed = inboundIpRewriteCache.contains(cacheKey);
- if (action == Action.DELETE && isProgrammed == Boolean.FALSE) {
+ if (action == Action.DELETE && !isProgrammed) {
LOG.trace("programInboundIpRewriteStage1 dpid {} OFPort {} seg {} matchAddress {} rewriteAddress {}" +
" action {} is already done",
dpid, inboundOFPort, providerSegmentationId, matchAddress, rewriteAddress, action);
return true;
}
- if (action == Action.ADD && isProgrammed == Boolean.TRUE) {
+ if (action == Action.ADD && isProgrammed) {
LOG.trace("programInboundIpRewriteStage1 dpid {} OFPort {} seg {} matchAddress {} rewriteAddress {}" +
" action is already done",
dpid, inboundOFPort, providerSegmentationId, matchAddress, rewriteAddress, action);
// will look at desired action for node.
//
final String cacheKey = node.getNodeId().getValue() + ":" + providerSegmentationId + ":" + cidr;
- final Boolean isProgrammed = outboundIpRewriteExclusionCache.contains(cacheKey);
+ final boolean isProgrammed = outboundIpRewriteExclusionCache.contains(cacheKey);
- if (actionForRewriteExclusion == Action.DELETE && isProgrammed == Boolean.FALSE) {
+ if (actionForRewriteExclusion == Action.DELETE && !isProgrammed) {
LOG.trace("programIpRewriteExclusionStage1 node {} providerId {} cidr {} action {} is already done",
node.getNodeId().getValue(), providerSegmentationId, cidr, actionForRewriteExclusion);
return;
}
- if (actionForRewriteExclusion == Action.ADD && isProgrammed == Boolean.TRUE) {
+ if (actionForRewriteExclusion == Action.ADD && isProgrammed) {
LOG.trace("programIpRewriteExclusionStage1 node {} providerId {} cidr {} action {} is already done",
node.getNodeId().getValue(), providerSegmentationId, cidr, actionForRewriteExclusion);
return;
// will look at desired action for node.
//
final String cacheKey = fid.dpid + ":" + fid.segId + ":" + fid.fixedIpAddress;
- final Boolean isProgrammed = outboundIpRewriteCache.contains(cacheKey);
+ final boolean isProgrammed = outboundIpRewriteCache.contains(cacheKey);
- if (action == Action.DELETE && isProgrammed == Boolean.FALSE) {
+ if (action == Action.DELETE && !isProgrammed) {
LOG.trace("programOutboundIpRewriteStage1 dpid {} seg {} fixedIpAddress {} floatIp {} action {} " +
"is already done",
fid.dpid, fid.segId, fid.fixedIpAddress, fid.floatingIpAddress, action);
return;
}
- if (action == Action.ADD && isProgrammed == Boolean.TRUE) {
+ if (action == Action.ADD && isProgrammed) {
LOG.trace("programOutboundIpRewriteStage1 dpid {} seg {} fixedIpAddress {} floatIp {} action {} " +
"is already done",
fid.dpid, fid.segId, fid.fixedIpAddress, fid.floatingIpAddress, action);
for (Neutron_IPs neutronIPs : gatewayPort.getFixedIPs()) {
String subnetUUID = neutronIPs.getSubnetUUID();
NeutronSubnet extSubnet = neutronSubnetCache.getSubnet(subnetUUID);
- if (extSubnet.getGatewayIP() == null) {
- continue;
+ if (extSubnet != null && extSubnet.getGatewayIP() != null) {
+ return extSubnet;
}
- return extSubnet;
+ if (extSubnet == null) {
+ // TODO: when subnet is created, try again.
+ LOG.debug("subnet {} is not found", subnetUuid);
+ }
}
return null;
}
package org.opendaylight.ovsdb.openstack.netvirt.impl;
+import java.util.ArrayList;
import java.util.List;
import org.opendaylight.neutron.spi.INeutronPortCRUD;
import org.opendaylight.ovsdb.openstack.netvirt.api.SecurityServicesManager;
import org.opendaylight.ovsdb.openstack.netvirt.api.Southbound;
import org.opendaylight.ovsdb.utils.servicehelper.ServiceHelper;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.*;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ovsdb.rev150105.OvsdbTerminationPointAugmentation;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
import org.osgi.framework.BundleContext;
private volatile INeutronSubnetCRUD neutronSubnetCache;
private volatile Southbound southbound;
- /**
- * Is security group ready.
- *
- * @param terminationPointAugmentation the intf
- * @return the boolean
- */
+ @Override
public boolean isPortSecurityReady(OvsdbTerminationPointAugmentation terminationPointAugmentation) {
if (neutronPortCache == null) {
LOG.error("neutron port is null");
}
LOG.trace("isPortSecurityReady for {}", terminationPointAugmentation.getName());
String neutronPortId = southbound.getInterfaceExternalIdsValue(terminationPointAugmentation,
- Constants.EXTERNAL_ID_INTERFACE_ID);
+ Constants.EXTERNAL_ID_INTERFACE_ID);
if (neutronPortId == null) {
return false;
}
List<NeutronSecurityGroup> securityGroups = neutronPort.getSecurityGroups();
if (securityGroups.isEmpty()) {
LOG.debug("Check for device: {} does not contain a Security Group for port: {}", deviceOwner,
- neutronPortId);
+ neutronPortId);
return false;
}
- LOG.debug("Security Group Check {} DOES contain a Neutron Security Group", neutronPortId);
+ LOG.debug("Security Group Check {} does contain a Neutron Security Group", neutronPortId);
return true;
}
- /**
- * Gets security group in port.
- *
- * @param terminationPointAugmentation the intf
- * @return the security group in port
- */
- public NeutronSecurityGroup getSecurityGroupInPort(OvsdbTerminationPointAugmentation terminationPointAugmentation) {
+ @Override
+ public List<NeutronSecurityGroup> getSecurityGroupInPortList(OvsdbTerminationPointAugmentation
+ terminationPointAugmentation) {
+ List<NeutronSecurityGroup> neutronSecurityGroups = new ArrayList<NeutronSecurityGroup>();
if (neutronPortCache == null) {
LOG.error("neutron port is null");
- return null;
+ return neutronSecurityGroups;
}
LOG.trace("isPortSecurityReady for {}", terminationPointAugmentation.getName());
String neutronPortId = southbound.getInterfaceExternalIdsValue(terminationPointAugmentation,
- Constants.EXTERNAL_ID_INTERFACE_ID);
+ Constants.EXTERNAL_ID_INTERFACE_ID);
if (neutronPortId == null) {
- return null;
+ return neutronSecurityGroups;
}
NeutronPort neutronPort = neutronPortCache.getPort(neutronPortId);
if (neutronPort == null) {
- return null;
+ return neutronSecurityGroups;
}
+ neutronSecurityGroups = neutronPort.getSecurityGroups();
+ return neutronSecurityGroups;
- List<NeutronSecurityGroup> neutronSecurityGroups = neutronPort.getSecurityGroups();
- if (neutronSecurityGroups != null) {
- return (NeutronSecurityGroup) neutronSecurityGroups.toArray()[0];
- } else {
- return null;
- }
}
@Override
- public NeutronPort getDHCPServerPort(
- OvsdbTerminationPointAugmentation terminationPointAugmentation) {
+ public NeutronPort getDhcpServerPort(OvsdbTerminationPointAugmentation terminationPointAugmentation) {
if (neutronPortCache == null) {
LOG.error("getDHCPServerPort: neutron port is null");
return null;
}
LOG.trace("getDHCPServerPort for {}",
- terminationPointAugmentation.getName());
- String neutronPortId = southbound.getInterfaceExternalIdsValue(
- terminationPointAugmentation,
- Constants.EXTERNAL_ID_INTERFACE_ID);
- if (neutronPortId == null) {
- return null;
- }
- NeutronPort neutronPort = neutronPortCache.getPort(neutronPortId);
- if (neutronPort == null) {
- LOG.error("getDHCPServerPort: neutron port of {} is not found", neutronPortId);
- return null;
- }
- //Since all the fixed ip assigned to a port should be from the same network, first port is sufficient.
- List<Neutron_IPs> fixedIps = neutronPort.getFixedIPs();
- if(null==fixedIps || 0 == fixedIps.size() )
- {
- LOG.error("getDHCPServerPort: No fixed ip is assigned");
- return null;
- }
-
- String networkUUID = neutronPort.getNetworkUUID();
- for (NeutronPort port : neutronPortCache.getAllPorts()) {
- if (port.getNetworkUUID() == networkUUID && port.getDeviceOwner().contains("dhcp")) {
- return port;
+ terminationPointAugmentation.getName());
+ try {
+ String neutronPortId = southbound.getInterfaceExternalIdsValue(terminationPointAugmentation,
+ Constants.EXTERNAL_ID_INTERFACE_ID);
+ if (neutronPortId == null) {
+ return null;
}
+ NeutronPort neutronPort = neutronPortCache.getPort(neutronPortId);
+ if (neutronPort == null) {
+ LOG.error("getDHCPServerPort: neutron port of {} is not found", neutronPortId);
+ return null;
+ }
+ /* if the current port is a DHCP port, return the same*/
+ if (neutronPort.getDeviceOwner().contains("dhcp")) {
+ return neutronPort;
+ }
+ /*Since all the fixed ip assigned to a port should be
+ *from the same network, first port is sufficient.*/
+ List<Neutron_IPs> fixedIps = neutronPort.getFixedIPs();
+ if (null == fixedIps || 0 == fixedIps.size()) {
+ LOG.error("getDHCPServerPort: No fixed ip is assigned");
+ return null;
+ }
+ /* Get all the ports in the subnet and identify the dhcp port*/
+ String subnetUuid = fixedIps.iterator().next().getSubnetUUID();
+ NeutronSubnet neutronSubnet = neutronSubnetCache.getSubnet(subnetUuid);
+ List<NeutronPort> ports = neutronSubnet.getPortsInSubnet();
+ for (NeutronPort port : ports) {
+ if (port.getDeviceOwner().contains("dhcp")) {
+ return port;
+ }
+ }
+ } catch (Exception e) {
+ LOG.error("getDHCPServerPort:getDHCPServerPort failed due to ", e);
+ return null;
}
return null;
}
LOG.trace("isComputePort for {}", terminationPointAugmentation.getName());
String neutronPortId = southbound.getInterfaceExternalIdsValue(terminationPointAugmentation,
- Constants.EXTERNAL_ID_INTERFACE_ID);
+ Constants.EXTERNAL_ID_INTERFACE_ID);
if (neutronPortId == null) {
return false;
}
if (neutronPort == null) {
return false;
}
+ /*Check the device owner and if it contains compute to identify
+ * whether it is a compute port.*/
String deviceOwner = neutronPort.getDeviceOwner();
if (!deviceOwner.contains("compute")) {
- LOG.debug("isComputePort : Port {} is not a DHCP server port", neutronPortId, deviceOwner);
+ LOG.debug("isComputePort : Port {} is not a compute port for device owner {}",
+ neutronPortId, deviceOwner);
return false;
}
return true;
LOG.error("isLastPortinSubnet: neutron port is null");
return false;
}
- LOG.trace("isLastPortinSubnet: for {}", terminationPointAugmentation.getName());
- String neutronPortId = southbound.getInterfaceExternalIdsValue(terminationPointAugmentation,
- Constants.EXTERNAL_ID_INTERFACE_ID);
- if (neutronPortId == null) {
- return false;
- }
- NeutronPort neutronPort = neutronPortCache.getPort(neutronPortId);
- if (neutronPort == null) {
- LOG.error("isLastPortinSubnet: neutron port of {} is not found", neutronPortId);
- return false;
- }
- List<Neutron_IPs> neutronPortFixedIp = neutronPort.getFixedIPs();
- if(null == neutronPortFixedIp || neutronPortFixedIp.isEmpty()) {
- return false;
- }
- List<TerminationPoint> terminationPoints = node.getTerminationPoint();
- if(terminationPoints != null && !terminationPoints.isEmpty()) {
- for(TerminationPoint tp : terminationPoints) {
- OvsdbTerminationPointAugmentation ovsdbTerminationPointAugmentation =
- tp.getAugmentation(OvsdbTerminationPointAugmentation.class);
- if (ovsdbTerminationPointAugmentation != null && !ovsdbTerminationPointAugmentation.
- getName().equals(Constants.INTEGRATION_BRIDGE)) {
- String portId = southbound.getInterfaceExternalIdsValue(ovsdbTerminationPointAugmentation,
- Constants.EXTERNAL_ID_INTERFACE_ID);
- if(null!=portId) {
- NeutronPort port = neutronPortCache.getPort(portId);
- if(null!=port) {
- if(!(port.getID().equals(neutronPort.getID())) && port.getDeviceOwner().contains("compute")) {
+ try {
+ LOG.trace("isLastPortinSubnet: for {}", terminationPointAugmentation.getName());
+ String neutronPortId = southbound.getInterfaceExternalIdsValue(terminationPointAugmentation,
+ Constants.EXTERNAL_ID_INTERFACE_ID);
+ if (neutronPortId == null) {
+ return false;
+ }
+ NeutronPort neutronPort = neutronPortCache.getPort(neutronPortId);
+ if (neutronPort == null) {
+ LOG.error("isLastPortinSubnet: neutron port of {} is not found", neutronPortId);
+ return false;
+ }
+ List<Neutron_IPs> neutronPortFixedIp = neutronPort.getFixedIPs();
+ if (null == neutronPortFixedIp || neutronPortFixedIp.isEmpty()) {
+ return false;
+ }
+ /*Get all the ports in the current node and check whether there
+ * is any port belonging to the same subnet of the input
+ */
+ List<TerminationPoint> terminationPoints = node.getTerminationPoint();
+ if (terminationPoints != null && !terminationPoints.isEmpty()) {
+ for (TerminationPoint tp : terminationPoints) {
+ OvsdbTerminationPointAugmentation ovsdbTerminationPointAugmentation =
+ tp.getAugmentation(OvsdbTerminationPointAugmentation.class);
+ if (ovsdbTerminationPointAugmentation != null && !ovsdbTerminationPointAugmentation
+ .getName().equals(Constants.INTEGRATION_BRIDGE)) {
+ String portId = southbound.getInterfaceExternalIdsValue(ovsdbTerminationPointAugmentation,
+ Constants.EXTERNAL_ID_INTERFACE_ID);
+ if (null != portId) {
+ NeutronPort port = neutronPortCache.getPort(portId);
+ if (null != port && !(port.getID().equals(neutronPort.getID()))
+ && port.getDeviceOwner().contains("compute")) {
List<Neutron_IPs> portFixedIp = port.getFixedIPs();
- if(null == portFixedIp || portFixedIp.isEmpty()) {
+ if (null == portFixedIp || portFixedIp.isEmpty()) {
return false;
}
- if(portFixedIp.iterator().next().getSubnetUUID().equals
- (neutronPort.getFixedIPs().iterator().next().getSubnetUUID())) {
+ if (portFixedIp.iterator().next().getSubnetUUID()
+ .equals(neutronPort.getFixedIPs().iterator().next().getSubnetUUID())) {
+ LOG.trace("isLastPortinSubnet: Port is not the only port.");
return false;
}
}
}
}
}
+ } catch (Exception e) {
+ LOG.error("isLastPortinSubnet: isLastPortinSubnet failed due to ", e);
+ return false;
}
return true;
}
public boolean isLastPortinBridge(Node node, OvsdbTerminationPointAugmentation terminationPointAugmentation) {
LOG.trace("isLastPortinBridge: for {}", terminationPointAugmentation.getName());
List<TerminationPoint> terminationPoints = node.getTerminationPoint();
- if(terminationPoints != null && !terminationPoints.isEmpty()){
- for(TerminationPoint tp : terminationPoints){
+ /*Check whether the node has any port other than br-int*/
+ if (terminationPoints != null && !terminationPoints.isEmpty()) {
+ for (TerminationPoint tp : terminationPoints) {
OvsdbTerminationPointAugmentation ovsdbTerminationPointAugmentation =
tp.getAugmentation(OvsdbTerminationPointAugmentation.class);
- if(null!=ovsdbTerminationPointAugmentation)
- {
- if(!(ovsdbTerminationPointAugmentation.getName().equals(Constants.INTEGRATION_BRIDGE))
- && !(terminationPointAugmentation.getInterfaceUuid().equals
- (ovsdbTerminationPointAugmentation.getInterfaceUuid()))) {
- return false;
- }
+ if (null != ovsdbTerminationPointAugmentation
+ && !(ovsdbTerminationPointAugmentation.getName().equals(Constants.INTEGRATION_BRIDGE))
+ && !(terminationPointAugmentation.getInterfaceUuid()
+ .equals(ovsdbTerminationPointAugmentation.getInterfaceUuid()))) {
+                LOG.debug("isLastPortinBridge: {} is not the last port in the bridge",
+                        terminationPointAugmentation.getName());
+ return false;
}
}
}
}
@Override
- public List<Neutron_IPs> getIpAddress(Node node,
- OvsdbTerminationPointAugmentation terminationPointAugmentation) {
+ public List<Neutron_IPs> getIpAddressList(Node node,
+ OvsdbTerminationPointAugmentation terminationPointAugmentation) {
if (neutronPortCache == null) {
LOG.error("getIpAddress: neutron port is null");
return null;
}
LOG.trace("getIpAddress: for {}", terminationPointAugmentation.getName());
String neutronPortId = southbound.getInterfaceExternalIdsValue(terminationPointAugmentation,
- Constants.EXTERNAL_ID_INTERFACE_ID);
+ Constants.EXTERNAL_ID_INTERFACE_ID);
if (neutronPortId == null) {
return null;
}
return neutronPort.getFixedIPs();
}
+ @Override
+ public List<Neutron_IPs> getVmListForSecurityGroup(List<Neutron_IPs> srcAddressList, String securityGroupUuid) {
+ List<Neutron_IPs> vmListForSecurityGroup = new ArrayList<Neutron_IPs>();
+        /* For every port, check whether its security group list contains the
+         * current security group. */
+ try {
+ for (NeutronPort neutronPort:neutronPortCache.getAllPorts()) {
+ if (!neutronPort.getDeviceOwner().contains("compute")) {
+                    LOG.debug("getVMListForSecurityGroup : the port {} is not a "
+                            + "compute port; it belongs to {}", neutronPort.getID(), neutronPort.getDeviceOwner());
+ continue;
+ }
+ List<NeutronSecurityGroup> securityGroups = neutronPort.getSecurityGroups();
+ if (null != securityGroups) {
+ for (NeutronSecurityGroup securityGroup:securityGroups) {
+ if (securityGroup.getSecurityGroupUUID().equals(securityGroupUuid)
+ && !neutronPort.getFixedIPs().containsAll(srcAddressList)) {
+                        LOG.debug("getVMListForSecurityGroup : adding compute port "
+                                + "with fixed IPs {}", neutronPort.getFixedIPs());
+ vmListForSecurityGroup.addAll(neutronPort.getFixedIPs());
+ }
+ }
+ }
+
+ }
+ } catch (Exception e) {
+ LOG.error("getVMListForSecurityGroup: getVMListForSecurityGroup"
+ + " failed due to ", e);
+ return null;
+ }
+ return vmListForSecurityGroup;
+
+ }
+
@Override
public void setDependencies(BundleContext bundleContext, ServiceReference serviceReference) {
southbound =
public void setDependencies(Object impl) {
if (impl instanceof INeutronPortCRUD) {
neutronPortCache = (INeutronPortCRUD)impl;
+ } else if (impl instanceof INeutronSubnetCRUD) {
+ neutronSubnetCache = (INeutronSubnetCRUD) impl;
}
else if (impl instanceof INeutronSubnetCRUD) {
neutronSubnetCache = (INeutronSubnetCRUD) impl;
}
return ofPort;
}
+ public OvsdbBridgeAugmentation getBridgeFromConfig(Node node, String bridge) {
+ OvsdbBridgeAugmentation ovsdbBridgeAugmentation = null;
+ InstanceIdentifier<Node> bridgeIid =
+ MdsalHelper.createInstanceIdentifier(node.getKey(), bridge);
+ Node bridgeNode = mdsalUtils.read(LogicalDatastoreType.CONFIGURATION, bridgeIid);
+ if (bridgeNode != null) {
+ ovsdbBridgeAugmentation = bridgeNode.getAugmentation(OvsdbBridgeAugmentation.class);
+ }
+ return ovsdbBridgeAugmentation;
+ }
}
}
/**
- * Test method {@link SecurityServicesImpl#getSecurityGroupInPort(Interface)}
+ * Test method {@link SecurityServicesImpl#getSecurityGroupInPortList(Interface)}
*/
@Test
public void testSecurityGroupInPort(){
- assertEquals("Error, did not return the good neutronSecurityGroup of securityGroups", neutronSecurityGroup, securityServicesImpl.getSecurityGroupInPort(mock(OvsdbTerminationPointAugmentation.class)));
+ assertEquals("Error, did not return the good neutronSecurityGroup of securityGroups",
+ neutronSecurityGroup, securityServicesImpl.getSecurityGroupInPortList(mock(OvsdbTerminationPointAugmentation.class)).get(0));
}
@Test
</parent>
<artifactId>openstack</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<name>${project.artifactId}</name>
<packaging>pom</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<module>net-virt</module>
<module>net-virt-providers</module>
<module>net-virt-it</module>
+ <module>net-virt-sfc</module>
</modules>
</project>
<artifactId>concepts</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.mdsal</groupId>
<artifactId>yang-binding</artifactId>
</dependency>
<dependency>
<artifactId>yang-common</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-inet-types</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>opendaylight-l2-types</artifactId>
</dependency>
<dependency>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-bundle-plugin</artifactId>
- <version>${bundle.plugin.version}</version>
<extensions>true</extensions>
<configuration>
<instructions>
<version>${controller.config.version}</version>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools</groupId>
+ <groupId>org.opendaylight.mdsal</groupId>
<artifactId>maven-sal-api-gen-plugin</artifactId>
<version>${yangtools.version}</version>
</dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>ovsdb-artifacts</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>pom</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>commons</artifactId>
- <version>1.4.0-SNAPSHOT</version>
- <relativePath>../commons/parent</relativePath>
- </parent>
-
- <artifactId>ovsdb-plugin-compatibility-layer</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <name>${project.artifactId}</name>
- <packaging>bundle</packaging>
- <description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
- <url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
- <licenses>
- <license>
- <name>Eclipse Public License v1.0</name>
- <url>http://www.eclipse.org/legal/epl-v10.html</url>
- </license>
- </licenses>
- <developers>
- <developer>
- <name>Sam Hague</name>
- <email>shague@gmail.com</email>
- <url>https://github.com/shague</url>
- </developer>
- </developers>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/ovsdb.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/ovsdb.git</developerConnection>
- <tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
- </scm>
-
- <dependencies>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.dependencymanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>plugin</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>library</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- </dependency>
- <dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-simple</artifactId>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
- <build>
- <testResources>
- <testResource>
- <filtering>true</filtering>
- <directory>src/test/resources</directory>
- </testResource>
- </testResources>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.4.0</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Import-Package>
- org.opendaylight.controller.sal.core,
- org.opendaylight.controller.sal.utils,
- org.opendaylight.ovsdb.plugin.api,
- org.apache.commons.lang3.builder,
- org.apache.commons.lang3.tuple,
- org.apache.felix.dm,
- org.slf4j,
- org.eclipse.osgi.framework.console,
- org.osgi.framework,
- javax.net.ssl,
- *
- </Import-Package>
- <Embed-Dependency>commons-codec,javax.servlet-api,portlet-api,commons-collections,utils.config;type=!pom;inline=false</Embed-Dependency>
- <Embed-Transitive>true</Embed-Transitive>
- <Bundle-Activator>org.opendaylight.ovsdb.compatibility.plugin.internal.Activator</Bundle-Activator>
- <Private-Package>
- org.opendaylight.ovsdb.compatibility.plugin.impl,
- org.opendaylight.ovsdb.compatibility.plugin.internal
- </Private-Package>
- <Export-Package>
- org.opendaylight.ovsdb.compatibility.plugin,
- org.opendaylight.ovsdb.compatibility.plugin.api,
- org.opendaylight.ovsdb.compatibility.plugin.error
- </Export-Package>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-failsafe-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.jacoco</groupId>
- <artifactId>jacoco-maven-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.compatibility.plugin.api;
-
-import java.util.ArrayList;
-import java.util.List;
-import org.opendaylight.controller.sal.core.ConstructionException;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class NodeUtils {
- private static final Logger LOG = LoggerFactory.getLogger(NodeUtils.class);
-
- public static String getId (String identifier) {
- String id = identifier;
-
- String[] pair = identifier.split("\\|");
- if (pair[0].equals("OVS")) {
- id = pair[1];
- }
- return id;
- }
-
- public static Node getMdsalNode (org.opendaylight.controller.sal.core.Node salNode) {
- String identifier = salNode.getNodeIDString();
-
- NodeId nodeId = new NodeId("OVS" + "|" + identifier);
- NodeKey nodeKey = new NodeKey(nodeId);
- Node node = new NodeBuilder()
- .setId(nodeId)
- .setKey(nodeKey)
- .build();
-
- return node;
- }
-
- public static org.opendaylight.controller.sal.core.Node getSalNode (Node mdsalNode) {
- String identifier = NodeUtils.getId(mdsalNode.getId().getValue());
- org.opendaylight.controller.sal.core.Node node = null;
-
- try {
- node = new org.opendaylight.controller.sal.core.Node("OVS", identifier);
- } catch (ConstructionException e) {
- LOG.error("Failed to allocate sal Node", e);
- }
-
- return node;
- }
-
- public static List<org.opendaylight.controller.sal.core.Node> getSalNodes (List<Node> mdsalNodes) {
- List<org.opendaylight.controller.sal.core.Node> nodes = new ArrayList<>();
-
- for (Node mdsalNode : mdsalNodes) {
- nodes.add(NodeUtils.getSalNode(mdsalNode));
- }
- return nodes;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.compatibility.plugin.api;
-
-import java.util.List;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.controller.sal.utils.Status;
-import org.opendaylight.ovsdb.compatibility.plugin.error.OvsdbPluginException;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.lib.schema.typed.TypedBaseTable;
-
-public interface OvsdbConfigurationService {
-
- /**
- * @deprecated This version of insertRow is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by
- * {@link #insertRow(Node, String, String, UUID, Row) insertRow} and
- * {@link #insertTree(Node, String, String, UUID, Row) insertTree}
- *
- * @param node OVSDB Node
- * @param tableName Table on which the row is inserted
- * @param parentUuid UUID of the parent table to which this operation will result in attaching/mutating.
- * @param row Row of table Content to be inserted
- * @return UUID of the inserted Row
- */
- @Deprecated
- StatusWithUuid insertRow(Node node, String tableName, String parentUuid, Row<GenericTableSchema> row);
-
- /**
- * insert a Row in a Table of a specified Database Schema. This is a convenience method on top of
- * {@link #insertRow(Node, String, String, String, UUID, String, Row) insertRow}
- * which assumes that OVSDB schema implementation that corresponds to the databaseName will provide
- * the necessary service to populate the Parent Table Name and Parent Column Name.
- *
- * This method can insert just a single Row specified in the row parameter.
- * But {@link #insertTree(Node, String, String, UUID, Row) insertTree}
- * can insert a hierarchy of rows with parent-child relationship.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentRowUuid UUID of the parent table to which this operation will result in attaching/mutating.
- * @param row Row of table Content to be inserted
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return UUID of the inserted Row
- */
- UUID insertRow(Node node, String databaseName, String tableName, UUID parentRowUuid,
- Row<GenericTableSchema> row) throws OvsdbPluginException;
-
- /**
- * insert a Row in a Table of a specified Database Schema.
- *
- * This method can insert just a single Row specified in the row parameter.
- * But {@link #insertTree(Node, String, String, UUID, Row) insertTree}
- * can insert a hierarchy of rows with parent-child relationship.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentTable Name of the Parent Table to which this operation will result in attaching/mutating.
- * @param parentRowUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param parentColumn Name of the Column in the Parent Table to be mutated with the UUID that results from the insert operation.
- * @param row Row of table Content to be inserted
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return UUID of the inserted Row
- */
- UUID insertRow(Node node, String databaseName, String tableName, String parentTable, UUID parentRowUuid,
- String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException;
-
- /**
- * inserts a Tree of Rows in multiple Tables that has parent-child relationships referenced through the OVSDB schema's refTable construct.
- * This is a convenience method on top of {@link #insertTree(Node, String, String, String, UUID, String, Row) insertTree}
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentRowUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param row Row Tree with parent-child relationships via column of type refTable.
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return Returns the row tree with the UUID of every inserted Row populated in the _uuid column of every row in the tree
- */
- Row<GenericTableSchema> insertTree(Node node, String databaseName, String tableName, UUID parentRowUuid,
- Row<GenericTableSchema> row) throws OvsdbPluginException;
-
- /**
- * inserts a Tree of Rows in multiple Tables that has parent-child relationships referenced through the OVSDB schema's refTable construct
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentTable Name of the Parent Table to which this operation will result in attaching/mutating.
- * @param parentRowUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param parentColumn Name of the Column in the Parent Table to be mutated with the UUID that results from the insert operation.
- * @param row Row Tree with parent-child relationships via column of type refTable.
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return Returns the row tree with the UUID of every inserted Row populated in the _uuid column of every row in the tree
- */
- Row<GenericTableSchema> insertTree(Node node, String databaseName, String tableName, String parentTable, UUID parentRowUuid,
- String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException;
-
- /**
- * @deprecated This version of updateRow is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by
- * {@link #updateRow(Node, String, String, UUID, Row, boolean) updateRow}
- *
- * @param node OVSDB Node
- * @param tableName Table on which the row is Updated
- * @param parentUuid UUID of the parent row on which this operation might result in mutating.
- * @param rowUuid UUID of the row that is being updated
- * @param row Row of table Content to be Updated. Include just those columns that needs to be updated.
- */
- @Deprecated
- Status updateRow(Node node, String tableName, String parentUuid, String rowUuid, Row row);
-
- /**
- * update or mutate a Row in a Table of a specified Database Schema.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is updated
- * @param rowUuid UUID of the row being updated
- * @param row Row of table Content to be updated
- * @param overwrite true will overwrite/replace the existing row (matching the rowUuid) with the passed row object.
- * false will update the existing row (matching the rowUuid) using only the columns in the passed row object.
- * @throws OvsdbPluginException Any failure during the update operation will result in a specific exception.
- * @return Returns the entire Row after the update operation.
- */
- Row<GenericTableSchema> updateRow(Node node, String databaseName, String tableName, UUID rowUuid,
- Row<GenericTableSchema> row, boolean overwrite) throws OvsdbPluginException;
-
- /**
- * @deprecated This version of deleteRow is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by {@link #deleteRow(Node, String, String, UUID) deleteRow}
- *
- * @param node OVSDB Node
- * @param tableName Table on which the row is Updated
- * @param rowUuid UUID of the row that is being deleted
- */
- @Deprecated
- Status deleteRow(Node node, String tableName, String rowUuid);
-
- /**
- * update or mutate a Row in a Table of a specified Database Schema.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is Updated
- * @param rowUuid UUID of the row that is being deleted
- * @throws OvsdbPluginException Any failure during the delete operation will result in a specific exception.
- */
-
- void deleteRow(Node node, String databaseName, String tableName, UUID rowUuid) throws OvsdbPluginException;
-
- /**
- * update or mutate a Row in a Table of a specified Database Schema.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is Updated
- * @param parentTable Name of the Parent Table to which this operation will result in mutating.
- * @param parentColumn Name of the Column in the Parent Table to be mutated.
- * @param rowUuid UUID of the row that is being deleted
- * @throws OvsdbPluginException Any failure during the delete operation will result in a specific exception.
- */
-
- void deleteRow(Node node, String databaseName, String tableName, String parentTable,
- UUID parentRowUuid, String parentColumn, UUID rowUuid) throws OvsdbPluginException;
-
- /**
- * @deprecated This version of getRow is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by {@link #getRow(Node, String, String, UUID) getRow}
- *
- * @param node OVSDB Node
- * @param tableName Table Name
- * @param uuid UUID of the row being queried
- * @return a row with a list of Column data that corresponds to an unique Row-identifier called uuid in a given table.
- */
- @Deprecated
- Row getRow(Node node, String tableName, String uuid);
-
- /**
- * Returns a Row from a table for the specified uuid.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table Name
- * @param uuid UUID of the row being queried
- * @throws OvsdbPluginException Any failure during the get operation will result in a specific exception.
- * @return a row with a list of Column data that corresponds to an unique Row-identifier called uuid in a given table.
- */
- Row<GenericTableSchema> getRow(Node node, String databaseName, String tableName, UUID uuid) throws OvsdbPluginException;
-
- /**
- * @deprecated This version of getRows is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by
- * {@link #getRows(Node, String, String) getRows} and {@link #getRows(Node, String, String, String) getRows}
- *
- * @param node OVSDB Node
- * @param tableName Table Name
- * @return List of rows that makes the entire Table.
- */
- @Deprecated
- ConcurrentMap<String, Row> getRows(Node node, String tableName);
-
- /**
- * Returns all rows of a table.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table Name
- * @throws OvsdbPluginException Any failure during the get operation will result in a specific exception.
- * @return Map of rows to its UUID that makes the entire Table.
- */
- ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node, String databaseName, String tableName) throws OvsdbPluginException;
-
- /**
- * Returns all rows of a table filtered by query string.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table Name
- * @param fiqlQuery FIQL style String Query <a href="http://tools.ietf.org/html/draft-nottingham-atompub-fiql-00">draft-nottingham-atompub-fiql</a> to filter rows
- * @throws OvsdbPluginException Any failure during the get operation will result in a specific exception.
- * @return Map of rows to its UUID that makes the entire Table.
- */
- ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node, String databaseName, String tableName, String fiqlQuery) throws OvsdbPluginException;
-
- /**
- * @deprecated Returns all the Tables in a given Ndoe.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by
- * {@link #getTables(Node, String) getTables}
- * @param node OVSDB node
- * @return List of Table Names that make up Open_vSwitch schema.
- */
- @Deprecated
- List<String> getTables(Node node);
-
- /**
- * Returns all the Tables in a given Node.
- *
- * @param node OVSDB node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @throws OvsdbPluginException Any failure during the get operation will result in a specific exception.
- * @return List of Table Names that make up the schema represented by the databaseName
- */
- List<String> getTables(Node node, String databaseName) throws OvsdbPluginException;
-
- /**
- * setOFController is a convenience method used by existing applications to setup Openflow Controller on
- * a Open_vSwitch Bridge.
- * This API assumes an Open_vSwitch database Schema.
- *
- * @param node Node
- * @param bridgeUUID uuid of the Bridge for which the ip-address of Openflow Controller should be programmed.
- * @return Boolean representing success or failure of the operation.
- *
- * @throws InterruptedException
- * @throws ExecutionException
- */
- Boolean setOFController(Node node, String bridgeUUID) throws InterruptedException, ExecutionException;
-
- <T extends TypedBaseTable<?>> String getTableName(Node node, Class<T> typedClass);
- <T extends TypedBaseTable<?>> T getTypedRow(Node node, Class<T> typedClass, Row row);
- <T extends TypedBaseTable<?>> T createTypedRow(Node node, Class<T> typedClass);
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.compatibility.plugin.api;
-
-import java.util.List;
-import java.util.Map;
-
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.ovsdb.plugin.api.Connection;
-import org.opendaylight.ovsdb.plugin.api.ConnectionConstants;
-
-public interface OvsdbConnectionService {
- Connection getConnection(Node node);
- List<Node> getNodes();
- Node getNode(String identifier);
- Node connect(String identifier, Map<ConnectionConstants, String> params);
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.compatibility.plugin.api;
-
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.ovsdb.lib.notation.Row;
-
-import java.net.InetAddress;
-
-public interface OvsdbInventoryListener {
- void nodeAdded(Node node, InetAddress address, int port);
- void nodeRemoved(Node node);
- void rowAdded(Node node, String tableName, String uuid, Row row);
- void rowUpdated(Node node, String tableName, String uuid, Row old, Row row);
- void rowRemoved(Node node, String tableName, String uuid, Row row, Object context);
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.compatibility.plugin.api;
-
-import java.net.InetAddress;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.controller.sal.core.Property;
-import org.opendaylight.controller.sal.core.UpdateType;
-import org.opendaylight.ovsdb.lib.message.TableUpdates;
-import org.opendaylight.ovsdb.lib.notation.Row;
-
-public interface OvsdbInventoryService{
- ConcurrentMap<String, ConcurrentMap<String, Row>> getCache(Node n, String databaseName);
- ConcurrentMap<String, Row> getTableCache(Node n, String databaseName, String tableName);
- Row getRow(Node n, String databaseName, String tableName, String uuid);
- void updateRow(Node n, String databaseName, String tableName, String uuid, Row row);
- void removeRow(Node n, String databaseName, String tableName, String uuid);
- void processTableUpdates(Node n, String databaseName, TableUpdates tableUpdates);
- void printCache(Node n);
- void addNode(Node n, Set<Property> props);
- void notifyNodeAdded(Node n, InetAddress address, int port);
- void removeNode(Node n);
- void addNodeProperty(Node node, UpdateType type, Set<Property> props);
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.compatibility.plugin.api;
-
-import org.opendaylight.controller.sal.utils.Status;
-import org.opendaylight.controller.sal.utils.StatusCode;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-
-/**
- * Extends the Status class to allow functions to return a uuid
- */
-public class StatusWithUuid extends Status {
- private static final long serialVersionUID = -5413085099514964003L;
- private UUID uuid;
-
- public StatusWithUuid(StatusCode errorCode) {
- super(errorCode);
- }
-
- public StatusWithUuid(StatusCode errorCode, String description) {
- super(errorCode, description);
- }
-
- public StatusWithUuid(StatusCode errorCode, long requestId) {
- super(errorCode, requestId);
- }
-
- public StatusWithUuid(StatusCode errorCode, UUID uuid) {
- super(errorCode);
- this.uuid = uuid;
- }
-
- public UUID getUuid() {
- return uuid;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.compatibility.plugin.error;
-
-public class OvsdbPluginException extends RuntimeException {
- public OvsdbPluginException(String message){
- super(message);
- }
-
- public OvsdbPluginException(String message, Throwable cause){
- super(message, cause);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.ovsdb.compatibility.plugin.impl;
-
-import java.util.List;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.controller.sal.utils.Status;
-import org.opendaylight.ovsdb.compatibility.plugin.api.NodeUtils;
-import org.opendaylight.ovsdb.compatibility.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.compatibility.plugin.api.StatusWithUuid;
-import org.opendaylight.ovsdb.compatibility.plugin.error.OvsdbPluginException;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.lib.schema.typed.TypedBaseTable;
-
-/**
- * This is a proxy class for ovsdb plugin's OvsdbConfigurationService class
- * It just forward the call to OvsdbConfigurationService instance and pass
- * back the response to the caller.
- *
- * @author Anil Vishnoi (vishnoianil@gmail.com)
- *
- */
-public class ConfigurationServiceImpl implements OvsdbConfigurationService
-{
- private volatile org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService pluginOvsdbConfigurationService;
-
- void init() {
- }
-
- /**
- * Function called by the dependency manager when at least one dependency
- * become unsatisfied or when the component is shutting down because for
- * example bundle is being stopped.
- *
- */
- void destroy() {
- }
-
- /**
- * Function called by dependency manager after "init ()" is called and after
- * the services provided by the class are registered in the service registry
- *
- */
- void start() {
- }
-
- /**
- * Function called by the dependency manager before the services exported by
- * the component are unregistered, this will be followed by a "destroy ()"
- * calls
- *
- */
- void stop() {
- }
-
- public void setOvsdbConfigurationService(org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService pluginOvsdbConfigurationService){
- this.pluginOvsdbConfigurationService = pluginOvsdbConfigurationService;
- }
-
- public void unsetOvsdbConfigurationService(org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService pluginOvsdbConfigurationService){
- this.pluginOvsdbConfigurationService = null;
- }
-
-
- @Override
- public StatusWithUuid insertRow(Node node, String tableName, String parentUuid, Row<GenericTableSchema> row) {
- return StatusConvertorUtil.convertOvsdbStatusWithUuidToCompLayerStatusWithUuid(pluginOvsdbConfigurationService.insertRow(NodeUtils.getMdsalNode(node), tableName, parentUuid, row));
- }
-
- @Override
- public Status updateRow (Node node, String tableName, String parentUUID, String rowUUID, Row row) {
- return StatusConvertorUtil
- .convertOvsdbStatusToSalStatus(pluginOvsdbConfigurationService
- .updateRow(NodeUtils.getMdsalNode(node), tableName, parentUUID, rowUUID, row));
- }
-
- @Override
- public Status deleteRow(Node node, String tableName, String uuid) {
- return StatusConvertorUtil
- .convertOvsdbStatusToSalStatus(pluginOvsdbConfigurationService.
- deleteRow(NodeUtils.getMdsalNode(node), tableName, uuid));
- }
-
- @Override
- public ConcurrentMap<String, Row> getRows(Node node, String tableName) {
- return pluginOvsdbConfigurationService.getRows(NodeUtils.getMdsalNode(node), tableName);
- }
-
- @Override
- public Row getRow(Node node, String tableName, String uuid) {
- return pluginOvsdbConfigurationService.getRow(NodeUtils.getMdsalNode(node), tableName, uuid);
- }
-
- @Override
- public List<String> getTables(Node node) {
- return pluginOvsdbConfigurationService.getTables(NodeUtils.getMdsalNode(node));
- }
-
- @Override
- public Boolean setOFController(Node node, String bridgeUUID) throws InterruptedException, ExecutionException {
- return pluginOvsdbConfigurationService.setOFController(NodeUtils.getMdsalNode(node), bridgeUUID);
- }
-
- @Override
- public <T extends TypedBaseTable<?>> String getTableName(Node node, Class<T> typedClass) {
- return pluginOvsdbConfigurationService.getTableName(NodeUtils.getMdsalNode(node), typedClass);
- }
-
- @Override
- public <T extends TypedBaseTable<?>> T getTypedRow(Node node, Class<T> typedClass, Row row) {
- return pluginOvsdbConfigurationService.getTypedRow(NodeUtils.getMdsalNode(node), typedClass, row);
- }
-
- @Override
- public <T extends TypedBaseTable<?>> T createTypedRow(Node node, Class<T> typedClass) {
- return pluginOvsdbConfigurationService.createTypedRow(NodeUtils.getMdsalNode(node), typedClass);
- }
-
- @Override
- public UUID insertRow(Node node, String databaseName, String tableName, String parentTable, UUID parentUuid,
- String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException {
- return pluginOvsdbConfigurationService
- .insertRow(NodeUtils.getMdsalNode(node), databaseName, tableName, parentTable, parentUuid,
- parentColumn, row);
- }
-
- @Override
- public UUID insertRow(Node node, String databaseName, String tableName,
- UUID parentRowUuid, Row<GenericTableSchema> row)
- throws OvsdbPluginException {
- return this.insertRow(node, databaseName, tableName, null, parentRowUuid, null, row);
- }
-
- @Override
- public Row<GenericTableSchema> insertTree(Node node, String databaseName, String tableName, String parentTable, UUID parentUuid,
- String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException {
- return pluginOvsdbConfigurationService
- .insertTree(NodeUtils.getMdsalNode(node), databaseName, tableName, parentTable,
- parentUuid, parentColumn, row);
- }
-
- @Override
- public Row<GenericTableSchema> insertTree(Node node, String databaseName,
- String tableName, UUID parentRowUuid, Row<GenericTableSchema> row)
- throws OvsdbPluginException {
- return this.insertTree(node, databaseName, tableName, null, parentRowUuid, null, row);
- }
-
- @Override
- public Row<GenericTableSchema> updateRow(Node node, String databaseName,
- String tableName, UUID rowUuid, Row<GenericTableSchema> row,
- boolean overwrite) throws OvsdbPluginException {
- return pluginOvsdbConfigurationService
- .updateRow(NodeUtils.getMdsalNode(node), databaseName, tableName, rowUuid, row, overwrite);
- }
-
- @Override
- public void deleteRow(Node node, String databaseName, String tableName, String parentTable, UUID parentRowUuid,
- String parentColumn, UUID rowUuid) throws OvsdbPluginException {
- pluginOvsdbConfigurationService
- .deleteRow(NodeUtils.getMdsalNode(node), databaseName, tableName, parentTable,
- parentRowUuid, parentColumn, rowUuid);
- }
-
- @Override
- public void deleteRow(Node node, String databaseName, String tableName, UUID rowUuid) throws OvsdbPluginException {
- this.deleteRow(node, databaseName, tableName, null, null, null, rowUuid);
- }
-
- @Override
- public Row<GenericTableSchema> getRow(Node node, String databaseName,
- String tableName, UUID uuid) throws OvsdbPluginException {
- return pluginOvsdbConfigurationService
- .getRow(NodeUtils.getMdsalNode(node), databaseName, tableName, uuid);
- }
-
- @Override
- public ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node,
- String databaseName, String tableName) throws OvsdbPluginException {
- return pluginOvsdbConfigurationService
- .getRows(NodeUtils.getMdsalNode(node), databaseName, tableName);
- }
-
- @Override
- public ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node,
- String databaseName, String tableName, String fiqlQuery)
- throws OvsdbPluginException {
- return this.getRows(node, databaseName, tableName);
- }
-
- @Override
- public List<String> getTables(Node node, String databaseName) throws OvsdbPluginException {
- return pluginOvsdbConfigurationService.getTables(NodeUtils.getMdsalNode(node), databaseName);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.ovsdb.compatibility.plugin.impl;
-
-import java.util.List;
-import java.util.Map;
-
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.ovsdb.compatibility.plugin.api.NodeUtils;
-import org.opendaylight.ovsdb.compatibility.plugin.api.OvsdbConnectionService;
-import org.opendaylight.ovsdb.plugin.api.Connection;
-import org.opendaylight.ovsdb.plugin.api.ConnectionConstants;
-
-/**
- * This is a proxy class for ovsdb plugin's OvsdbConnectionService class
- * It just forward the call to OvsdbConnectionService instance and pass
- * back the response to the caller.
- *
- * @author Anil Vishnoi (vishnoianil@gmail.com)
- *
- */
-public class ConnectionServiceImpl implements OvsdbConnectionService{
-
- private volatile org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService pluginOvsdbConnectionService;
-
- public void init() {
- }
-
- /**
- * Function called by the dependency manager when at least one dependency
- * become unsatisfied or when the component is shutting down because for
- * example bundle is being stopped.
- */
- void destroy() {
- }
-
- /**
- * Function called by dependency manager after "init ()" is called and after
- * the services provided by the class are registered in the service registry
- */
- void start() {
- }
-
- /**
- * Function called by the dependency manager before the services exported by
- * the component are unregistered, this will be followed by a "destroy ()"
- * calls
- */
- void stopping() {
- }
-
- public void setOvsdbConnectionService(org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService pluginOvsdbConnectionService){
- this.pluginOvsdbConnectionService = pluginOvsdbConnectionService;
- }
-
- public void unsetOvsdbConnectionService(org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService pluginOvsdbConnectionService){
- this.pluginOvsdbConnectionService = null;
- }
-
- @Override
- public Connection getConnection(Node node) {
- return pluginOvsdbConnectionService.getConnection(NodeUtils.getMdsalNode(node));
- }
-
- @Override
- public Node getNode (String identifier) {
- return NodeUtils.getSalNode(pluginOvsdbConnectionService.getNode(identifier));
- }
-
- @Override
- public List<Node> getNodes() {
- return NodeUtils.getSalNodes(pluginOvsdbConnectionService.getNodes());
- }
-
- @Override
- public Node connect(String identifier, Map<ConnectionConstants, String> params) {
- return NodeUtils.getSalNode(pluginOvsdbConnectionService.connect(identifier, params));
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.ovsdb.compatibility.plugin.impl;
-
-import java.net.InetAddress;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.controller.sal.core.Property;
-import org.opendaylight.controller.sal.core.UpdateType;
-import org.opendaylight.ovsdb.compatibility.plugin.api.NodeUtils;
-import org.opendaylight.ovsdb.compatibility.plugin.api.OvsdbInventoryListener;
-import org.opendaylight.ovsdb.compatibility.plugin.api.OvsdbInventoryService;
-import org.opendaylight.ovsdb.lib.message.TableUpdates;
-import org.opendaylight.ovsdb.lib.notation.Row;
-
-import com.google.common.collect.Sets;
-
-/**
- * This is a proxy class for ovsdb plugin's OvsdbInventoryService class
- * It just forward the call to OvsdbInventoryService instance and pass
- * back the response to the caller.
- * It also register as a listener to ovsdb plugin and relay the notification
- * back to all the subscriber of this compatibility layer.
- *
- * @author Anil Vishnoi (vishnoianil@gmail.com)
- *
- */
-public class InventoryServiceImpl implements OvsdbInventoryService,
- org.opendaylight.ovsdb.plugin.api.OvsdbInventoryListener {
- private volatile org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService pluginOvsdbInventoryService;
-
- private Set<OvsdbInventoryListener> ovsdbInventoryListeners = Sets.newCopyOnWriteArraySet();
-
-
- /**
- * Function called by the dependency manager when all the required
- * dependencies are satisfied
- *
- */
- public void init() {
- }
-
- /**
- * Function called by the dependency manager when at least one dependency
- * become unsatisfied or when the component is shutting down because for
- * example bundle is being stopped.
- *
- */
- public void destroy() {
- }
-
- /**
- * Function called by dependency manager after "init ()" is called and after
- * the services provided by the class are registered in the service registry
- *
- */
- public void start() {
- }
-
- /**
- * Function called by the dependency manager before the services exported by
- * the component are unregistered, this will be followed by a "destroy ()"
- * calls
- *
- */
- public void stop() {
- }
-
- public void setOvsdbInventoryService(org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService pluginOvsdbInventoryService){
- this.pluginOvsdbInventoryService = pluginOvsdbInventoryService;
- }
-
- public void unsetOvsdbInventoryService(org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService pluginOvsdbInventoryService){
- this.pluginOvsdbInventoryService = pluginOvsdbInventoryService;
- }
-
- //Register listener for ovsdb.compatibility
- public void addOvsdbInventoryListener(OvsdbInventoryListener pluginOvsdbInventoryListener){
- this.ovsdbInventoryListeners.add(pluginOvsdbInventoryListener);
- }
-
- public void removeOvsdbInventoryListener(OvsdbInventoryListener pluginOvsdbInventoryListener){
- if(this.ovsdbInventoryListeners.contains(ovsdbInventoryListeners)) {
- this.ovsdbInventoryListeners.remove(ovsdbInventoryListeners);
- }
- }
-
- @Override
- public ConcurrentMap<String, ConcurrentMap<String, Row>> getCache(Node n, String databaseName) {
- return pluginOvsdbInventoryService.getCache(NodeUtils.getMdsalNode(n), databaseName);
- }
-
-
- @Override
- public ConcurrentMap<String, Row> getTableCache(Node n, String databaseName, String tableName) {
- return pluginOvsdbInventoryService.getTableCache(NodeUtils.getMdsalNode(n), databaseName, tableName);
- }
-
-
- @Override
- public Row getRow(Node n, String databaseName, String tableName, String uuid) {
- return pluginOvsdbInventoryService.getRow(NodeUtils.getMdsalNode(n), databaseName, tableName, uuid);
- }
-
- @Override
- public void updateRow(Node n, String databaseName, String tableName, String uuid, Row row) {
- pluginOvsdbInventoryService.updateRow(NodeUtils.getMdsalNode(n), databaseName, tableName, uuid, row);
- }
-
- @Override
- public void removeRow(Node n, String databaseName, String tableName, String uuid) {
- pluginOvsdbInventoryService.removeRow(NodeUtils.getMdsalNode(n), databaseName, tableName, uuid);
- }
-
- @Override
- public void processTableUpdates(Node n, String databaseName, TableUpdates tableUpdates) {
- pluginOvsdbInventoryService.processTableUpdates(NodeUtils.getMdsalNode(n), databaseName, tableUpdates);
- }
-
- @Override
- public void printCache(Node n) {
- pluginOvsdbInventoryService.printCache(NodeUtils.getMdsalNode(n));
- }
-
- @Override
- public void addNode(Node node, Set<Property> props) {
- }
-
- @Override
- public void notifyNodeAdded(Node node, InetAddress address, int port) {
- pluginOvsdbInventoryService.notifyNodeAdded(NodeUtils.getMdsalNode(node), address, port);
- }
-
- @Override
- public void addNodeProperty(Node node, UpdateType type, Set<Property> props) {
- }
-
- @Override
- public void removeNode(Node node) {
- pluginOvsdbInventoryService.removeNode(NodeUtils.getMdsalNode(node));
- }
-
- @Override
- public void nodeAdded(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node node,
- InetAddress address, int port) {
- for(OvsdbInventoryListener listener : this.ovsdbInventoryListeners) {
- listener.nodeAdded(NodeUtils.getSalNode(node), address, port);
- }
-
- }
-
- @Override
- public void nodeRemoved(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node node) {
- for(OvsdbInventoryListener listener : this.ovsdbInventoryListeners) {
- listener.nodeRemoved(NodeUtils.getSalNode(node));
- }
-
- }
-
- @Override
- public void rowAdded(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node node,
- String tableName, String uuid, Row row) {
- for(OvsdbInventoryListener listener : this.ovsdbInventoryListeners) {
- listener.rowAdded(NodeUtils.getSalNode(node), tableName, uuid, row);
- }
-
- }
-
- @Override
- public void rowUpdated(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node node,
- String tableName, String uuid, Row old,
- Row row) {
- for(OvsdbInventoryListener listener : this.ovsdbInventoryListeners) {
- listener.rowUpdated(NodeUtils.getSalNode(node), tableName, uuid, old, row);
- }
-
- }
-
- @Override
- public void rowRemoved(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node node,
- String tableName, String uuid, Row row,
- Object context) {
- for(OvsdbInventoryListener listener : this.ovsdbInventoryListeners) {
- listener.rowRemoved(NodeUtils.getSalNode(node), tableName, uuid, row, context);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.ovsdb.compatibility.plugin.impl;
-
-import org.opendaylight.controller.sal.utils.Status;
-import org.opendaylight.controller.sal.utils.StatusCode;
-import org.opendaylight.ovsdb.compatibility.plugin.api.StatusWithUuid;
-
-/**
- * Class providers convenience methods for converting *.plugin.api.Status object to *.sal.utils.Status object.
- *
- * @author Anil Vishnoi ( vishnoianil@gmail.com)
- *
- */
-public class StatusConvertorUtil {
-
- public static StatusWithUuid convertOvsdbStatusWithUuidToCompLayerStatusWithUuid(org.opendaylight.ovsdb.plugin.api.StatusWithUuid statusWithUuid){
- if(statusWithUuid.getUuid() != null){
- return new StatusWithUuid(convertOvsdbStatusCodeToSalStatusCode(statusWithUuid.getCode()),statusWithUuid.getUuid());
- }else if(statusWithUuid.getRequestId() != 0){
- return new StatusWithUuid(convertOvsdbStatusCodeToSalStatusCode(statusWithUuid.getCode()),statusWithUuid.getRequestId());
- }else{
- return new StatusWithUuid(convertOvsdbStatusCodeToSalStatusCode(statusWithUuid.getCode()),statusWithUuid.getDescription());
- }
-
- }
-
- public static Status convertOvsdbStatusToSalStatus(org.opendaylight.ovsdb.plugin.api.Status status){
- if(status.getRequestId() != 0){
- return new org.opendaylight.controller.sal.utils.Status(convertOvsdbStatusCodeToSalStatusCode(status.getCode()),status.getRequestId());
- }else{
- return new org.opendaylight.controller.sal.utils.Status(convertOvsdbStatusCodeToSalStatusCode(status.getCode()),status.getDescription());
- }
- }
-
- private static StatusCode convertOvsdbStatusCodeToSalStatusCode(org.opendaylight.ovsdb.plugin.api.StatusCode statusCode){
- switch(statusCode){
- case SUCCESS:
- return StatusCode.SUCCESS;
- case CREATED:
- return StatusCode.CREATED;
- case BADREQUEST:
- return StatusCode.BADREQUEST;
- case UNAUTHORIZED:
- return StatusCode.UNAUTHORIZED;
- case FORBIDDEN:
- return StatusCode.FORBIDDEN;
- case NOTFOUND:
- return StatusCode.NOTFOUND;
- case NOTALLOWED:
- return StatusCode.NOTALLOWED;
- case NOTACCEPTABLE:
- return StatusCode.NOTACCEPTABLE;
- case TIMEOUT:
- return StatusCode.TIMEOUT;
- case CONFLICT:
- return StatusCode.CONFLICT;
- case GONE:
- return StatusCode.GONE;
- case UNSUPPORTED:
- return StatusCode.UNSUPPORTED;
-
- case INTERNALERROR:
- return StatusCode.INTERNALERROR;
- case NOTIMPLEMENTED:
- return StatusCode.NOTIMPLEMENTED;
- case NOSERVICE:
- return StatusCode.NOSERVICE;
- case UNDEFINED:
- return StatusCode.UNDEFINED;
- default:
- return StatusCode.UNSUPPORTED;
- }
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.ovsdb.compatibility.plugin.internal;
-
-import java.util.Dictionary;
-import java.util.Hashtable;
-
-import org.apache.felix.dm.Component;
-import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.ovsdb.compatibility.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.compatibility.plugin.api.OvsdbConnectionService;
-import org.opendaylight.ovsdb.compatibility.plugin.api.OvsdbInventoryListener;
-import org.opendaylight.ovsdb.compatibility.plugin.api.OvsdbInventoryService;
-import org.opendaylight.ovsdb.compatibility.plugin.impl.ConfigurationServiceImpl;
-import org.opendaylight.ovsdb.compatibility.plugin.impl.ConnectionServiceImpl;
-import org.opendaylight.ovsdb.compatibility.plugin.impl.InventoryServiceImpl;
-
-/**
- * Activator for ovsdb plugin compatibility layer
- * @author Anil Vishnoi (vishnoianil@gmail.com)
- *
- */
-public class Activator extends ComponentActivatorAbstractBase {
-
- /**
- * Function called when the activator starts just after some initializations
- * are done by the ComponentActivatorAbstractBase.
- * Here it registers the node Type
- *
- */
- @Override
- public void init() {
- Node.NodeIDType.registerIDType("OVS", String.class);
- }
-
- /**
- * Function called when the activator stops just before the cleanup done by
- * ComponentActivatorAbstractBase
- *
- */
- @Override
- public void destroy() {
- Node.NodeIDType.unRegisterIDType("OVS");
- }
- @Override
- public Object[] getGlobalImplementations() {
- return new Object[]{ ConnectionServiceImpl.class, ConfigurationServiceImpl.class, InventoryServiceImpl.class };
- }
-
- @Override
- public void configureGlobalInstance(Component c, Object imp){
- if (imp.equals(ConfigurationServiceImpl.class)) {
- // export the service to be used by SAL
- Dictionary<String, Object> props = new Hashtable<>();
- c.setInterface(new String[] { OvsdbConfigurationService.class.getName()}, props);
- c.add(createServiceDependency()
- .setService(org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService.class)
- .setCallbacks("setOvsdbConfigurationService", "unsetOvsdbConfigurationService")
- .setRequired(true));
- }
-
- if (imp.equals(ConnectionServiceImpl.class)) {
- // export the service to be used by SAL
- Dictionary<String, Object> props = new Hashtable<>();
- c.setInterface(
- new String[] {OvsdbConnectionService.class.getName()}, props);
- c.add(createServiceDependency()
- .setService(org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService.class)
- .setCallbacks("setOvsdbConnectionService", "unsetOvsdbConnectionService")
- .setRequired(true));
- }
-
- if (imp.equals(InventoryServiceImpl.class)) {
- Dictionary<String, Object> props = new Hashtable<>();
- c.setInterface(
- new String[]{OvsdbInventoryService.class.getName(),
- org.opendaylight.ovsdb.plugin.api.OvsdbInventoryListener.class.getName()}, props);
- c.add(createServiceDependency()
- .setService(org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService.class)
- .setCallbacks("setOvsdbInventoryService", "unsetOvsdbInventoryService")
- .setRequired(true));
- c.add(createServiceDependency()
- .setService(OvsdbInventoryListener.class)
- .setCallbacks("addOvsdbInventoryListener", "removeOvsdbInventoryListener")
- .setRequired(true));
- }
-
- }
-}
+++ /dev/null
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Sam Hague
- */
-package org.opendaylight.ovsdb.compatibility.plugin.impl;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.fail;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.controller.sal.core.ConstructionException;
-import org.opendaylight.controller.sal.core.Node;
-import org.opendaylight.controller.sal.core.NodeConnector;
-import org.opendaylight.ovsdb.plugin.api.Connection;
-
-public class ConnectionServiceImplTest {
- private static final String OVS = "OVS";
- private static final String IDENTIFIER = "192.168.120.31:45001";
- private static final String OVS_IDENTIFIER = OVS + "|" + IDENTIFIER;
- private static final String BAD_IDENTIFIER = "BAD" + "|" + IDENTIFIER;
- private static org.opendaylight.ovsdb.plugin.impl.ConnectionServiceImpl pluginConnectionService;
- private static ConnectionServiceImpl connectionService;
-
- @BeforeClass
- public static void setUp () {
- Node.NodeIDType.registerIDType(OVS, String.class);
- NodeConnector.NodeConnectorIDType.registerIDType(OVS, String.class, OVS);
- pluginConnectionService = new org.opendaylight.ovsdb.plugin.impl.ConnectionServiceImpl();
- Connection connection = new Connection(IDENTIFIER, null);
- pluginConnectionService.putOvsdbConnection(IDENTIFIER, connection);
-
- connectionService = new ConnectionServiceImpl();
- connectionService.setOvsdbConnectionService(pluginConnectionService);
- }
-
- @Test
- public void testGetNode () {
- Node node = connectionService.getNode(IDENTIFIER);
- assertNotNull("Node " + IDENTIFIER + " is null", node);
-
- node = connectionService.getNode(OVS_IDENTIFIER);
- assertNotNull("Node " + OVS_IDENTIFIER + " is null", node);
-
- try {
- node = connectionService.getNode(BAD_IDENTIFIER);
- fail("Expected a NullPointerException to be thrown");
- } catch (NullPointerException e) {
- assertSame(NullPointerException.class, e.getClass());
- }
- }
-
- @Test
- public void testGetConnection () {
- Node node = connectionService.getNode(IDENTIFIER);
- assertNotNull("Node " + IDENTIFIER + " is null", node);
-
- Connection connection = connectionService.getConnection(node);
- assertNotNull("Connection " + IDENTIFIER + " is null", connection);
-
- try {
- connection = connectionService.getConnection(null);
- fail("Expected a NullPointerException to be thrown");
- } catch (NullPointerException e) {
- assertSame(NullPointerException.class, e.getClass());
- }
-
- try {
- node = new Node("OVS", BAD_IDENTIFIER);
- } catch (ConstructionException e) {
- fail("Exception should not have occurred" + e);
- }
- connection = connectionService.getConnection(node);
- assertNull("Connection " + BAD_IDENTIFIER + " is not null", connection);
- }
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (C) 2014 Red Hat, Inc. and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>commons</artifactId>
- <version>1.4.0-SNAPSHOT</version>
- <relativePath>../commons/parent</relativePath>
- </parent>
-
- <artifactId>plugin-mdsal-adapter</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.dependencymanager</artifactId>
- </dependency>
- <dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- </dependency>
- <!-- Yang Models -->
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>concepts</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-binding</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-inet-types</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.openflowplugin.model</groupId>
- <artifactId>model-flow-service</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>yang-ext</artifactId>
- </dependency>
- <!-- Controller Dependencies -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-common-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>library</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>plugin</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>schema.openvswitch</artifactId>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.4.0</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Import-Package>
- org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.*,
- org.opendaylight.controller.sal.binding.api,
- org.opendaylight.ovsdb.plugin.api,
- org.apache.felix.dm,
- org.slf4j,
- org.eclipse.osgi.framework.console,
- org.osgi.framework,
- javax.net.ssl,
- *
- </Import-Package>
- <Embed-Transitive>true</Embed-Transitive>
- <Bundle-Activator>
- org.opendaylight.ovsdb.plugin.md.Activator
- </Bundle-Activator>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-maven-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.codehaus.mojo</groupId>
- <artifactId>build-helper-maven-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.jacoco</groupId>
- <artifactId>jacoco-maven-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
-</project>
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.md;
-
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.controller.sal.core.ComponentActivatorAbstractBase;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryListener;
-
-import org.apache.felix.dm.Component;
-
-/**
- * OSGi Bundle Activator for the Neutron providers
- */
-public class Activator extends ComponentActivatorAbstractBase {
- /**
- * Function called when the activator starts just after some
- * initializations are done by the
- * ComponentActivatorAbstractBase.
- */
- @Override
- public void init() {
- }
-
- /**
- * Function called when the activator stops just before the
- * cleanup done by ComponentActivatorAbstractBase.
- *
- */
- @Override
- public void destroy() {
- }
-
- /**
- * Function that is used to communicate to dependency manager the
- * list of known implementations for services inside a container.
- *
- * @return An array containing all the CLASS objects that will be
- * instantiated in order to get an fully working implementation
- * Object
- */
- @Override
- public Object[] getImplementations() {
- Object[] res = {OvsdbBindingAwareProviderImpl.class,
- OvsdbInventoryManager.class };
- return res;
- }
-
- /**
- * Function that is called when configuration of the dependencies
- * is required.
- *
- * @param c dependency manager Component object, used for
- * configuring the dependencies exported and imported
- * @param imp Implementation class that is being configured,
- * needed as long as the same routine can configure multiple
- * implementations
- * @param containerName The containerName being configured, this allow
- * also optional per-container different behavior if needed, usually
- * should not be the case though.
- */
- @Override
- public void configureInstance(Component c, Object imp,
- String containerName) {
-
- if (imp.equals(OvsdbBindingAwareProviderImpl.class)) {
- c.setInterface(OvsdbBindingAwareProvider.class.getName(), null);
- c.add(createServiceDependency()
- .setService(BindingAwareBroker.class)
- .setRequired(true));
- }
-
- if (imp.equals(OvsdbInventoryManager.class)) {
- c.setInterface(OvsdbInventoryListener.class.getName(), null);
- c.add(createServiceDependency()
- .setService(OvsdbBindingAwareProvider.class)
- .setRequired(true));
- c.add(createServiceDependency()
- .setService(OvsdbConfigurationService.class)
- .setRequired(true));
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.md;
-
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.sal.binding.api.NotificationService;
-
-/**
- * Created by dave on 01/08/2014.
- */
-public interface OvsdbBindingAwareProvider {
- public DataBroker getDataBroker();
- public NotificationService getNotificationService();
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.md;
-
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareProvider;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.controller.sal.binding.api.NotificationService;
-
-import org.apache.felix.dm.Component;
-import org.osgi.framework.BundleContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class OvsdbBindingAwareProviderImpl extends AbstractBindingAwareProvider implements OvsdbBindingAwareProvider {
-
- private DataBroker dataBroker;
- private NotificationProviderService notificationService;
-
- static final Logger logger = LoggerFactory.getLogger(OvsdbBindingAwareProvider.class);
-
- private BundleContext bc;
- private volatile BindingAwareBroker broker;
-
- void init(Component c) {
- this.bc = c.getDependencyManager().getBundleContext();
- broker.registerProvider(this, this.bc);
- logger.info("OVSDB MD-SAL Inventory Adapter Registered With the MD-SAL");
- }
-
- void destroy() {
- this.dataBroker = null;
- this.notificationService = null;
- }
-
- @Override
- public void onSessionInitiated(BindingAwareBroker.ProviderContext providerContext) {
- this.dataBroker = providerContext.getSALService(DataBroker.class);
- this.notificationService = providerContext.getSALService(NotificationProviderService.class);
- }
-
- @Override
- public DataBroker getDataBroker() {
- return this.dataBroker;
- }
-
- @Override
- public NotificationService getNotificationService() {
- return this.notificationService;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.md;
-
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.sal.utils.HexEncode;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryListener;
-import org.opendaylight.ovsdb.schema.openvswitch.Bridge;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.PortNumber;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.ovsdb.node.inventory.rev140731.OvsdbCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.ovsdb.node.inventory.rev140731.OvsdbCapableNodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.ovsdb.node.inventory.rev140731.OvsdbManagedNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.ovsdb.node.inventory.rev140731.OvsdbManagedNodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.ovsdb.node.inventory.rev140731.nodes.node.OvsdbBridge;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.ovsdb.node.inventory.rev140731.nodes.node.OvsdbBridgeBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-
-/**
- * Handle OVSDB Inventory Updates and create the necessary entries in the MD-SAL config datastore
- */
-public class OvsdbInventoryManager implements OvsdbInventoryListener {
-
- // Dependencies injected by OSGi
- private volatile OvsdbBindingAwareProvider provider;
- private volatile OvsdbConfigurationService ovsdbConfigurationService;
-
- static final String OVS_NODE_PREFIX = "openvswitch:";
- static final String OPENFLOW_NODE_PREFIX = "openflow:";
-
- static final Logger LOGGER = LoggerFactory.getLogger(OvsdbInventoryManager.class);
-
-
- /**
- * Called by the framework when the bundle is started
- */
- public void start() {
- //ToDo: Add existing nodes from inventory
- //This case is required for surviving controller reboot
- }
-
- /**
- * When an AD-SAL node is added by the OVSDB Inventory Service, Add an MD-SAL node
- *
- * @param node The AD-SAL node
- * @param address The {@link java.net.InetAddress} of the Node
- * @param port The ephemeral port number used by this connection
- */
- @Override
- public synchronized void nodeAdded(org.opendaylight.controller.sal.core.Node node,
- InetAddress address,
- int port) {
- DataBroker dataBroker = provider.getDataBroker();
- Preconditions.checkNotNull(dataBroker);
-
- NodeId nodeId = new NodeId(OVS_NODE_PREFIX + node.getNodeIDString());
- NodeKey nodeKey = new NodeKey(nodeId);
-
- OvsdbCapableNode ovsdbNode = new OvsdbCapableNodeBuilder()
- .setIpAddress(Utils.convertIpAddress(address))
- .setPort(new PortNumber(port))
- .setManagedNodes(new ArrayList<NodeId>())
- .build();
-
- Node newNode = new NodeBuilder()
- .setId(nodeId)
- .setKey(nodeKey)
- .addAugmentation(OvsdbCapableNode.class, ovsdbNode)
- .build();
-
- InstanceIdentifier<Node> path = InstanceIdentifier.builder(Nodes.class)
- .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, nodeKey)
- .toInstance();
-
- WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
- tx.put(LogicalDatastoreType.CONFIGURATION, path, newNode, true);
- try {
- tx.submit().get();
- LOGGER.debug("Removed Node {}", path.toString());
- } catch (InterruptedException | ExecutionException e) {
- LOGGER.error(e.getMessage(), e);
- }
- }
-
- /**
- * When an AD-SAL node is removed by the OVSDB Inventory Service, Remove the MD-SAL node
- *
- * @param node The AD-SAL node
- */
- @Override
- public synchronized void nodeRemoved(org.opendaylight.controller.sal.core.Node node) {
- DataBroker dataBroker = provider.getDataBroker();
- Preconditions.checkNotNull(dataBroker);
-
- NodeId nodeId = new NodeId(new NodeId(OVS_NODE_PREFIX + node.getNodeIDString()));
- NodeKey nodeKey = new NodeKey(nodeId);
-
- InstanceIdentifier<Node> path = InstanceIdentifier.builder(Nodes.class)
- .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, nodeKey)
- .toInstance();
-
- WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
- tx.delete(LogicalDatastoreType.CONFIGURATION, path);
- try {
- tx.submit().get();
- LOGGER.debug("Removed Node {}", path.toString());
- } catch (InterruptedException | ExecutionException e) {
- LOGGER.error(e.getMessage(), e);
- }
- }
-
- /**
- * Handle OVSDB row removed When a Bridge row is removed, the OpenFlow Node is deleted The parent OVSDB node is
- * updated and the OpenFlow node removed from it's managed-nodes list
- *
- * @param node The AD-SAL node
- * @param tableName The name of modified table
- * @param uuid The UUID of the deleted row
- * @param row The deleted Row
- */
- @Override
- public synchronized void rowRemoved(org.opendaylight.controller.sal.core.Node node,
- String tableName,
- String uuid,
- Row row,
- Object context) {
- if (tableName.equalsIgnoreCase(ovsdbConfigurationService.getTableName(node, Bridge.class))) {
- LOGGER.debug("OVSDB Bridge Row removed on node {}", node.toString());
- DataBroker dataBroker = provider.getDataBroker();
- Preconditions.checkNotNull(dataBroker);
-
- Bridge bridge = ovsdbConfigurationService.getTypedRow(node, Bridge.class, row);
- Set<String> dpidString = bridge.getDatapathIdColumn().getData();
- Long dpid = HexEncode.stringToLong((String) dpidString.toArray()[0]);
-
- NodeId openflowNodeId = new NodeId(OPENFLOW_NODE_PREFIX + dpid.toString());
- NodeKey openflowNodeKey = new NodeKey(openflowNodeId);
-
- InstanceIdentifier<Node> openflowNodePath = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, openflowNodeKey)
- .toInstance();
-
- NodeId ovsdbNodeId = new NodeId(OVS_NODE_PREFIX + node.getNodeIDString());
- NodeKey ovsdbNodeKey = new NodeKey(ovsdbNodeId);
-
- InstanceIdentifier<OvsdbCapableNode> ovsdbNodePath = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, ovsdbNodeKey)
- .augmentation(OvsdbCapableNode.class)
- .toInstance();
-
- // Read the current OVSDB Node from the DataStore
- ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();
- OvsdbCapableNode ovsdbNode;
- try {
- Optional<OvsdbCapableNode> data = tx.read(LogicalDatastoreType.CONFIGURATION, ovsdbNodePath).get();
- if (!data.isPresent()) {
- LOGGER.error("OVSDB node not updated. Parent node for {} does not exist", ovsdbNodePath.toString());
- return;
- }
- ovsdbNode = data.get();
- } catch (InterruptedException | ExecutionException e) {
- LOGGER.error("OVSDB node not updated. Parent node for {} does not exist", ovsdbNodePath.toString());
- return;
- }
-
- // Update the list of Nodes
- List<NodeId> managedNodesList = ovsdbNode.getManagedNodes();
- managedNodesList.remove(openflowNodeId);
-
- // Write changes to DataStore
- OvsdbCapableNode updatedNode = new OvsdbCapableNodeBuilder(ovsdbNode)
- .setManagedNodes(managedNodesList)
- .build();
- tx.delete(LogicalDatastoreType.CONFIGURATION, openflowNodePath);
- tx.put(LogicalDatastoreType.CONFIGURATION, ovsdbNodePath, updatedNode);
-
- try {
- tx.submit().get();
- LOGGER.debug("Transaction success for delete of {} and update of {}",
- openflowNodePath.toString(),
- ovsdbNodePath.toString());
- } catch (InterruptedException | ExecutionException e) {
- LOGGER.error(e.getMessage(), e);
- }
- }
- }
-
- /**
- * Handle OVSDB row updates When a Bridge row is updated and it contains a DPID then add a new OpenFlow node to the
- * inventory A relationship is created between the OpenFlow and OVSDB nodes
- *
- * @param node The AD-SAL node
- * @param tableName The name of the updated table
- * @param uuid The UUID of the updated row
- * @param old The old contents of the row
- * @param row The updated Row
- */
- @Override
- public synchronized void rowUpdated(org.opendaylight.controller.sal.core.Node node,
- String tableName,
- String uuid,
- Row old,
- Row row) {
- LOGGER.debug("OVSDB Bridge Row updated on node {}", node.toString());
- if (tableName.equalsIgnoreCase(ovsdbConfigurationService.getTableName(node, Bridge.class))) {
- DataBroker dataBroker = provider.getDataBroker();
- Bridge bridge = ovsdbConfigurationService.getTypedRow(node, Bridge.class, row);
-
- Set<String> dpidString = bridge.getDatapathIdColumn().getData();
- Long dpid;
- try {
- dpid = HexEncode.stringToLong((String) dpidString.toArray()[0]);
- } catch (ArrayIndexOutOfBoundsException e) {
- return;
- }
-
- NodeId openflowNodeId = new NodeId(OPENFLOW_NODE_PREFIX + dpid.toString());
- NodeKey openflowNodeKey = new NodeKey(openflowNodeId);
-
- InstanceIdentifier<OvsdbManagedNode> openflowNodepath = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, openflowNodeKey)
- .augmentation(OvsdbManagedNode.class)
- .toInstance();
-
- NodeId ovsdbNodeId = new NodeId(OVS_NODE_PREFIX + node.getNodeIDString());
- NodeKey ovsdbNodeKey = new NodeKey(ovsdbNodeId);
-
- InstanceIdentifier<OvsdbCapableNode> ovsdbNodePath = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class, ovsdbNodeKey)
- .augmentation(OvsdbCapableNode.class)
- .toInstance();
-
- // Create an OvsdbBridge object using the information from the update
- OvsdbBridge ovsdbBridge = new OvsdbBridgeBuilder()
- .setBridgeName(bridge.getName())
- .setBridgeUuid(uuid)
- .setManagedBy(ovsdbNodeId)
- .build();
-
- // Add the bridge to the OvsdbManagedNode
- OvsdbManagedNode ovsdbManagedNode = new OvsdbManagedNodeBuilder()
- .setOvsdbBridge(ovsdbBridge)
- .build();
-
- // Read the current OVSDB Node from the DataStore
- ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();
- OvsdbCapableNode ovsdbNode;
- try {
- Optional<OvsdbCapableNode> data = tx.read(LogicalDatastoreType.CONFIGURATION, ovsdbNodePath).get();
- if (!data.isPresent()) {
- LOGGER.error("OVSDB node not updated. Parent node for {} does not exist", ovsdbNodePath.toString());
- return;
- }
- ovsdbNode = data.get();
- } catch (InterruptedException | ExecutionException e) {
- throw new RuntimeException("Node does not exist");
- }
-
- // Update the list of Nodes
- List<NodeId> managedNodesList = ovsdbNode.getManagedNodes();
- managedNodesList.add(openflowNodeId);
-
- // Create a delta object
- OvsdbCapableNode updatedNode = new OvsdbCapableNodeBuilder(ovsdbNode)
- .setManagedNodes(managedNodesList)
- .build();
-
- // Create parent if we get to this node before openflowplugin
- tx.put(LogicalDatastoreType.CONFIGURATION, openflowNodepath, ovsdbManagedNode, true);
- tx.put(LogicalDatastoreType.CONFIGURATION, ovsdbNodePath, updatedNode);
-
- try {
- tx.submit().get();
- LOGGER.debug("Transaction success for addition of {} and update of {}",
- openflowNodepath.toString(),
- ovsdbNodePath.toString());
- } catch (InterruptedException | ExecutionException e) {
- LOGGER.error(e.getMessage(), e);
- }
- }
- }
-
- @Override
- public synchronized void rowAdded(org.opendaylight.controller.sal.core.Node node,
- String tableName,
- String uuid,
- Row row) {
- // noop
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.md;
-
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Address;
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv6Address;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.net.Inet4Address;
-import java.net.InetAddress;
-
-/**
- * Utilities to convert Java types to the types specified in the Yang models
- */
-public final class Utils {
-
- static final Logger logger = LoggerFactory.getLogger(Utils.class);
-
- /**
- * Returns a {@link org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress}
- * from a @{link java.net.InetAddress}
- */
- public static IpAddress convertIpAddress(InetAddress inetAddress){
-
- if (inetAddress instanceof Inet4Address){
- Ipv4Address ipv4Address = new Ipv4Address(inetAddress.getHostAddress());
- return new IpAddress(ipv4Address);
- }
- else {
- Ipv6Address ipv6Address = new Ipv6Address(inetAddress.getHostAddress());
- return new IpAddress(ipv6Address);
- }
- }
-}
+++ /dev/null
-module ovsdb-node-inventory {
- namespace "urn:opendaylight:ovsdb-node-inventory";
- prefix "ovsdbinv";
-
- import opendaylight-inventory {
- prefix inv;
- revision-date "2013-08-19";
- }
-
- import yang-ext {
- prefix ext;
- revision-date "2013-07-09";
- }
-
- import ietf-inet-types {
- prefix inet;
- revision-date "2010-09-24";
- }
-
- import flow-node-inventory {
- prefix flowcapable;
- revision-date "2013-08-19";
- }
-
- revision "2014-07-31" {
- description "Initial revision of the OVSDB Inventory model";
- }
-
- grouping ovsdb-bridge-attributes {
- leaf bridge-uuid {
- description "The unique identifier of the bridge";
- type string;
- }
-
- leaf bridge-name {
- description "The name of the bridge";
- type string;
- }
-
- leaf managed-by {
- description "The OVSDB which this bridge belongs to";
- type inv:node-id;
- }
- }
-
- grouping ovsdb-node-attributes {
- leaf ip-address {
- description "The IP Address of an OVSDB node";
- type inet:ip-address;
- }
-
- leaf port {
- description "The port that an OVSDB node is connected on";
- type inet:port-number;
- }
-
- leaf-list managed-nodes {
- type inv:node-id;
- }
- }
-
- augment /inv:nodes/inv:node {
- ext:augment-identifier "ovsdb-managed-node";
- // when "/inv:nodes/inv:node/flowcapable:manufacturer = '*'";
- container ovsdb-bridge {
- uses ovsdb-bridge-attributes;
- }
- }
-
- augment /inv:nodes/inv:node {
- ext:augment-identifier "ovsdb-capable-node";
- uses ovsdb-node-attributes;
- }
-
-}
+++ /dev/null
-package org.opendaylight.ovsdb.plugin.md;
-
-import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.net.Inet4Address;
-import java.net.Inet6Address;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-
-public class UtilsTest {
-
- static final String IPV4_ADDRESS = "10.10.10.10";
- static final String IPV6_ADDRESS = "2001:db8:0:0:0:ff00:42:8329";
-
- @Test
- public void testConvertIpAddress() throws UnknownHostException {
-
- InetAddress addressV4 = Inet4Address.getByName(IPV4_ADDRESS);
- InetAddress addressV6 = Inet6Address.getByName(IPV6_ADDRESS);
-
- IpAddress ipAddresV4 = Utils.convertIpAddress(addressV4);
- IpAddress ipAddresV6 = Utils.convertIpAddress(addressV6);
-
- Assert.assertEquals(IPV4_ADDRESS, ipAddresV4.getIpv4Address().getValue());
- Assert.assertEquals(IPV6_ADDRESS, ipAddresV6.getIpv6Address().getValue());
- }
-}
\ No newline at end of file
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (C) 2014 Red Hat, Inc. and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>commons</artifactId>
- <version>1.4.0-SNAPSHOT</version>
- <relativePath>../commons/parent</relativePath>
- </parent>
-
- <artifactId>plugin-shell</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
- <url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
- <licenses>
- <license>
- <name>Eclipse Public License v1.0</name>
- <url>http://www.eclipse.org/legal/epl-v10.html</url>
- </license>
- </licenses>
- <developers>
- <developer>
- <name>Sam Hague</name>
- <email>shague@gmail.com</email>
- <url>https://github.com/shague</url>
- </developer>
- </developers>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/ovsdb.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/ovsdb.git</developerConnection>
- <tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
- </scm>
-
- <dependencies>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.karaf.shell</groupId>
- <artifactId>org.apache.karaf.shell.console</artifactId>
- <version>${karaf.version}</version>
- </dependency>
- <dependency>
- <groupId>com.fasterxml.jackson.core</groupId>
- <artifactId>jackson-databind</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>library</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>plugin</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-simple</artifactId>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.4.0</version>
- <extensions>true</extensions>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.jacoco</groupId>
- <artifactId>jacoco-maven-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-
-</project>
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.shell;
-
-import org.apache.karaf.shell.commands.Argument;
-import org.apache.karaf.shell.commands.Command;
-import org.apache.karaf.shell.console.OsgiCommandSupport;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-
-@Command(scope = "ovs", name = "printCache", description="Prints OVSDB Cache")
-public class PrintCache extends OsgiCommandSupport{
- private OvsdbInventoryService ovsdbInventory;
-
- @Argument(index=0, name="nodeName", description="Node Name", required=true, multiValued=false)
- String nodeName = null;
-
- @Override
- protected Object doExecute() throws Exception {
- NodeId nodeId = new NodeId(nodeName);
- NodeKey nodeKey = new NodeKey(nodeId);
- Node node = new NodeBuilder()
- .setId(nodeId)
- .setKey(nodeKey)
- .build();
- ovsdbInventory.printCache(node);
- return null;
- }
-
- public void setOvsdbInventory(OvsdbInventoryService inventoryService){
- this.ovsdbInventory = inventoryService;
- }
-}
+++ /dev/null
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0">
- <reference id="ovsdbInventoryRef" interface="org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService"/>
- <command-bundle xmlns="http://karaf.apache.org/xmlns/shell/v1.1.0">
- <command>
- <action class="org.opendaylight.ovsdb.plugin.shell.PrintCache">
- <property name="ovsdbInventory" ref="ovsdbInventoryRef"/>
- </action>
- </command>
- </command-bundle>
-</blueprint>
+++ /dev/null
-/*
- * Copyright (C) 2014 Red Hat, Inc.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Sam Hague
- */
-package org.opendaylight.ovsdb.plugin.shell;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import java.io.ByteArrayOutputStream;
-import java.io.InputStream;
-import java.io.PrintStream;
-import java.lang.reflect.Field;
-import java.util.Set;
-import org.junit.Test;
-import org.opendaylight.ovsdb.lib.notation.Column;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.notation.Version;
-import org.opendaylight.ovsdb.lib.schema.ColumnSchema;
-import org.opendaylight.ovsdb.lib.schema.DatabaseSchema;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.plugin.api.OvsVswitchdSchemaConstants;
-import org.opendaylight.ovsdb.plugin.impl.InventoryServiceImpl;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-
-public class PrintCacheTest {
- private static final String NODESTRING = "OVS|10.10.10.10:65342";
- private static final String BRIDGES = "bridges";
- private static final String OVS = "OVS";
- private static final String BRIDGE = "bridge1";
- private static final String OPENVSWITCH = "Open_vSwitch";
- private static final String CACHE =
- "Database Open_vSwitch" + System.getProperty("line.separator") +
- "\tTable Open_vSwitch" + System.getProperty("line.separator") +
- "\t\t1==bridges : bridge1 " + System.getProperty("line.separator") +
- "-----------------------------------------------------------" + System.getProperty("line.separator");
-
- @Test
- public void testDoExecute () throws Exception {
- // Read in schema and create the DatabaseSchema
- InputStream resourceAsStream = PrintCacheTest.class.getResourceAsStream("test_schema.json");
- ObjectMapper mapper = new ObjectMapper();
- JsonNode jsonNode = mapper.readTree(resourceAsStream);
- DatabaseSchema schema = DatabaseSchema.fromJson("some", jsonNode.get("result"));
- assertNotNull(schema);
- assertEquals(Version.fromString("6.12.0"), schema.getVersion());
-
- // Add params to PrintCache
- PrintCache printCacheTest = new PrintCache();
- Field cNField = printCacheTest.getClass().getDeclaredField("nodeName");
- cNField.setAccessible(true);
- cNField.set(printCacheTest, NODESTRING);
- InventoryServiceImpl inventoryService = new InventoryServiceImpl();
- inventoryService.init();
- printCacheTest.setOvsdbInventory(inventoryService);
-
- // Test that an empty cache prints nothing
- // Capture the output from PrintCache and compare it to what is expected
- PrintStream originalBaos = System.out;
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- System.setOut(new PrintStream(baos));
- printCacheTest.doExecute();
- assertEquals("PrintCache output does not match expected output:", "", baos.toString());
-
- // Add some data to the bridges row in the Open_vSwitch table
- GenericTableSchema ovsTable = schema.table(OPENVSWITCH, GenericTableSchema.class);
- ColumnSchema<GenericTableSchema, Set<UUID>> bridges = ovsTable.multiValuedColumn(BRIDGES, UUID.class);
- Column column = new Column(bridges, new UUID(BRIDGE).toString());
- Row row = new Row(ovsTable);
- row.addColumn(BRIDGES, column);
-
- NodeId nodeId = new NodeId(NODESTRING);
- NodeKey nodeKey = new NodeKey(nodeId);
- Node node = new NodeBuilder()
- .setId(nodeId)
- .setKey(nodeKey)
- .build();
- inventoryService.updateRow(node, OvsVswitchdSchemaConstants.DATABASE_NAME, OPENVSWITCH, new UUID("1").toString(), row);
-
- // Test that a populated cache is printed correctly
- // Capture the output from PrintCache and compare it to what is expected
- printCacheTest.doExecute();
- System.setOut(originalBaos);
- assertEquals("PrintCache output does not match expected output:", CACHE, baos.toString());
- }
-}
+++ /dev/null
-{
- "id": 0,
- "result": {
- "tables": {
- "Port": {
- "columns": {
- "name": {
- "mutable": false,
- "type": "string"
- },
- "statistics": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "integer",
- "max": "unlimited"
- }
- },
- "vlan_mode": {
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "access",
- "native-tagged",
- "native-untagged",
- "trunk"
- ]
- ]
- },
- "min": 0
- }
- },
- "qos": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "QoS"
- },
- "min": 0
- }
- },
- "status": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "trunks": {
- "type": {
- "key": {
- "maxInteger": 4095,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0,
- "max": 4096
- }
- },
- "mac": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "interfaces": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "Interface"
- },
- "max": "unlimited"
- }
- },
- "bond_downdelay": {
- "type": "integer"
- },
- "bond_mode": {
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "active-backup",
- "balance-slb",
- "balance-tcp",
- "stable"
- ]
- ]
- },
- "min": 0
- }
- },
- "bond_updelay": {
- "type": "integer"
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "other_config": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "tag": {
- "type": {
- "key": {
- "maxInteger": 4095,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0
- }
- },
- "bond_fake_iface": {
- "type": "boolean"
- },
- "fake_bridge": {
- "type": "boolean"
- },
- "lacp": {
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "active",
- "off",
- "passive"
- ]
- ]
- },
- "min": 0
- }
- }
- },
- "indexes": [
- [
- "name"
- ]
- ]
- },
- "Manager": {
- "columns": {
- "is_connected": {
- "ephemeral": true,
- "type": "boolean"
- },
- "target": {
- "type": "string"
- },
- "other_config": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "max_backoff": {
- "type": {
- "key": {
- "minInteger": 1000,
- "type": "integer"
- },
- "min": 0
- }
- },
- "connection_mode": {
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "in-band",
- "out-of-band"
- ]
- ]
- },
- "min": 0
- }
- },
- "status": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "inactivity_probe": {
- "type": {
- "key": "integer",
- "min": 0
- }
- }
- },
- "indexes": [
- [
- "target"
- ]
- ]
- },
- "Bridge": {
- "columns": {
- "name": {
- "mutable": false,
- "type": "string"
- },
- "flood_vlans": {
- "type": {
- "key": {
- "maxInteger": 4095,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0,
- "max": 4096
- }
- },
- "netflow": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "NetFlow"
- },
- "min": 0
- }
- },
- "mirrors": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "Mirror"
- },
- "min": 0,
- "max": "unlimited"
- }
- },
- "status": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "datapath_id": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "controller": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "Controller"
- },
- "min": 0,
- "max": "unlimited"
- }
- },
- "protocols": {
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "OpenFlow10",
- "OpenFlow12",
- "OpenFlow13"
- ]
- ]
- },
- "min": 0,
- "max": "unlimited"
- }
- },
- "fail_mode": {
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "secure",
- "standalone"
- ]
- ]
- },
- "min": 0
- }
- },
- "ports": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "Port"
- },
- "min": 0,
- "max": "unlimited"
- }
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "flow_tables": {
- "type": {
- "key": {
- "maxInteger": 254,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0,
- "value": {
- "type": "uuid",
- "refTable": "Flow_Table"
- },
- "max": "unlimited"
- }
- },
- "sflow": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "sFlow"
- },
- "min": 0
- }
- },
- "datapath_type": {
- "type": "string"
- },
- "other_config": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "stp_enable": {
- "type": "boolean"
- }
- },
- "indexes": [
- [
- "name"
- ]
- ]
- },
- "Interface": {
- "columns": {
- "options": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "name": {
- "mutable": false,
- "type": "string"
- },
- "statistics": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "integer",
- "max": "unlimited"
- }
- },
- "link_speed": {
- "ephemeral": true,
- "type": {
- "key": "integer",
- "min": 0
- }
- },
- "mtu": {
- "ephemeral": true,
- "type": {
- "key": "integer",
- "min": 0
- }
- },
- "mac_in_use": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "type": {
- "type": "string"
- },
- "ingress_policing_rate": {
- "type": {
- "key": {
- "minInteger": 0,
- "type": "integer"
- }
- }
- },
- "cfm_remote_opstate": {
- "ephemeral": true,
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "down",
- "up"
- ]
- ]
- },
- "min": 0
- }
- },
- "status": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "mac": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "ofport": {
- "type": {
- "key": "integer",
- "min": 0
- }
- },
- "cfm_fault_status": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "max": "unlimited"
- }
- },
- "duplex": {
- "ephemeral": true,
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "full",
- "half"
- ]
- ]
- },
- "min": 0
- }
- },
- "lacp_current": {
- "ephemeral": true,
- "type": {
- "key": "boolean",
- "min": 0
- }
- },
- "cfm_fault": {
- "ephemeral": true,
- "type": {
- "key": "boolean",
- "min": 0
- }
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "other_config": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "admin_state": {
- "ephemeral": true,
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "down",
- "up"
- ]
- ]
- },
- "min": 0
- }
- },
- "link_state": {
- "ephemeral": true,
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "down",
- "up"
- ]
- ]
- },
- "min": 0
- }
- },
- "cfm_remote_mpids": {
- "ephemeral": true,
- "type": {
- "key": "integer",
- "min": 0,
- "max": "unlimited"
- }
- },
- "cfm_mpid": {
- "type": {
- "key": "integer",
- "min": 0
- }
- },
- "ofport_request": {
- "type": {
- "key": {
- "maxInteger": 65279,
- "minInteger": 1,
- "type": "integer"
- },
- "min": 0
- }
- },
- "ingress_policing_burst": {
- "type": {
- "key": {
- "minInteger": 0,
- "type": "integer"
- }
- }
- },
- "cfm_health": {
- "ephemeral": true,
- "type": {
- "key": {
- "maxInteger": 100,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0
- }
- },
- "link_resets": {
- "ephemeral": true,
- "type": {
- "key": "integer",
- "min": 0
- }
- }
- },
- "indexes": [
- [
- "name"
- ]
- ]
- },
- "SSL": {
- "columns": {
- "ca_cert": {
- "type": "string"
- },
- "private_key": {
- "type": "string"
- },
- "bootstrap_ca_cert": {
- "type": "boolean"
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "certificate": {
- "type": "string"
- }
- },
- "maxRows": 1
- },
- "Open_vSwitch": {
- "columns": {
- "ovs_version": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "system_version": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "bridges": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "Bridge"
- },
- "min": 0,
- "max": "unlimited"
- }
- },
- "statistics": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "other_config": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "next_cfg": {
- "type": "integer"
- },
- "manager_options": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "Manager"
- },
- "min": 0,
- "max": "unlimited"
- }
- },
- "system_type": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "ssl": {
- "type": {
- "key": {
- "type": "uuid",
- "refTable": "SSL"
- },
- "min": 0
- }
- },
- "db_version": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "cur_cfg": {
- "type": "integer"
- }
- },
- "maxRows": 1,
- "isRoot": true
- },
- "Queue": {
- "columns": {
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "other_config": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "dscp": {
- "type": {
- "key": {
- "maxInteger": 63,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0
- }
- }
- },
- "isRoot": true
- },
- "NetFlow": {
- "columns": {
- "engine_type": {
- "type": {
- "key": {
- "maxInteger": 255,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0
- }
- },
- "targets": {
- "type": {
- "key": "string",
- "max": "unlimited"
- }
- },
- "add_id_to_interface": {
- "type": "boolean"
- },
- "active_timeout": {
- "type": {
- "key": {
- "minInteger": -1,
- "type": "integer"
- }
- }
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "engine_id": {
- "type": {
- "key": {
- "maxInteger": 255,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0
- }
- }
- }
- },
- "Mirror": {
- "columns": {
- "name": {
- "type": "string"
- },
- "output_port": {
- "type": {
- "key": {
- "refType": "weak",
- "type": "uuid",
- "refTable": "Port"
- },
- "min": 0
- }
- },
- "output_vlan": {
- "type": {
- "key": {
- "maxInteger": 4095,
- "minInteger": 1,
- "type": "integer"
- },
- "min": 0
- }
- },
- "statistics": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "integer",
- "max": "unlimited"
- }
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "select_dst_port": {
- "type": {
- "key": {
- "refType": "weak",
- "type": "uuid",
- "refTable": "Port"
- },
- "min": 0,
- "max": "unlimited"
- }
- },
- "select_all": {
- "type": "boolean"
- },
- "select_vlan": {
- "type": {
- "key": {
- "maxInteger": 4095,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0,
- "max": 4096
- }
- },
- "select_src_port": {
- "type": {
- "key": {
- "refType": "weak",
- "type": "uuid",
- "refTable": "Port"
- },
- "min": 0,
- "max": "unlimited"
- }
- }
- }
- },
- "QoS": {
- "columns": {
- "queues": {
- "type": {
- "key": {
- "maxInteger": 4294967295,
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0,
- "value": {
- "type": "uuid",
- "refTable": "Queue"
- },
- "max": "unlimited"
- }
- },
- "other_config": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "type": {
- "type": "string"
- }
- },
- "isRoot": true
- },
- "Controller": {
- "columns": {
- "is_connected": {
- "ephemeral": true,
- "type": "boolean"
- },
- "enable_async_messages": {
- "type": {
- "key": "boolean",
- "min": 0
- }
- },
- "controller_rate_limit": {
- "type": {
- "key": {
- "minInteger": 100,
- "type": "integer"
- },
- "min": 0
- }
- },
- "target": {
- "type": "string"
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "other_config": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "local_netmask": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "local_gateway": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "max_backoff": {
- "type": {
- "key": {
- "minInteger": 1000,
- "type": "integer"
- },
- "min": 0
- }
- },
- "local_ip": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "connection_mode": {
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "in-band",
- "out-of-band"
- ]
- ]
- },
- "min": 0
- }
- },
- "status": {
- "ephemeral": true,
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "role": {
- "ephemeral": true,
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "master",
- "other",
- "slave"
- ]
- ]
- },
- "min": 0
- }
- },
- "inactivity_probe": {
- "type": {
- "key": "integer",
- "min": 0
- }
- },
- "controller_burst_limit": {
- "type": {
- "key": {
- "minInteger": 25,
- "type": "integer"
- },
- "min": 0
- }
- }
- }
- },
- "Flow_Table": {
- "columns": {
- "groups": {
- "type": {
- "key": "string",
- "min": 0,
- "max": "unlimited"
- }
- },
- "name": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "overflow_policy": {
- "type": {
- "key": {
- "type": "string",
- "enum": [
- "set",
- [
- "evict",
- "refuse"
- ]
- ]
- },
- "min": 0
- }
- },
- "flow_limit": {
- "type": {
- "key": {
- "minInteger": 0,
- "type": "integer"
- },
- "min": 0
- }
- }
- }
- },
- "sFlow": {
- "columns": {
- "polling": {
- "type": {
- "key": "integer",
- "min": 0
- }
- },
- "targets": {
- "type": {
- "key": "string",
- "max": "unlimited"
- }
- },
- "header": {
- "type": {
- "key": "integer",
- "min": 0
- }
- },
- "agent": {
- "type": {
- "key": "string",
- "min": 0
- }
- },
- "external_ids": {
- "type": {
- "key": "string",
- "min": 0,
- "value": "string",
- "max": "unlimited"
- }
- },
- "sampling": {
- "type": {
- "key": "integer",
- "min": 0
- }
- }
- }
- }
- },
- "cksum": "2180939265 17455",
- "name": "Open_vSwitch",
- "version": "6.12.0"
- },
- "error": null
-}
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (C) 2014 Red Hat, Inc. and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>commons</artifactId>
- <version>1.4.0-SNAPSHOT</version>
- <relativePath>../commons/parent</relativePath>
- </parent>
-
- <artifactId>plugin</artifactId>
- <version>1.2.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
- <url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
- <licenses>
- <license>
- <name>Eclipse Public License v1.0</name>
- <url>http://www.eclipse.org/legal/epl-v10.html</url>
- </license>
- </licenses>
- <developers>
- <developer>
- <name>Sam Hague</name>
- <email>shague@gmail.com</email>
- <url>https://github.com/shague</url>
- </developer>
- </developers>
- <scm>
- <connection>scm:git:ssh://git.opendaylight.org:29418/ovsdb.git</connection>
- <developerConnection>scm:git:ssh://git.opendaylight.org:29418/ovsdb.git</developerConnection>
- <tag>HEAD</tag>
- <url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
- </scm>
-
- <dependencies>
- <dependency>
- <groupId>com.fasterxml.jackson.core</groupId>
- <artifactId>jackson-databind</artifactId>
- </dependency>
- <dependency>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </dependency>
- <dependency>
- <groupId>commons-collections</groupId>
- <artifactId>commons-collections</artifactId>
- </dependency>
- <dependency>
- <groupId>equinoxSDK381</groupId>
- <artifactId>org.eclipse.osgi</artifactId>
- </dependency>
- <dependency>
- <groupId>io.netty</groupId>
- <artifactId>netty-all</artifactId>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.felix</groupId>
- <artifactId>org.apache.felix.dependencymanager</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>library</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>schema.openvswitch</artifactId>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.ovsdb</groupId>
- <artifactId>utils.config</artifactId>
- <version>${ovsdb.utils.config.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller.model</groupId>
- <artifactId>model-inventory</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-api</artifactId>
- </dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-simple</artifactId>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
- <build>
- <testResources>
- <testResource>
- <filtering>true</filtering>
- <directory>src/test/resources</directory>
- </testResource>
- </testResources>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.4.0</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Embed-Dependency>commons-codec,javax.servlet-api,portlet-api,commons-collections,utils.config;type=!pom;inline=false</Embed-Dependency>
- <Embed-Transitive>true</Embed-Transitive>
- <Bundle-Activator>org.opendaylight.ovsdb.plugin.internal.Activator</Bundle-Activator>
- <Private-Package>
- org.opendaylight.ovsdb.plugin.impl,
- org.opendaylight.ovsdb.plugin.internal
- </Private-Package>
- <Export-Package>
- org.opendaylight.ovsdb.plugin,
- org.opendaylight.ovsdb.plugin.api,
- org.opendaylight.ovsdb.plugin.error
- </Export-Package>
- </instructions>
- <manifestLocation>${project.basedir}/META-INF</manifestLocation>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-checkstyle-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-failsafe-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.jacoco</groupId>
- <artifactId>jacoco-maven-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-</project>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-
-<configuration>
-
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
- <!-- encoders are assigned the type
- ch.qos.logback.classic.encoder.PatternLayoutEncoder by default -->
- <encoder>
- <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{5} - %msg%n</pattern>
- </encoder>
- </appender>
-
- <logger name="org.opendaylight.ovsdb" level="INFO" additivity="false">
- <appender-ref ref="STDOUT" />
- </logger>
-
- <!--<logger name="com.lordofthejars.foo" level="INFO" additivity="false">-->
- <!--<appender-ref ref="STDOUT" />-->
- <!--</logger>-->
-
- <root level="INFO">
- <appender-ref ref="STDOUT" />
- </root>
-
-</configuration>
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-
-public class Connection {
- private Node node;
- private String identifier;
- private OvsdbClient client;
-
- public Long getIdCounter() {
- return idCounter;
- }
-
- public void setIdCounter(Long idCounter) {
- this.idCounter = idCounter;
- }
-
- private Long idCounter;
-
- public Connection(String identifier, OvsdbClient client) {
- super();
-
- this.identifier = identifier;
- this.client = client;
- this.idCounter = 0L;
- NodeId nodeId = new NodeId("OVS" + "|" + identifier);
- NodeKey nodeKey = new NodeKey(nodeId);
- node = new NodeBuilder()
- .setId(nodeId)
- .setKey(nodeKey)
- .build();
- }
-
- public String getIdentifier() {
- return identifier;
- }
-
- public void setIdentifier(String identifier) {
- this.identifier = identifier;
- }
-
- public OvsdbClient getClient() {
- return this.client;
- }
-
- public void setClient(OvsdbClient client) {
- this.client = client;
- }
-
- public Node getNode() {
- return node;
- }
-
- public void setNode(Node node) {
- this.node = node;
- }
-
- public void disconnect() {
- client.disconnect();
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((identifier == null) ? 0 : identifier.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- Connection other = (Connection) obj;
- if (identifier == null) {
- if (other.identifier != null) {
- return false;
- }
- } else if (!identifier.equals(other.identifier)) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-/**
- * ConnectionConstants
- * Expand this enum as and when needed to support other connection parameters that
- * might be needed for certain protocol plugins.
- */
-public enum ConnectionConstants {
- ADDRESS("address"),
- PORT("port"),
- PROTOCOL("protocol"),
- USERNAME("username"),
- PASSWORD("password");
-
- private ConnectionConstants(String name) {
- this.name = name;
- }
-
- private String name;
-
- public String toString() {
- return name;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-import org.opendaylight.ovsdb.utils.config.ConfigProperties;
-
-import java.util.Map;
-
-import com.google.common.collect.Maps;
-
-public final class OvsVswitchdSchemaConstants {
- public static String DATABASE_NAME = "Open_vSwitch";
-
- private static final String OVSDB_AUTOCONFIGURECONTROLLER = "ovsdb.autoconfigurecontroller";
- private static final boolean defaultAutoConfigureController = true;
- private static boolean autoConfigureController = defaultAutoConfigureController;
-
- private static Map<String, String[]> columnToMutate = Maps.newHashMap();
- public static String[] getParentColumnToMutate(String childTabletoInsert) {
- return columnToMutate.get(childTabletoInsert);
- }
- private static void addParentColumnToMutate(String childTable, String parentTable, String columnName) {
- String[] parentColumn = {parentTable, columnName};
- columnToMutate.put(childTable, parentColumn);
- }
-
- static {
- addParentColumnToMutate("Bridge", "Open_vSwitch", "bridges");
- addParentColumnToMutate("Port", "Bridge", "ports");
- addParentColumnToMutate("Interface", "Port", "interfaces");
- addParentColumnToMutate("SSL", "Open_vSwitch", "ssl");
- addParentColumnToMutate("IPFIX", "Bridge", "ipfix");
- addParentColumnToMutate("sFlow", "Bridge", "sflow");
- addParentColumnToMutate("Flow_Table", "Bridge", "flow_tables");
- addParentColumnToMutate("QoS", "Port", "qos");
- addParentColumnToMutate("NetFlow", "Bridge", "netflow");
- addParentColumnToMutate("Mirror", "Bridge", "mirrors");
- addParentColumnToMutate("Manager", "Open_vSwitch", "manager_options");
- addParentColumnToMutate("Controller", "Bridge", "controller");
- // Keep the default value if the property is not set
- final String autoConfigureControllerStr =
- ConfigProperties.getProperty(OvsVswitchdSchemaConstants.class, OVSDB_AUTOCONFIGURECONTROLLER);
- if (autoConfigureControllerStr != null) {
- autoConfigureController = Boolean.getBoolean(autoConfigureControllerStr);
- }
- }
-
- public static void setAutoConfigureController(boolean autoConfigure) {
- autoConfigureController = autoConfigure;
- }
-
- public static boolean shouldConfigureController (String databaseName, String tableName) {
- return autoConfigureController && databaseName.equals(DATABASE_NAME) && tableName.equals("Bridge");
- }
-
- public enum PortType {
- VLAN("vlan"),
- TUNNEL("Tunnel"),
- BONDING("Bonding"),
- PATCH("patch"),
- INTERNAL("internal");
-
- private PortType(String name) {
- this.name = name;
- }
-
- private String name;
-
- @Override
- public String toString() {
- return name;
- }
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-import java.util.List;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.lib.schema.typed.TypedBaseTable;
-import org.opendaylight.ovsdb.plugin.error.OvsdbPluginException;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-
-public interface OvsdbConfigurationService {
-
- /**
- * @deprecated This version of insertRow is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by
- * {@link #insertRow(Node, String, String, Row)} and
- * {@link #insertTree(Node, String, String, UUID, Row)}
- *
- * @param node OVSDB Node
- * @param tableName Table on which the row is inserted
- * @param parentUuid UUID of the parent table to which this operation will result in attaching/mutating.
- * @param row Row of table Content to be inserted
- * @return UUID of the inserted Row
- */
- @Deprecated
- StatusWithUuid insertRow(Node node, String tableName, String parentUuid, Row<GenericTableSchema> row);
-
- /**
- * insert a Row in a Table of a specified Database Schema. This is a convenience method on top of
- * {@link insertRow(Node, String, String, String, UUID, String, Row) insertRow}
- * which assumes that OVSDB schema implementation that corresponds to the databaseName will provide
- * the necessary service to populate the Parent Table Name and Parent Column Name.
- *
- * This method can insert just a single Row specified in the row parameter.
- * But {@link #insertTree(Node, String, String, UUID, Row) insertTree}
- * can insert a hierarchy of rows with parent-child relationship.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentRowUuid UUID of the parent table to which this operation will result in attaching/mutating.
- * @param row Row of table Content to be inserted
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return UUID of the inserted Row
- */
- UUID insertRow(Node node, String databaseName, String tableName, UUID parentRowUuid,
- Row<GenericTableSchema> row) throws OvsdbPluginException;
-
- /**
- * insert a Row in a Table of a specified Database Schema.
- *
- * This method can insert just a single Row specified in the row parameter.
- * But {@link #insertTree(Node, String, String, UUID, Row)}
- * can insert a hierarchy of rows with parent-child relationship.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentTable Name of the Parent Table to which this operation will result in attaching/mutating.
- * @param parentRowUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param parentColumn Name of the Column in the Parent Table to be mutated with the UUID that results from the insert operation.
- * @param row Row of table Content to be inserted
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return UUID of the inserted Row
- */
- UUID insertRow(Node node, String databaseName, String tableName, String parentTable, UUID parentRowUuid,
- String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException;
-
- /**
- * inserts a Tree of Rows in multiple Tables that has parent-child relationships referenced through the OVSDB schema's refTable construct.
- * This is a convenience method on top of {@link #insertTree(Node, String, String, String, UUID, String, Row)}
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentRowUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param row Row Tree with parent-child relationships via column of type refTable.
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return Returns the row tree with the UUID of every inserted Row populated in the _uuid column of every row in the tree
- */
- Row<GenericTableSchema> insertTree(Node node, String databaseName, String tableName, UUID parentRowUuid,
- Row<GenericTableSchema> row) throws OvsdbPluginException;
-
- /**
- * inserts a Tree of Rows in multiple Tables that has parent-child relationships referenced through the OVSDB schema's refTable construct
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentTable Name of the Parent Table to which this operation will result in attaching/mutating.
- * @param parentRowUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param parentColumn Name of the Column in the Parent Table to be mutated with the UUID that results from the insert operation.
- * @param row Row Tree with parent-child relationships via column of type refTable.
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return Returns the row tree with the UUID of every inserted Row populated in the _uuid column of every row in the tree
- */
- Row<GenericTableSchema> insertTree(Node node, String databaseName, String tableName, String parentTable, UUID parentRowUuid,
- String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException;
-
- /**
- * @deprecated This version of updateRow is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by
- * {@link #updateRow(Node, String, String, UUID, Row, boolean) updateRow}
- *
- * @param node OVSDB Node
- * @param tableName Table on which the row is Updated
- * @param rowUuid UUID of the parent row on which this operation might result in mutating.
- * @param rowUuid UUID of the row that is being updated
- * @param row Row of table Content to be Updated. Include just those columns that needs to be updated.
- */
- @Deprecated
- Status updateRow(Node node, String tableName, String parentUuid, String rowUuid, Row row);
-
- /**
- * update or mutate a Row in a Table of a specified Database Schema.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is updated
- * @param rowUuid UUID of the row being updated
- * @param row Row of table Content to be updated
- * @param overwrite true will overwrite/replace the existing row (matching the rowUuid) with the passed row object.
- * false will update the existing row (matching the rowUuid) using only the columns in the passed row object.
- * @throws OvsdbPluginException Any failure during the update operation will result in a specific exception.
- * @return Returns the entire Row after the update operation.
- */
- Row<GenericTableSchema> updateRow(Node node, String databaseName, String tableName, UUID rowUuid,
- Row<GenericTableSchema> row, boolean overwrite) throws OvsdbPluginException;
-
- /**
- * @deprecated This version of deleteRow is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by {@link #deleteRow(Node, String, String, UUID)}
- *
- * @param node OVSDB Node
- * @param tableName Table on which the row is Updated
- * @param rowUuid UUID of the row that is being deleted
- */
- @Deprecated
- Status deleteRow(Node node, String tableName, String rowUuid);
-
- /**
- * update or mutate a Row in a Table of a specified Database Schema.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is Updated
- * @param rowUuid UUID of the row that is being deleted
- * @throws OvsdbPluginException Any failure during the delete operation will result in a specific exception.
- */
-
- void deleteRow(Node node, String databaseName, String tableName, UUID rowUuid) throws OvsdbPluginException;
-
- /**
- * update or mutate a Row in a Table of a specified Database Schema.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is Updated
- * @param parentTable Name of the Parent Table to which this operation will result in mutating.
- * @param parentColumn Name of the Column in the Parent Table to be mutated.
- * @param rowUuid UUID of the row that is being deleted
- * @throws OvsdbPluginException Any failure during the delete operation will result in a specific exception.
- */
-
- void deleteRow(Node node, String databaseName, String tableName, String parentTable,
- UUID parentRowUuid, String parentColumn, UUID rowUuid) throws OvsdbPluginException;
-
- /**
- * @deprecated This version of getRow is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by {@link #getRow(Node, String, String, UUID) getRow}
- *
- * @param node OVSDB Node
- * @param tableName Table Name
- * @param uuid UUID of the row being queried
- * @return a row with a list of Column data that corresponds to an unique Row-identifier called uuid in a given table.
- */
- @Deprecated
- Row getRow(Node node, String tableName, String uuid);
-
- /**
- * Returns a Row from a table for the specified uuid.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table Name
- * @param uuid UUID of the row being queried
- * @throws OvsdbPluginException Any failure during the get operation will result in a specific exception.
- * @return a row with a list of Column data that corresponds to an unique Row-identifier called uuid in a given table.
- */
- Row<GenericTableSchema> getRow(Node node, String databaseName, String tableName, UUID uuid) throws OvsdbPluginException;
-
- /**
- * @deprecated This version of getRows is a short-term replacement for the older and now deprecated method of the same name.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by
- * {@link #getRows(Node, String, String) getRows} and {@link #getRows(Node, String, String, String) getRows}
- *
- * @param node OVSDB Node
- * @param tableName Table Name
- * @return List of rows that makes the entire Table.
- */
- @Deprecated
- ConcurrentMap<String, Row> getRows(Node node, String tableName);
-
- /**
- * Returns all rows of a table.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table Name
- * @throws OvsdbPluginException Any failure during the get operation will result in a specific exception.
- * @return Map of rows to its UUID that makes the entire Table.
- */
- ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node, String databaseName, String tableName) throws OvsdbPluginException;
-
- /**
- * Returns all rows of a table filtered by query string.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table Name
- * @param fiqlQuery FIQL style String Query <a href="http://tools.ietf.org/html/draft-nottingham-atompub-fiql-00">draft-nottingham-atompub-fiql</a> to filter rows
- * @throws OvsdbPluginException Any failure during the get operation will result in a specific exception.
- * @return Map of rows to its UUID that makes the entire Table.
- */
- ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node, String databaseName, String tableName, String fiqlQuery) throws OvsdbPluginException;
-
- /**
- * @deprecated Returns all the Tables in a given Ndoe.
- * This API assumes an Open_vSwitch database Schema.
- *
- * This API is replaced by
- * {@link #getTables(Node, String) getTables}
- * @param node OVSDB node
- * @return List of Table Names that make up Open_vSwitch schema.
- */
- @Deprecated
- List<String> getTables(Node node);
-
- /**
- * Returns all the Tables in a given Node.
- *
- * @param node OVSDB node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @throws OvsdbPluginException Any failure during the get operation will result in a specific exception.
- * @return List of Table Names that make up the schema represented by the databaseName
- */
- List<String> getTables(Node node, String databaseName) throws OvsdbPluginException;
-
- /**
- * setOFController is a convenience method used by existing applications to setup Openflow Controller on
- * a Open_vSwitch Bridge.
- * This API assumes an Open_vSwitch database Schema.
- *
- * @param node Node
- * @param bridgeUUID uuid of the Bridge for which the ip-address of Openflow Controller should be programmed.
- * @return Boolean representing success or failure of the operation.
- *
- * @throws InterruptedException
- * @throws ExecutionException
- */
- Boolean setOFController(Node node, String bridgeUUID) throws InterruptedException, ExecutionException;
-
- <T extends TypedBaseTable<?>> String getTableName(Node node, Class<T> typedClass);
- <T extends TypedBaseTable<?>> T getTypedRow(Node node, Class<T> typedClass, Row row);
- <T extends TypedBaseTable<?>> T createTypedRow(Node node, Class<T> typedClass);
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-import java.util.List;
-import java.util.Map;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-
-public interface OvsdbConnectionService {
- Connection getConnection(Node node);
- List<Node> getNodes();
- Node getNode(String identifier);
- Node connect(String identifier, Map<ConnectionConstants, String> params);
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-
-import java.net.InetAddress;
-
-public interface OvsdbInventoryListener {
- void nodeAdded(Node node, InetAddress address, int port);
- void nodeRemoved(Node node);
- void rowAdded(Node node, String tableName, String uuid, Row row);
- void rowUpdated(Node node, String tableName, String uuid, Row old, Row row);
- void rowRemoved(Node node, String tableName, String uuid, Row row, Object context);
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-import java.net.InetAddress;
-import java.util.concurrent.ConcurrentMap;
-
-import org.opendaylight.ovsdb.lib.message.TableUpdates;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-
-public interface OvsdbInventoryService {
- ConcurrentMap<String, ConcurrentMap<String, Row>> getCache(Node n, String databaseName);
- ConcurrentMap<String, Row> getTableCache(Node n, String databaseName, String tableName);
- Row getRow(Node n, String databaseName, String tableName, String uuid);
- void updateRow(Node n, String databaseName, String tableName, String uuid, Row row);
- void removeRow(Node n, String databaseName, String tableName, String uuid);
- void processTableUpdates(Node n, String databaseName, TableUpdates tableUpdates);
- void printCache(Node n);
- void notifyNodeAdded(Node n, InetAddress address, int port);
- void removeNode(Node n);
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-import java.io.Serializable;
-
-/**
- * Represents the return object of the osgi service interfaces function calls.
- * It contains a code {@code StatusCode} representing the result of the call and
- * a string which describes a failure reason (if any) in human readable form.
- */
-public class Status implements Serializable {
- private static final long serialVersionUID = 0L;
- private StatusCode code;
- private String description;
- private long requestId;
-
- /**
- * Generates an instance of the Status class. This is used as return code
- * for internal API2 function calls. This constructor allows to specify,
- * beside the Status Code, a custom human readable description to add more
- * information about the status.
- *
- * @param errorCode
- * The status code. If passed as null, code will be stored as
- * {@code StatusCode.UNDEFINED}
- * @param description
- * The human readable description of the status. If passed as
- * null, description will be inferred by the code
- */
- public Status(StatusCode errorCode, String description) {
- this.code = (errorCode != null) ? errorCode : StatusCode.UNDEFINED;
- this.description = (description != null) ? description : this.code
- .toString();
- this.requestId = 0;
- }
-
- /**
- * Generates an instance of the Status class based on the passed StatusCode
- * only. The description field of the Status object will be inferred by the
- * status code.
- *
- * @param errorCode
- * The status code. If passed as null, code will be stored as
- * {@code StatusCode.UNDEFINED}
- */
- public Status(StatusCode errorCode) {
- this.code = (errorCode != null) ? errorCode : StatusCode.UNDEFINED;
- this.description = (description != null) ? description : this.code
- .toString();
- this.requestId = 0;
- }
-
- /**
- * Generates an instance of the Status class to be used in case of
- * asynchronous call. It is supposed to be created by the underlying
- * infrastructure only when it was successful in allocating the asynchronous
- * request id, hence caller should expect StatusCode to be successful.
- *
- * @param errorCode
- * The status code. If passed as null, code will be stored as
- * {@code StatusCode.UNDEFINED}
- * @param requestId
- * The request id set by underlying infrastructure for this
- * request
- */
- public Status(StatusCode errorCode, long requestId) {
- this.code = (errorCode != null) ? errorCode : StatusCode.UNDEFINED;
- this.description = (description != null) ? description : this.code
- .toString();
- this.requestId = requestId;
- }
-
- /**
- * Returns the status code
- *
- * @return the {@code StatusCode} representing the status code
- */
- public StatusCode getCode() {
- return code;
- }
-
- /**
- * Returns a human readable description of the failure if any
- *
- * @return a string representing the reason of failure
- */
- public String getDescription() {
- return description;
- }
-
- /**
- * Tells whether the status is successful
- *
- * @return true if the Status code is {@code StatusCode.SUCCESS}
- */
- public boolean isSuccess() {
- return code == StatusCode.SUCCESS || code == StatusCode.CREATED;
- }
-
- /**
- * Return the request id assigned by underlying infrastructure in case of
- * asynchronous request. In case of synchronous requests, the returned id
- * is expected to be 0
- *
- * @return The request id assigned for this asynchronous request
- */
- public long getRequestId() {
- return requestId;
- }
-
- @Override
- public String toString() {
- return code + ": " + description + " (" + requestId + ")";
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((code == null) ? 0 : code.calculateConsistentHashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- Status other = (Status) obj;
- if (code != other.code) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-/**
- * The enum which describes the generic error conditions.
- * Each enum value is associated with a minimal description string.
- *
- */
-public enum StatusCode {
- SUCCESS("Success"),
- CREATED("Created"),
-
- BADREQUEST("Bad Request"),
- UNAUTHORIZED("UnAuthorized"),
- FORBIDDEN("Forbidden"),
- NOTFOUND("Not Found"),
- NOTALLOWED("Method Not Allowed"),
- NOTACCEPTABLE("Request Not Acceptable"),
- TIMEOUT("Request Timeout"),
- CONFLICT("Resource Conflict"),
- GONE("Resource Gone"),
- UNSUPPORTED("Unsupported"),
-
- INTERNALERROR("Internal Error"),
- NOTIMPLEMENTED("Not Implemented"),
- NOSERVICE("Service Not Available"),
-
- UNDEFINED("Undefined Error");
-
- private String description;
- private StatusCode(String description) {
- this.description = description;
- }
-
- /**
- * Prints the description associated to the code value
- */
- @Override
- public String toString() {
- return description;
- }
-
- public int calculateConsistentHashCode() {
- if (this.description != null) {
- return this.description.hashCode();
- } else {
- return 0;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.api;
-
-import org.opendaylight.ovsdb.plugin.api.Status;
-import org.opendaylight.ovsdb.plugin.api.StatusCode;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-
-/**
- * Extends the Status class to allow functions to return a uuid
- */
-public class StatusWithUuid extends Status {
- private static final long serialVersionUID = -5413085099514964003L;
- private UUID uuid;
-
- public StatusWithUuid(StatusCode errorCode) {
- super(errorCode);
- }
-
- public StatusWithUuid(StatusCode errorCode, String description) {
- super(errorCode, description);
- }
-
- public StatusWithUuid(StatusCode errorCode, long requestId) {
- super(errorCode, requestId);
- }
-
- public StatusWithUuid(StatusCode errorCode, UUID uuid) {
- super(errorCode);
- this.uuid = uuid;
- }
-
- public UUID getUuid() {
- return uuid;
- }
-
-}
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.error;
-
-public class OvsdbPluginException extends RuntimeException {
- public OvsdbPluginException(String message){
- super(message);
- }
-
- public OvsdbPluginException(String message, Throwable cause){
- super(message, cause);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.impl;
-
-import static org.opendaylight.ovsdb.lib.operations.Operations.op;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-
-import org.opendaylight.ovsdb.plugin.api.Status;
-import org.opendaylight.ovsdb.plugin.api.StatusCode;
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.ovsdb.lib.error.SchemaVersionMismatchException;
-import org.opendaylight.ovsdb.lib.notation.Column;
-import org.opendaylight.ovsdb.lib.notation.Mutator;
-import org.opendaylight.ovsdb.lib.notation.OvsdbSet;
-import org.opendaylight.ovsdb.lib.notation.ReferencedRow;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.lib.operations.Insert;
-import org.opendaylight.ovsdb.lib.operations.Operation;
-import org.opendaylight.ovsdb.lib.operations.OperationResult;
-import org.opendaylight.ovsdb.lib.operations.TransactionBuilder;
-import org.opendaylight.ovsdb.lib.schema.BaseType.UuidBaseType;
-import org.opendaylight.ovsdb.lib.schema.ColumnSchema;
-import org.opendaylight.ovsdb.lib.schema.DatabaseSchema;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.lib.schema.TableSchema;
-import org.opendaylight.ovsdb.lib.schema.typed.TypedBaseTable;
-import org.opendaylight.ovsdb.plugin.api.Connection;
-import org.opendaylight.ovsdb.plugin.api.OvsVswitchdSchemaConstants;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService;
-import org.opendaylight.ovsdb.plugin.api.StatusWithUuid;
-import org.opendaylight.ovsdb.plugin.error.OvsdbPluginException;
-import org.opendaylight.ovsdb.schema.openvswitch.Bridge;
-import org.opendaylight.ovsdb.schema.openvswitch.Controller;
-import org.opendaylight.ovsdb.schema.openvswitch.OpenVSwitch;
-import org.opendaylight.ovsdb.schema.openvswitch.Port;
-import org.opendaylight.ovsdb.utils.config.ConfigProperties;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.fasterxml.jackson.databind.node.ObjectNode;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.ListenableFuture;
-
-public class ConfigurationServiceImpl implements OvsdbConfigurationService
-{
- private static final Logger LOG = LoggerFactory.getLogger(ConfigurationServiceImpl.class);
-
- OvsdbConnectionService connectionService;
- OvsdbInventoryService ovsdbInventoryService;
- protected static final String OPENFLOW_13 = "1.3";
-
- void init() {
- }
-
- /**
- * Function called by the dependency manager when at least one dependency
- * become unsatisfied or when the component is shutting down because for
- * example bundle is being stopped.
- *
- */
- void destroy() {
- }
-
- /**
- * Function called by dependency manager after "init ()" is called and after
- * the services provided by the class are registered in the service registry
- *
- */
- void start() {
- }
-
- /**
- * Function called by the dependency manager before the services exported by
- * the component are unregistered, this will be followed by a "destroy ()"
- * calls
- *
- */
- void stop() {
- }
-
- public void setConnectionServiceInternal(OvsdbConnectionService connectionService) {
- this.connectionService = connectionService;
- }
-
- public void unsetConnectionServiceInternal(OvsdbConnectionService connectionService) {
- if (this.connectionService.equals(connectionService)) {
- this.connectionService = null;
- }
- }
-
- public void setOvsdbInventoryService(OvsdbInventoryService ovsdbInventoryService) {
- this.ovsdbInventoryService = ovsdbInventoryService;
- }
-
- public void unsetInventoryServiceInternal(OvsdbInventoryService ovsdbInventoryService) {
- if (this.ovsdbInventoryService.equals(ovsdbInventoryService)) {
- this.ovsdbInventoryService = null;
- }
- }
-
- private Connection getConnection (Node node) {
- Connection connection = connectionService.getConnection(node);
- if (connection == null || !connection.getClient().isActive()) {
- return null;
- }
-
- return connection;
- }
- /*
- * There are a few Open_vSwitch schema specific special case handling to be done for
- * the older API (such as by inserting a mandatory Interface row automatically upon inserting
- * a Port row.
- */
- private void handleSpecialInsertCase(OvsdbClient client, String databaseName,
- String tableName, Row<GenericTableSchema> row, TransactionBuilder transactionBuilder) {
- Port port = client.getTypedRowWrapper(Port.class, null);
- if (databaseName.equals(OvsVswitchdSchemaConstants.DATABASE_NAME) && tableName.equals(port.getSchema().getName())) {
- port = client.getTypedRowWrapper(Port.class, row);
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TableSchema<GenericTableSchema> tableSchema = dbSchema.table(tableName, GenericTableSchema.class);
- ColumnSchema<GenericTableSchema, Set<UUID>> columnSchema = tableSchema.multiValuedColumn("interfaces", UUID.class);
- String namedUuid = "Special_"+tableName;
- List<Operation> priorOperations = transactionBuilder.getOperations();
- Insert portOperation = (Insert)priorOperations.get(0);
- portOperation.value(columnSchema, new UUID(namedUuid));
-
- Column<GenericTableSchema, ?> nameColumn = port.getNameColumn();
- List<Column<GenericTableSchema, ?>> columns = new ArrayList<Column<GenericTableSchema, ?>>();
- columns.add(nameColumn);
- Row<GenericTableSchema> intfRow = new Row<GenericTableSchema>(tableSchema, columns);
- this.processTypedInsertTransaction(client, databaseName, "Interface", null, null, null, namedUuid, intfRow, transactionBuilder);
- }
- }
-
- /*
- * A common Transaction that takes in old API style Parent_uuid and inserts a mutation on
- * the parent table for the newly inserted Child.
- * Due to some additional special case(s), the Transaction is further amended by handleSpecialInsertCase
- */
- private void processTypedInsertTransaction(OvsdbClient client, String databaseName, String childTable,
- String parentTable, String parentUuid, String parentColumn, String namedUuid,
- Row<GenericTableSchema> row, TransactionBuilder transactionBuilder) {
- this.processInsertTransaction(client, databaseName, childTable, parentTable, new UUID(parentUuid), parentColumn,
- namedUuid, row, transactionBuilder);
- /*
- * There are a few Open_vSwitch schema specific special case handling to be done for
- * the older API (such as by inserting a mandatory Interface row automatically upon inserting
- * a Port row.
- */
- handleSpecialInsertCase(client, databaseName, childTable, row, transactionBuilder);
- }
-
- /*
- * TODO : Move all the Special Cases out of ConfigurationService and into the Schema specific bundles.
- * But that makes plugin more reliant on the Typed Bundles more than just API wrapper.
- * Keeping these Special Handling locally till we introduce the full schema independent APIs in the
- * plugin layer.
- */
- public String getSpecialCaseParentUUID(Node node, String databaseName, String childTableName) {
- if (!databaseName.equals(OvsVswitchdSchemaConstants.DATABASE_NAME)) {
- return null;
- }
- String[] parentColumn = OvsVswitchdSchemaConstants.getParentColumnToMutate(childTableName);
- if (parentColumn != null && parentColumn[0].equals(OvsVswitchdSchemaConstants.DATABASE_NAME)) {
- Connection connection = connectionService.getConnection(node);
- OpenVSwitch openVSwitch = connection.getClient().getTypedRowWrapper(OpenVSwitch.class, null);
- ConcurrentMap<String, Row> row = this.getRows(node, openVSwitch.getSchema().getName());
- if (row == null || row.size() == 0) {
- return null;
- }
- return (String)row.keySet().toArray()[0];
- }
- return null;
- }
-
- /*
- * Though this is a New API that takes in Row object, this still is considered a
- * Deprecated call because of the assumption with a Single Row insertion.
- * An ideal insertRow must be able to take in multiple Rows, which includes the
- * Row being inserted in one Table and other Rows that needs mutate in other Tables.
- */
- @Override
- @Deprecated
- public StatusWithUuid insertRow(Node node, String tableName, String parentUuid, Row<GenericTableSchema> row) {
- String[] parentColumn = OvsVswitchdSchemaConstants.getParentColumnToMutate(tableName);
- if (parentColumn == null) {
- parentColumn = new String[]{null, null};
- }
-
- Connection connection = connectionService.getConnection(node);
- OvsdbClient client = connection.getClient();
-
- String myParentUuid = parentUuid;
- if (myParentUuid == null) {
- myParentUuid = this.getSpecialCaseParentUUID(node, OvsVswitchdSchemaConstants.DATABASE_NAME, tableName);
- }
- LOG.debug("insertRow Connection : {} Table : {} ParentTable : {} Parent Column: {} Parent UUID : {} Row : {}",
- client.getConnectionInfo(), tableName, parentColumn[0], parentColumn[1], myParentUuid, row);
-
- DatabaseSchema dbSchema = client.getDatabaseSchema(OvsVswitchdSchemaConstants.DATABASE_NAME);
- TransactionBuilder transactionBuilder = client.transactBuilder(dbSchema);
-
- String namedUuid = "Transaction_"+ tableName;
- this.processTypedInsertTransaction(client, OvsVswitchdSchemaConstants.DATABASE_NAME, tableName,
- parentColumn[0], myParentUuid, parentColumn[1], namedUuid,
- row, transactionBuilder);
-
- ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
- List<OperationResult> operationResults;
- try {
- operationResults = results.get();
- if (operationResults.isEmpty() || (transactionBuilder.getOperations().size() != operationResults.size())) {
- return new StatusWithUuid(StatusCode.INTERNALERROR);
- }
- for (OperationResult result : operationResults) {
- if (result.getError() != null) {
- return new StatusWithUuid(StatusCode.BADREQUEST, result.getError());
- }
- }
- UUID uuid = operationResults.get(0).getUuid();
- return new StatusWithUuid(StatusCode.SUCCESS, uuid);
- } catch (InterruptedException | ExecutionException e) {
- // TODO Auto-generated catch block
- return new StatusWithUuid(StatusCode.INTERNALERROR, e.getLocalizedMessage());
- }
-
- }
-
- @Override
- @Deprecated
- public Status updateRow (Node node, String tableName, String parentUUID, String rowUUID, Row row) {
- String databaseName = OvsVswitchdSchemaConstants.DATABASE_NAME;
- this.updateRow(node, databaseName, tableName, new UUID(rowUUID), row, true);
- return new StatusWithUuid(StatusCode.SUCCESS);
- }
-
- private void processDeleteTransaction(OvsdbClient client, String databaseName, String childTable,
- String parentTable, String parentColumn, String uuid, TransactionBuilder transactionBuilder) {
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TableSchema<GenericTableSchema> childTableSchema = dbSchema.table(childTable, GenericTableSchema.class);
-
- if (parentColumn != null) {
- TableSchema<GenericTableSchema> parentTableSchema = dbSchema.table(parentTable, GenericTableSchema.class);
- ColumnSchema<GenericTableSchema, UUID> parentColumnSchema = parentTableSchema.column(parentColumn, UUID.class);
- transactionBuilder
- .add(op.mutate(parentTableSchema)
- .addMutation(parentColumnSchema, Mutator.DELETE, new UUID(uuid))
- .where(parentColumnSchema.opIncludes(new UUID(uuid)))
- .build());
- }
-
- ColumnSchema<GenericTableSchema, UUID> _uuid = childTableSchema.column("_uuid", UUID.class);
- transactionBuilder.add(op.delete(childTableSchema)
- .where(_uuid.opEqual(new UUID(uuid)))
- .build());
- }
-
- @Override
- @Deprecated
- public Status deleteRow(Node node, String tableName, String uuid) {
- String databaseName = OvsVswitchdSchemaConstants.DATABASE_NAME;
- Connection connection = connectionService.getConnection(node);
- OvsdbClient client = connection.getClient();
-
- String[] parentColumn = OvsVswitchdSchemaConstants.getParentColumnToMutate(tableName);
- if (parentColumn == null) {
- parentColumn = new String[]{null, null};
- }
-
- LOG.debug("deleteRow : Connection : {} databaseName : {} tableName : {} Uuid : {} ParentTable : {} ParentColumn : {}",
- client.getConnectionInfo(), databaseName, tableName, uuid, parentColumn[0], parentColumn[1]);
-
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TransactionBuilder transactionBuilder = client.transactBuilder(dbSchema);
- this.processDeleteTransaction(client, OvsVswitchdSchemaConstants.DATABASE_NAME, tableName,
- parentColumn[0], parentColumn[1], uuid, transactionBuilder);
-
- ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
- List<OperationResult> operationResults;
- try {
- operationResults = results.get();
- if (operationResults.isEmpty() || (transactionBuilder.getOperations().size() != operationResults.size())) {
- return new StatusWithUuid(StatusCode.INTERNALERROR);
- }
- for (OperationResult result : operationResults) {
- if (result.getError() != null) {
- return new StatusWithUuid(StatusCode.BADREQUEST, result.getError());
- }
- }
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("Error in deleteRow() {} {}", node, tableName, e);
- }
-
- return new Status(StatusCode.SUCCESS);
- }
-
- @Override
- @Deprecated
- public ConcurrentMap<String, Row> getRows(Node node, String tableName) {
- return ovsdbInventoryService.getTableCache(node, OvsVswitchdSchemaConstants.DATABASE_NAME, tableName);
- }
-
- @Override
- @Deprecated
- public Row getRow(Node node, String tableName, String uuid) {
- Map<String, Row> ovsTable = ovsdbInventoryService.getTableCache(node, OvsVswitchdSchemaConstants.DATABASE_NAME, tableName);
- if (ovsTable == null) {
- return null;
- }
- return ovsTable.get(uuid);
- }
-
- @Override
- @Deprecated
- public List<String> getTables(Node node) {
- return this.getTables(node, OvsVswitchdSchemaConstants.DATABASE_NAME);
- }
-
- private InetAddress getControllerIPAddress(Connection connection) {
- InetAddress controllerIP = null;
-
- String addressString = ConfigProperties.getProperty(this.getClass(), "ovsdb.controller.address");
-
- if (addressString != null) {
- try {
- controllerIP = InetAddress.getByName(addressString);
- if (controllerIP != null) {
- return controllerIP;
- }
- } catch (UnknownHostException e) {
- LOG.error("Host {} is invalid", addressString);
- }
- }
-
- addressString = ConfigProperties.getProperty(this.getClass(), "of.address");
-
- if (addressString != null) {
- try {
- controllerIP = InetAddress.getByName(addressString);
- if (controllerIP != null) {
- return controllerIP;
- }
- } catch (UnknownHostException e) {
- LOG.error("Host {} is invalid", addressString);
- }
- }
-
- try {
- controllerIP = connection.getClient().getConnectionInfo().getLocalAddress();
- return controllerIP;
- } catch (Exception e) {
- LOG.debug("Invalid connection provided to getControllerIPAddresses", e);
- }
- return controllerIP;
- }
-
- private short getControllerOFPort() {
- short openFlowPort = (short) 6633;
- String portString = ConfigProperties.getProperty(this.getClass(), "of.listenPort");
- if (portString != null) {
- try {
- openFlowPort = Short.parseShort(portString);
- } catch (NumberFormatException e) {
- LOG.warn("Invalid port:{}, use default({})", portString,
- openFlowPort);
- }
- }
- return openFlowPort;
- }
-
- private UUID getCurrentControllerUuid(Node node, final String controllerTableName, final String target) {
- ConcurrentMap<String, Row> rows = this.getRows(node, controllerTableName);
-
- if (rows != null) {
- for (Map.Entry<String, Row> entry : rows.entrySet()) {
- Controller currController = this.getTypedRow(node, Controller.class, entry.getValue());
- Column<GenericTableSchema, String> column = currController.getTargetColumn();
- String currTarget = column.getData();
- if (currTarget != null && currTarget.equalsIgnoreCase(target)) {
- return currController.getUuid();
- }
- }
- }
- return null;
- }
-
- @Override
- public Boolean setOFController(Node node, String bridgeUUID) throws InterruptedException, ExecutionException {
- Connection connection = this.getConnection(node);
- if (connection == null) {
- return false;
- }
-
- Bridge bridge = connection.getClient().createTypedRowWrapper(Bridge.class);
-
- Status updateOperationStatus = null;
- try {
- OvsdbSet<String> protocols = new OvsdbSet<String>();
-
- String ofVersion = System.getProperty("ovsdb.of.version", OPENFLOW_13);
- switch (ofVersion) {
- case OPENFLOW_13:
- //fall through
- default:
- protocols.add("OpenFlow13");
- break;
- }
- bridge.setProtocols(protocols);
- updateOperationStatus = this.updateRow(node, bridge.getSchema().getName(),
- null, bridgeUUID, bridge.getRow());
- LOG.debug("Bridge {} updated to {} with Status {}", bridgeUUID,
- protocols.toArray()[0], updateOperationStatus);
-
- } catch (SchemaVersionMismatchException e){
- LOG.debug(e.toString());
- }
-
- // If we fail to update the protocols
- if (updateOperationStatus != null && !updateOperationStatus.isSuccess()) {
- return updateOperationStatus.isSuccess();
- }
-
- Status status;
- UUID currControllerUuid;
- InetAddress ofControllerAddr = this.getControllerIPAddress(connection);
- short ofControllerPort = getControllerOFPort();
- String newControllerTarget = "tcp:"+ofControllerAddr.getHostAddress()+":"+ofControllerPort;
- Controller newController = connection.getClient().createTypedRowWrapper(Controller.class);
- newController.setTarget(newControllerTarget);
- final String controllerTableName = newController.getSchema().getName();
-
- currControllerUuid = getCurrentControllerUuid(node, controllerTableName, newControllerTarget);
-
- if (currControllerUuid != null) {
- bridge = connection.getClient().createTypedRowWrapper(Bridge.class);
- bridge.setController(Sets.newHashSet(currControllerUuid));
- status = this.updateRow(node, bridge.getSchema().getName(), null, bridgeUUID, bridge.getRow());
- } else {
- status = this.insertRow(node, controllerTableName, bridgeUUID, newController.getRow());
- }
-
- return status != null && status.isSuccess();
-
- }
-
-
- public Boolean setBridgeOFController(Node node, String bridgeIdentifier) {
- if (connectionService == null) {
- LOG.error("Couldn't refer to the ConnectionService");
- return false;
- }
-
- try{
- Connection connection = connectionService.getConnection(node);
- Bridge bridge = connection.getClient().getTypedRowWrapper(Bridge.class, null);
-
- Map<String, Row> brTableCache = ovsdbInventoryService.getTableCache(node, OvsVswitchdSchemaConstants.DATABASE_NAME, bridge.getSchema().getName());
- for (String uuid : brTableCache.keySet()) {
- bridge = connection.getClient().getTypedRowWrapper(Bridge.class, brTableCache.get(uuid));
- if (bridge.getName().contains(bridgeIdentifier)) {
- return setOFController(node, uuid);
- }
- }
- } catch(Exception e) {
- LOG.error("Error in setBridgeOFController()", e);
- }
- return false;
- }
-
- @Override
- public <T extends TypedBaseTable<?>> String getTableName(Node node, Class<T> typedClass) {
- Connection connection = connectionService.getConnection(node);
- if (connection == null) {
- return null;
- }
- OvsdbClient client = connection.getClient();
- TypedBaseTable<?> typedTable = client.getTypedRowWrapper(typedClass, null);
- if (typedTable == null) {
- return null;
- }
- return typedTable.getSchema().getName();
- }
-
- @Override
- public <T extends TypedBaseTable<?>> T getTypedRow(Node node, Class<T> typedClass, Row row) {
- Connection connection = connectionService.getConnection(node);
- if (connection == null) {
- return null;
- }
- OvsdbClient client = connection.getClient();
- return (T)client.getTypedRowWrapper(typedClass, row);
- }
-
- @Override
- public <T extends TypedBaseTable<?>> T createTypedRow(Node node, Class<T> typedClass) {
- Connection connection = connectionService.getConnection(node);
- if (connection == null) {
- return null;
- }
- OvsdbClient client = connection.getClient();
- return client.createTypedRowWrapper(typedClass);
- }
-
- // SCHEMA-INDEPENDENT Configuration Service APIs
-
- private String getTableNameForRowUuid(Node node, String databaseName, UUID rowUuid) {
- ConcurrentMap<String, ConcurrentMap<String, Row>> cache = ovsdbInventoryService.getCache(node, databaseName);
- if (cache == null) {
- return null;
- }
- for (String tableName : cache.keySet()) {
- ConcurrentMap<String, Row> rows = cache.get(tableName);
- if (rows.get(rowUuid.toString()) != null) {
- return tableName;
- }
- }
- return null;
- }
-
- private String getReferencingColumn (TableSchema<?> parentTableSchema, String childTableName) throws OvsdbPluginException {
- Map<String, ColumnSchema> columnSchemas = parentTableSchema.getColumnSchemas();
- String refColumn = null;
- for (String columnName : columnSchemas.keySet()) {
- ColumnSchema columnSchema = columnSchemas.get(columnName);
- if (columnSchema.getType().getBaseType().getClass().equals(UuidBaseType.class)) {
- UuidBaseType refType = (UuidBaseType)columnSchema.getType().getBaseType();
- if (refType.getRefTable() != null && refType.getRefTable().equalsIgnoreCase(childTableName)) {
- if (refColumn == null) {
- refColumn = columnName;
- } else {
- throw new OvsdbPluginException("Multiple Referencing Columns for "+ childTableName +" on "+ parentTableSchema.getName());
- }
- }
- }
- }
- if (refColumn != null) {
- return refColumn;
- }
- throw new OvsdbPluginException("No Referencing Column for "+childTableName+" on "+parentTableSchema.getName());
- }
- /*
- * A common Insert Transaction convenience method that populates the TransactionBuilder with insert operation
- * for a Child Row and also mutates the parent row with the UUID of the inserted Child.
- */
- private void processInsertTransaction(OvsdbClient client, String databaseName, String childTable,
- String parentTable, UUID parentUuid, String parentColumn, String namedUuid,
- Row<GenericTableSchema> row,
- TransactionBuilder transactionBuilder) {
- // Insert the row as the first transaction entry
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TableSchema<GenericTableSchema> childTableSchema = dbSchema.table(childTable, GenericTableSchema.class);
- transactionBuilder.add(op.insert(childTableSchema, row)
- .withId(namedUuid));
-
- // Followed by the Mutation
- if (parentColumn != null) {
- TableSchema<GenericTableSchema> parentTableSchema = dbSchema.table(parentTable, GenericTableSchema.class);
- ColumnSchema<GenericTableSchema, UUID> parentColumnSchema = parentTableSchema.column(parentColumn, UUID.class);
- ColumnSchema<GenericTableSchema, UUID> _uuid = parentTableSchema.column("_uuid", UUID.class);
-
- transactionBuilder
- .add(op.mutate(parentTableSchema)
- .addMutation(parentColumnSchema, Mutator.INSERT, new UUID(namedUuid))
- .where(_uuid.opEqual(parentUuid))
- .build());
- }
- }
-
- /**
- * insert a Row in a Table of a specified Database Schema.
- *
- * This method can insert just a single Row specified in the row parameter.
- * But {@link #insertTree(Node, String, String, UUID, Row) insertTree}
- * can insert a hierarchy of rows with parent-child relationship.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentTable Name of the Parent Table to which this operation will result in attaching/mutating.
- * @param parentUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param parentColumn Name of the Column in the Parent Table to be mutated with the UUID that results from the insert operation.
- * @param row Row of table Content to be inserted
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return UUID of the inserted Row
- */
- @Override
- public UUID insertRow(Node node, String databaseName, String tableName, String parentTable, UUID parentUuid,
- String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException {
- Connection connection = connectionService.getConnection(node);
- OvsdbClient client = connection.getClient();
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TableSchema<GenericTableSchema> tableSchema = dbSchema.table(tableName, GenericTableSchema.class);
-
- Row<GenericTableSchema> processedRow = this.insertTree(node, databaseName, tableName, parentTable, parentUuid, parentColumn, row);
-
- ColumnSchema<GenericTableSchema, UUID> _uuid = tableSchema.column("_uuid", UUID.class);
- Column<GenericTableSchema, UUID> uuid = processedRow.getColumn(_uuid);
- return uuid.getData();
- }
-
- /**
- * insert a Row in a Table of a specified Database Schema. This is a convenience method on top of
- * {@link insertRow(Node, String, String, String, UUID, String, Row) insertRow}
- * which assumes that OVSDB schema implementation that corresponds to the databaseName will provide
- * the necessary service to populate the Parent Table Name and Parent Column Name.
- *
- * This method can insert just a single Row specified in the row parameter.
- * But {@link #insertTree(Node, String, String, UUID, Row) insertTree}
- * can insert a hierarchy of rows with parent-child relationship.
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentRowUuid UUID of the parent table to which this operation will result in attaching/mutating.
- * @param row Row of table Content to be inserted
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return UUID of the inserted Row
- */
- @Override
- public UUID insertRow(Node node, String databaseName, String tableName,
- UUID parentRowUuid, Row<GenericTableSchema> row)
- throws OvsdbPluginException {
- return this.insertRow(node, databaseName, tableName, null, parentRowUuid, null, row);
- }
-
- /**
- * inserts a Tree of Rows in multiple Tables that has parent-child relationships referenced through the OVSDB schema's refTable construct
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentTable Name of the Parent Table to which this operation will result in attaching/mutating.
- * @param parentUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param parentColumn Name of the Column in the Parent Table to be mutated with the UUID that results from the insert operation.
- * @param row Row Tree with parent-child relationships via column of type refTable.
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return Returns the row tree with the UUID of every inserted Row populated in the _uuid column of every row in the tree
- */
- @Override
- public Row<GenericTableSchema> insertTree(Node node, String databaseName, String tableName, String parentTable, UUID parentUuid,
- String parentColumn, Row<GenericTableSchema> row) throws OvsdbPluginException {
- Connection connection = connectionService.getConnection(node);
- OvsdbClient client = connection.getClient();
-
- if (databaseName == null || tableName == null) {
- throw new OvsdbPluginException("databaseName, tableName and parentUuid are Mandatory Parameters");
- }
-
- if (parentTable == null && parentUuid != null) {
- parentTable = this.getTableNameForRowUuid(node, databaseName, parentUuid);
- }
-
- if (parentColumn == null && parentTable != null) {
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TableSchema<GenericTableSchema> parentTableSchema = dbSchema.table(parentTable, GenericTableSchema.class);
- parentColumn = this.getReferencingColumn(parentTableSchema, tableName);
- }
-
- LOG.debug("insertTree Connection : {} Table : {} ParentTable : {} Parent Column: {} Parent UUID : {} Row : {}",
- client.getConnectionInfo(), tableName, parentTable, parentColumn, parentUuid, row);
-
- Map<UUID, Map.Entry<String, Row<GenericTableSchema>>> referencedRows = Maps.newConcurrentMap();
- extractReferencedRows(node, databaseName, row, referencedRows, 0);
- DatabaseSchema dbSchema = client.getDatabaseSchema(OvsVswitchdSchemaConstants.DATABASE_NAME);
- TransactionBuilder transactionBuilder = client.transactBuilder(dbSchema);
-
- String namedUuid = "Transaction_"+ tableName;
- this.processInsertTransaction(client, databaseName, tableName, parentTable, parentUuid,
- parentColumn, namedUuid, row, transactionBuilder);
-
- int referencedRowsInsertIndex = transactionBuilder.getOperations().size();
- // Insert Referenced Rows
- if (referencedRows != null) {
- for (UUID refUuid : referencedRows.keySet()) {
- Map.Entry<String, Row<GenericTableSchema>> referencedRow = referencedRows.get(refUuid);
- TableSchema<GenericTableSchema> refTableSchema = dbSchema.table(referencedRow.getKey(), GenericTableSchema.class);
- transactionBuilder.add(op.insert(refTableSchema, referencedRow.getValue())
- .withId(refUuid.toString()));
- }
- }
-
- ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
- List<OperationResult> operationResults;
- try {
- operationResults = results.get();
- if (operationResults.isEmpty() || (transactionBuilder.getOperations().size() != operationResults.size())) {
- throw new OvsdbPluginException("Insert Operation Failed");
- }
- for (OperationResult result : operationResults) {
- if (result.getError() != null) {
- throw new OvsdbPluginException("Insert Operation Failed with Error : " + result.getError());
- }
- }
- return getNormalizedRow(dbSchema, tableName, row, referencedRows, operationResults, referencedRowsInsertIndex);
- } catch (InterruptedException | ExecutionException e) {
- throw new OvsdbPluginException("Exception : "+e.getLocalizedMessage());
- }
- }
-
- /**
- * inserts a Tree of Rows in multiple Tables that has parent-child relationships referenced through the OVSDB schema's refTable construct.
- * This is a convenience method on top of {@link #insertTree(Node, String, String, String, UUID, String, Row) insertTree}
- *
- * @param node OVSDB Node
- * @param databaseName Database Name that represents the Schema supported by the node.
- * @param tableName Table on which the row is inserted
- * @param parentRowUuid UUID of a Row in parent table to which this operation will result in attaching/mutating.
- * @param row Row Tree with parent-child relationships via column of type refTable.
- * @throws OvsdbPluginException Any failure during the insert transaction will result in a specific exception.
- * @return Returns the row tree with the UUID of every inserted Row populated in the _uuid column of every row in the tree
- */
- @Override
- public Row<GenericTableSchema> insertTree(Node node, String databaseName,
- String tableName, UUID parentRowUuid, Row<GenericTableSchema> row)
- throws OvsdbPluginException {
- return this.insertTree(node, databaseName, tableName, null, parentRowUuid, null, row);
- }
-
- /**
- * Convenience method that helps insertTree to extract Rows that are referenced directly from within a primary row
- * to be inserted. These referenced rows are *NOT* defined in the OVSDB specification. But, we felt that from a northbound
- * application standpoint, having such an option is useful and our implementation supports it for applications to make use of.
- * In short, whichever ColumnSchema is based on an UUID (refered by RefTable in schema), applications can directly insert an
- * entire row and this method will help navigate it through and identify such cases.
- * After identifying these Referenced Rows, it will modify the primary row with Named UUIDs and fill out the referencedRows
- * Map structure so that insertTree can insert all the Rows defined in this Tree of rows in a single transaction with automatic
- * Mutation on the parent rows.
- *
- * @param node OVSDB Node
- * @param dbName Database Name that represents the Schema supported by the node.
- * @param row Row Tree with parent-child relationships via column of type refTable.
- * @param referencedRows Map of Named-UUID to the actual referenced row (with RefTable)
- * @param namedUuidSuffix Named UUID must be unique for every new Row insert within a given transaction.
- * This index will help to retain the uniqueness.
- */
- private void extractReferencedRows(Node node, String dbName, Row<GenericTableSchema> row,
- Map<UUID, Map.Entry<String, Row<GenericTableSchema>>> referencedRows,
- int namedUuidSuffix) {
- OvsdbClient client = connectionService.getConnection(node).getClient();
- Collection<Column<GenericTableSchema, ?>> columns = row.getColumns();
- for (Column column : columns) {
- if (column.getData() != null) {
- if (column.getData() instanceof ReferencedRow) {
- ReferencedRow refRowObject = (ReferencedRow)column.getData();
- UUID refUuid = new UUID("NamedUuid"+namedUuidSuffix++);
- column.setData(refUuid);
- try {
- DatabaseSchema dbSchema = client.getSchema(dbName).get();
- GenericTableSchema schema = dbSchema.table(refRowObject.getRefTable(), GenericTableSchema.class);
- Row<GenericTableSchema> refRow = schema.createRow((ObjectNode)refRowObject.getJsonNode());
- referencedRows.put(refUuid, new AbstractMap.SimpleEntry<>(refRowObject.getRefTable(), refRow));
- extractReferencedRows(node, dbName, refRow, referencedRows, namedUuidSuffix);
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("Exception while extracting multi-level Row references " + e.getLocalizedMessage());
- }
- } else if (column.getData() instanceof OvsdbSet) {
- OvsdbSet<Object> setObject = (OvsdbSet<Object>)column.getData();
- OvsdbSet<Object> modifiedSet = new OvsdbSet<>();
- for (Object obj : setObject) {
- if (obj instanceof ReferencedRow) {
- ReferencedRow refRowObject = (ReferencedRow)obj;
- UUID refUuid = new UUID("NamedUuid"+namedUuidSuffix++);
- modifiedSet.add(refUuid);
- try {
- DatabaseSchema dbSchema = client.getSchema(dbName).get();
- GenericTableSchema schema = dbSchema.table(refRowObject.getRefTable(), GenericTableSchema.class);
- Row<GenericTableSchema> refRow = schema.createRow((ObjectNode)refRowObject.getJsonNode());
- referencedRows.put(refUuid, new AbstractMap.SimpleEntry<>(refRowObject.getRefTable(), refRow));
- extractReferencedRows(node, dbName, refRow, referencedRows, namedUuidSuffix);
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("Exception while extracting multi-level Row references " + e.getLocalizedMessage());
- }
- } else {
- modifiedSet.add(obj);
- }
- }
- column.setData(modifiedSet);
- }
- }
- }
- }
-
- /**
- * getNormalizedRow normalizes the Row from a namedUuid Space as defined in extractReferencedRows to the actual Uuid as created
- * by the Ovsdb-server. In order to perform this normalization, it processes the operation results for a corresponding Transaction
- * where the referenced rows are inserted along with the Primary row. It changes the named-Uuid to the actual Uuid before returning
- * the Row to the application.
- *
- * @param dbSchema Database Schema supported by the node.
- * @param row Row Tree with parent-child relationships via column of type refTable.
- * @param tableName Table on which the row is inserted
- * @param referencedRows Map of Named-UUID to the actual referenced row (with RefTable)
- * @param operationResults Operation Results returned by ovsdb-server for the insertTree transaction
- * @param referencedRowsInsertIndex Starting index in OperationResults from which the ReferencedRow insert results begin.
- * @return
- */
- private Row<GenericTableSchema> getNormalizedRow(DatabaseSchema dbSchema, String tableName, Row<GenericTableSchema> row,
- Map<UUID, Map.Entry<String, Row<GenericTableSchema>>> referencedRows,
- List<OperationResult> operationResults, int referencedRowsInsertIndex) {
- UUID primaryRowUuid = operationResults.get(0).getUuid();
- TableSchema<GenericTableSchema> primaryRowTableSchema = dbSchema.table(tableName, GenericTableSchema.class);
- ColumnSchema<GenericTableSchema, UUID> uuid = primaryRowTableSchema.column("_uuid", UUID.class);
- if (uuid != null) {
- Column<GenericTableSchema, UUID> uuidColumn = new Column<>(uuid, primaryRowUuid);
- row.addColumn("_uuid", uuidColumn);
- }
-
- if (referencedRows != null) {
- Collection<Column<GenericTableSchema, ?>> columns = row.getColumns();
- Object[] rowKeys = referencedRows.keySet().toArray();
- for (int idx = 0; idx < rowKeys.length; idx++) {
- UUID refUuid = (UUID) rowKeys[idx];
- for (Column column : columns) {
- if (column.getData() != null) {
- if ((column.getData() instanceof UUID) && column.getData().equals(refUuid)) {
- column.setData(operationResults.get(referencedRowsInsertIndex + idx).getUuid());
- } else if ((column.getData() instanceof OvsdbSet) && ((OvsdbSet)column.getData()).contains(refUuid)) {
- OvsdbSet<UUID> refSet = (OvsdbSet<UUID>)column.getData();
- refSet.remove(refUuid);
- refSet.add(operationResults.get(referencedRowsInsertIndex + idx).getUuid());
- }
- }
- }
- }
- }
- return row;
- }
-
- @Override
- public Row<GenericTableSchema> updateRow(Node node, String databaseName,
- String tableName, UUID rowUuid, Row<GenericTableSchema> row,
- boolean overwrite) {
- Connection connection = connectionService.getConnection(node);
- OvsdbClient client = connection.getClient();
-
- LOG.debug("updateRow : Connection : {} databaseName : {} tableName : {} rowUUID : {} row : {}",
- client.getConnectionInfo(), databaseName, tableName, rowUuid, row.toString());
- try{
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TransactionBuilder transactionBuilder = client.transactBuilder(dbSchema);
- TableSchema<GenericTableSchema> tableSchema = dbSchema.table(tableName, GenericTableSchema.class);
- ColumnSchema<GenericTableSchema, UUID> uuid = tableSchema.column("_uuid", UUID.class);
- transactionBuilder.add(op.update(tableSchema, row)
- .where(uuid.opEqual(rowUuid))
- .build());
-
- ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
- List<OperationResult> operationResults = results.get();
- for (OperationResult result : operationResults) {
- if (result.getError() != null) {
- throw new OvsdbPluginException("Error updating row : " + result.getError() +
- " Details: " + result.getDetails());
- }
- }
- if (operationResults.isEmpty() || (transactionBuilder.getOperations().size() != operationResults.size())) {
- throw new OvsdbPluginException("Failed to update row. Please check OVS logs for more info.");
- }
-
- return this.getRow(node, databaseName, tableName, rowUuid);
- } catch(Exception e){
- throw new OvsdbPluginException("Error updating row due to an exception "+ e.getMessage());
- }
- }
-
- @Override
- public void deleteRow(Node node, String databaseName, String tableName, String parentTable, UUID parentRowUuid,
- String parentColumn, UUID rowUuid) {
- Connection connection = connectionService.getConnection(node);
- OvsdbClient client = connection.getClient();
-
- if (parentTable == null && parentRowUuid != null) {
- parentTable = this.getTableNameForRowUuid(node, databaseName, parentRowUuid);
- }
-
- String myParentColumn = parentColumn;
- if (myParentColumn == null && parentTable != null) {
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TableSchema<GenericTableSchema> parentTableSchema = dbSchema.table(parentTable, GenericTableSchema.class);
- myParentColumn = this.getReferencingColumn(parentTableSchema, tableName);
- }
-
- LOG.debug("deleteRow : Connection : {} databaseName : {} tableName : {} Uuid : {} ParentTable : {} ParentColumn : {}",
- client.getConnectionInfo(), databaseName, tableName, rowUuid, parentTable, myParentColumn);
-
- DatabaseSchema dbSchema = client.getDatabaseSchema(databaseName);
- TransactionBuilder transactionBuilder = client.transactBuilder(dbSchema);
- this.processDeleteTransaction(client, databaseName, tableName,
- parentTable, myParentColumn, rowUuid.toString(), transactionBuilder);
-
- ListenableFuture<List<OperationResult>> results = transactionBuilder.execute();
- List<OperationResult> operationResults;
- try {
- operationResults = results.get();
- if (operationResults.isEmpty() || (transactionBuilder.getOperations().size() != operationResults.size())) {
- throw new OvsdbPluginException("Delete Operation Failed");
- }
- for (OperationResult result : operationResults) {
- if (result.getError() != null) {
- throw new OvsdbPluginException("Delete Operation Failed with Error : " + result.getError());
- }
- }
- } catch (InterruptedException | ExecutionException e) {
- LOG.error("Error in deleteRow() {} {} {} {}", node, databaseName, tableName, parentTable, e);
- }
- }
-
- @Override
- public void deleteRow(Node node, String databaseName, String tableName, UUID rowUuid) {
- this.deleteRow(node, databaseName, tableName, null, null, null, rowUuid);
- }
-
- @Override
- public Row<GenericTableSchema> getRow(Node node, String databaseName,
- String tableName, UUID uuid) {
- ConcurrentMap<UUID, Row<GenericTableSchema>> rows = this.getRows(node, databaseName, tableName);
- if (rows != null) {
- return rows.get(uuid);
- }
- return null;
- }
-
- @Override
- public ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node,
- String databaseName, String tableName) throws OvsdbPluginException {
- ConcurrentMap<String, Row> ovsTable = ovsdbInventoryService.getTableCache(node, databaseName, tableName);
- if (ovsTable == null) {
- return null;
- }
- ConcurrentMap<UUID, Row<GenericTableSchema>> tableDB = Maps.newConcurrentMap();
- for (String uuidStr : ovsTable.keySet()) {
- tableDB.put(new UUID(uuidStr), ovsTable.get(uuidStr));
- }
- return tableDB;
- }
-
- @Override
- public ConcurrentMap<UUID, Row<GenericTableSchema>> getRows(Node node,
- String databaseName, String tableName, String fiqlQuery) {
- return this.getRows(node, databaseName, tableName);
- }
-
- @Override
- public List<String> getTables(Node node, String databaseName) {
- ConcurrentMap<String, ConcurrentMap<String, Row>> cache = ovsdbInventoryService.getCache(node, databaseName);
- if (cache == null) {
- return null;
- } else {
- return new ArrayList<>(cache.keySet());
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.impl;
-
-import io.netty.channel.ChannelHandler;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-
-import org.opendaylight.ovsdb.lib.MonitorCallBack;
-import org.opendaylight.ovsdb.lib.OvsdbClient;
-import org.opendaylight.ovsdb.lib.OvsdbConnection;
-import org.opendaylight.ovsdb.lib.OvsdbConnectionInfo;
-import org.opendaylight.ovsdb.lib.OvsdbConnectionListener;
-import org.opendaylight.ovsdb.lib.message.MonitorRequest;
-import org.opendaylight.ovsdb.lib.message.MonitorRequestBuilder;
-import org.opendaylight.ovsdb.lib.message.MonitorSelect;
-import org.opendaylight.ovsdb.lib.message.TableUpdates;
-import org.opendaylight.ovsdb.lib.schema.DatabaseSchema;
-import org.opendaylight.ovsdb.lib.schema.GenericTableSchema;
-import org.opendaylight.ovsdb.lib.schema.TableSchema;
-import org.opendaylight.ovsdb.plugin.api.Connection;
-import org.opendaylight.ovsdb.plugin.api.ConnectionConstants;
-import org.opendaylight.ovsdb.plugin.api.Status;
-import org.opendaylight.ovsdb.plugin.api.StatusCode;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService;
-import org.opendaylight.ovsdb.utils.config.ConfigProperties;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-
-
-/**
- * Represents the openflow plugin component in charge of programming the flows
- * the flow programming and relay them to functional modules above SAL.
- */
-public class ConnectionServiceImpl implements OvsdbConnectionService,
- OvsdbConnectionListener {
- private static final Logger LOG = LoggerFactory.getLogger(ConnectionServiceImpl.class);
-
- // Properties that can be set in config.ini
- private static final Integer DEFAULT_OVSDB_PORT = 6640;
- private static final String OVSDB_LISTENPORT = "ovsdb.listenPort";
-
-
- public void putOvsdbConnection (String identifier, Connection connection) {
- ovsdbConnections.put(identifier, connection);
- }
-
- private ConcurrentMap<String, Connection> ovsdbConnections = new ConcurrentHashMap<String, Connection>();
- private List<ChannelHandler> handlers = null;
-
- private volatile OvsdbInventoryService ovsdbInventoryService;
- private volatile OvsdbConnection connectionLib;
-
- public void setOvsdbInventoryService(OvsdbInventoryService inventoryService) {
- this.ovsdbInventoryService = inventoryService;
- }
-
- public void setOvsdbConnection(OvsdbConnection ovsdbConnection) {
- this.connectionLib = ovsdbConnection;
- }
-
- public void init() {
- }
-
- /**
- * Function called by the dependency manager when at least one dependency
- * become unsatisfied or when the component is shutting down because for
- * example bundle is being stopped.
- */
- void destroy() {
- }
-
- /**
- * Function called by dependency manager after "init ()" is called and after
- * the services provided by the class are registered in the service registry
- */
- void start() {
- /* Start ovsdb server before getting connection clients */
- String portString = ConfigProperties.getProperty(OvsdbConnectionService.class, OVSDB_LISTENPORT);
- int ovsdbListenPort = DEFAULT_OVSDB_PORT;
- if (portString != null) {
- ovsdbListenPort = Integer.parseInt(portString);
- }
-
- if (!connectionLib.startOvsdbManager(ovsdbListenPort)) {
- LOG.warn("Start OVSDB manager call from ConnectionService was not necessary");
- }
-
- /* Then get connection clients */
- Collection<OvsdbClient> connections = connectionLib.getConnections();
- for (OvsdbClient client : connections) {
- LOG.info("CONNECT start connected clients client = {}", client);
- this.connected(client);
- }
- }
-
- /**
- * Function called by the dependency manager before the services exported by
- * the component are unregistered, this will be followed by a "destroy ()"
- * calls
- */
- void stopping() {
- for (Connection connection : ovsdbConnections.values()) {
- connection.disconnect();
- }
- }
-
- public Status disconnect(Node node) {
- Connection connection = getConnection(node);
- if (connection != null) {
- ovsdbConnections.remove(normalizeId(node.getId().getValue()));
- connection.disconnect();
- ovsdbInventoryService.removeNode(node);
- return new Status(StatusCode.SUCCESS);
- } else {
- return new Status(StatusCode.NOTFOUND);
- }
- }
-
- public Node connect(String identifier, Map<ConnectionConstants, String> params) {
- InetAddress address;
- Integer port;
-
- try {
- address = InetAddress.getByName(params.get(ConnectionConstants.ADDRESS));
- } catch (Exception e) {
- LOG.error("Unable to resolve {}", params.get(ConnectionConstants.ADDRESS), e);
- return null;
- }
-
- try {
- port = Integer.parseInt(params.get(ConnectionConstants.PORT));
- if (port == 0) {
- port = DEFAULT_OVSDB_PORT;
- }
- } catch (Exception e) {
- port = DEFAULT_OVSDB_PORT;
- }
-
- try {
- OvsdbClient client = connectionLib.connect(address, port);
- return handleNewConnection(identifier, client);
- } catch (InterruptedException e) {
- LOG.error("Thread was interrupted during connect", e);
- } catch (ExecutionException e) {
- LOG.error("ExecutionException in handleNewConnection for identifier " + identifier, e);
- }
- return null;
- }
-
- public List<ChannelHandler> getHandlers() {
- return handlers;
- }
-
- public void setHandlers(List<ChannelHandler> handlers) {
- this.handlers = handlers;
- }
-
- private String normalizeId (String identifier) {
- String id = identifier;
-
- String[] pair = identifier.split("\\|");
- if (pair[0].equals("OVS")) {
- id = pair[1];
- }
-
- return id;
- }
-
- @Override
- public Connection getConnection(Node node) {
- return ovsdbConnections.get(normalizeId(node.getId().getValue()));
- }
-
- @Override
- public Node getNode (String identifier) {
- Connection connection = ovsdbConnections.get(normalizeId(identifier));
- if (connection != null) {
- return connection.getNode();
- } else {
- return null;
- }
- }
-
- @Override
- public List<Node> getNodes() {
- List<Node> nodes = new ArrayList<>();
- for (Connection connection : ovsdbConnections.values()) {
- nodes.add(connection.getNode());
- }
- return nodes;
- }
-
- private Node handleNewConnection(String identifier, OvsdbClient client) throws InterruptedException, ExecutionException {
- Connection connection = new Connection(identifier, client);
- Node node = connection.getNode();
- ovsdbConnections.put(identifier, connection);
- List<String> dbs = client.getDatabases().get();
- for (String db : dbs) {
- client.getSchema(db).get();
- }
- // Keeping the Initial inventory update(s) on its own thread.
- new Thread() {
- Connection connection;
- String identifier;
-
- @Override
- public void run() {
- try {
- LOG.info("Initialize inventory for {}", connection.toString());
- initializeInventoryForNewNode(connection);
- } catch (InterruptedException | ExecutionException | IOException e) {
- LOG.error("Failed to initialize inventory for node with identifier {}", identifier, e);
- ovsdbConnections.remove(identifier);
- }
- }
- public Thread initializeConnectionParams(String identifier, Connection connection) {
- this.identifier = identifier;
- this.connection = connection;
- return this;
- }
- }.initializeConnectionParams(identifier, connection).start();
- return node;
- }
-
- public void channelClosed(Node node) throws Exception {
- LOG.info("Connection to Node : {} closed", node);
- disconnect(node);
- ovsdbInventoryService.removeNode(node);
- }
-
- private void initializeInventoryForNewNode (Connection connection) throws InterruptedException, ExecutionException, IOException {
- OvsdbClient client = connection.getClient();
- InetAddress address = client.getConnectionInfo().getRemoteAddress();
- int port = client.getConnectionInfo().getRemotePort();
-
- List<String> databases = client.getDatabases().get();
- if (databases == null) {
- LOG.error("Unable to get Databases for the ovsdb connection : {}", client.getConnectionInfo());
- return;
- }
- for (String database : databases) {
- DatabaseSchema dbSchema = client.getSchema(database).get();
- TableUpdates updates = this.monitorTables(connection.getNode(), dbSchema);
- ovsdbInventoryService.processTableUpdates(connection.getNode(), dbSchema.getName(), updates);
- }
- LOG.info("Notifying Inventory Listeners for Node Added: {}", connection.getNode().toString());
- ovsdbInventoryService.notifyNodeAdded(connection.getNode(), address, port);
- }
-
- public TableUpdates monitorTables(Node node, DatabaseSchema dbSchema) throws ExecutionException, InterruptedException, IOException {
- Connection connection = getConnection(node);
- OvsdbClient client = connection.getClient();
- if (dbSchema == null) {
- LOG.error("Unable to get Database Schema for the ovsdb connection : {}", client.getConnectionInfo());
- return null;
- }
- Set<String> tables = dbSchema.getTables();
- if (tables == null) {
- LOG.warn("Database {} without any tables. Strange !", dbSchema.getName());
- return null;
- }
- List<MonitorRequest<GenericTableSchema>> monitorRequests = Lists.newArrayList();
- for (String tableName : tables) {
- GenericTableSchema tableSchema = dbSchema.table(tableName, GenericTableSchema.class);
- monitorRequests.add(this.getAllColumnsMonitorRequest(tableSchema));
- }
- return client.monitor(dbSchema, monitorRequests, new UpdateMonitor(node));
- }
-
- /**
- * As per RFC 7047, section 4.1.5, if a Monitor request is sent without any columns, the update response will not include
- * the _uuid column.
- * ----------------------------------------------------------------------------------------------------------------------------------
- * Each <monitor-request> specifies one or more columns and the manner in which the columns (or the entire table) are to be monitored.
- * The "columns" member specifies the columns whose values are monitored. It MUST NOT contain duplicates.
- * If "columns" is omitted, all columns in the table, except for "_uuid", are monitored.
- * ----------------------------------------------------------------------------------------------------------------------------------
- * In order to overcome this limitation, this method
- *
- * @return MonitorRequest that includes all the Bridge Columns including _uuid
- */
- public <T extends TableSchema<T>> MonitorRequest<T> getAllColumnsMonitorRequest (T tableSchema) {
- Set<String> columns = tableSchema.getColumns();
- MonitorRequestBuilder<T> monitorBuilder = MonitorRequestBuilder.builder(tableSchema);
- for (String column : columns) {
- monitorBuilder.addColumn(column);
- }
- return monitorBuilder.with(new MonitorSelect(true, true, true, true)).build();
- }
-
- private class UpdateMonitor implements MonitorCallBack {
- Node node = null;
- public UpdateMonitor(Node node) {
- this.node = node;
- }
-
- @Override
- public void update(TableUpdates result, DatabaseSchema dbSchema) {
- ovsdbInventoryService.processTableUpdates(node, dbSchema.getName(), result);
- }
-
- @Override
- public void exception(Throwable t) {
- System.out.println("Exception t = " + t);
- }
- }
-
- private String getConnectionIdentifier(OvsdbClient client) {
- OvsdbConnectionInfo info = client.getConnectionInfo();
- return info.getRemoteAddress().getHostAddress()+":"+info.getRemotePort();
- }
-
-
- @Override
- public void connected(OvsdbClient client) {
- String identifier = getConnectionIdentifier(client);
- try {
- this.handleNewConnection(identifier, client);
- } catch (InterruptedException | ExecutionException e) {
- e.printStackTrace();
- }
- }
-
- @Override
- public void disconnected(OvsdbClient client) {
- Connection connection = ovsdbConnections.get(this.getConnectionIdentifier(client));
- if (connection == null) {
- return;
- }
- this.disconnect(connection.getNode());
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.impl;
-
-import java.net.InetAddress;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-
-import org.opendaylight.ovsdb.lib.message.TableUpdate;
-import org.opendaylight.ovsdb.lib.message.TableUpdates;
-import org.opendaylight.ovsdb.lib.notation.Row;
-import org.opendaylight.ovsdb.lib.notation.UUID;
-import org.opendaylight.ovsdb.plugin.internal.NodeDatabase;
-import org.opendaylight.ovsdb.plugin.api.OvsVswitchdSchemaConstants;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryListener;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-
-import com.google.common.collect.Sets;
-
-import com.google.common.collect.Maps;
-
-/**
- * Stub Implementation for IPluginInReadService used by SAL
- *
- *
- */
-public class InventoryServiceImpl implements OvsdbInventoryService {
- private ConcurrentMap<Node, NodeDatabase> dbCache = Maps.newConcurrentMap();
- private ScheduledExecutorService executor;
- private OvsdbConfigurationService ovsdbConfigurationService;
-
- private Set<OvsdbInventoryListener> ovsdbInventoryListeners = Sets.newCopyOnWriteArraySet();
-
- /**
- * Function called by the dependency manager when all the required
- * dependencies are satisfied
- *
- */
- public void init() {
- this.executor = Executors.newSingleThreadScheduledExecutor();
- }
-
- /**
- * Function called by the dependency manager when at least one dependency
- * become unsatisfied or when the component is shutting down because for
- * example bundle is being stopped.
- *
- */
- public void destroy() {
- }
-
- /**
- * Function called by dependency manager after "init ()" is called and after
- * the services provided by the class are registered in the service registry
- *
- */
- public void start() {
- }
-
- /**
- * Function called by the dependency manager before the services exported by
- * the component are unregistered, this will be followed by a "destroy ()"
- * calls
- *
- */
- public void stop() {
- this.executor.shutdownNow();
- }
-
- public void setOvsdbConfigurationService(OvsdbConfigurationService service) {
- ovsdbConfigurationService = service;
- }
-
- public void unsetConfigurationService(OvsdbConfigurationService service) {
- ovsdbConfigurationService = null;
- }
-
- @Override
- public ConcurrentMap<String, ConcurrentMap<String, Row>> getCache(Node n, String databaseName) {
- NodeDatabase db = dbCache.get(n);
- if (db == null) {
- return null;
- }
- return db.getDatabase(databaseName);
- }
-
-
- @Override
- public ConcurrentMap<String, Row> getTableCache(Node n, String databaseName, String tableName) {
- NodeDatabase db = dbCache.get(n);
- if (db == null) {
- return null;
- }
- return db.getTableCache(databaseName, tableName);
- }
-
-
- @Override
- public Row getRow(Node n, String databaseName, String tableName, String uuid) {
- NodeDatabase db = dbCache.get(n);
- if (db == null) {
- return null;
- }
- return db.getRow(databaseName, tableName, uuid);
- }
-
- @Override
- public void updateRow(Node n, String databaseName, String tableName, String uuid, Row row) {
- NodeDatabase db = dbCache.get(n);
- if (db == null) {
- db = new NodeDatabase();
- dbCache.put(n, db);
- }
- db.updateRow(databaseName, tableName, uuid, row);
- }
-
- @Override
- public void removeRow(Node n, String databaseName, String tableName, String uuid) {
- NodeDatabase db = dbCache.get(n);
- if (db != null) {
- db.removeRow(databaseName, tableName, uuid);
- }
- }
-
- @Override
- public void processTableUpdates(Node n, String databaseName, TableUpdates tableUpdates) {
- NodeDatabase db = dbCache.get(n);
- if (db == null) {
- db = new NodeDatabase();
- dbCache.put(n, db);
- }
-
- for (String tableName : tableUpdates.getUpdates().keySet()) {
- Map<String, Row> tCache = db.getTableCache(databaseName, tableName);
- TableUpdate update = tableUpdates.getUpdates().get(tableName);
- for (UUID uuid : (Set<UUID>)update.getRows().keySet()) {
-
- if (update.getNew(uuid) != null) {
- boolean isNewRow = (tCache == null || tCache.get(uuid.toString()) == null);
- db.updateRow(databaseName, tableName, uuid.toString(), update.getNew(uuid));
- if (isNewRow) {
- this.handleOpenVSwitchSpecialCase(n, databaseName, tableName, uuid);
- if (!ovsdbInventoryListeners.isEmpty()) {
- for (OvsdbInventoryListener listener : ovsdbInventoryListeners) {
- listener.rowAdded(n, tableName, uuid.toString(), update.getNew(uuid));
- }
- }
- } else {
- if (!ovsdbInventoryListeners.isEmpty()) {
- for (OvsdbInventoryListener listener : ovsdbInventoryListeners) {
- listener.rowUpdated(n, tableName, uuid.toString(), update.getOld(uuid), update.getNew(uuid));
- }
- }
- }
- } else if (update.getOld(uuid) != null){
- if (tCache != null) {
- if (!ovsdbInventoryListeners.isEmpty()) {
- for (OvsdbInventoryListener listener : ovsdbInventoryListeners) {
- listener.rowRemoved(n, tableName, uuid.toString(), update.getOld(uuid), update.getNew(uuid));
- }
- }
- }
- db.removeRow(databaseName, tableName, uuid.toString());
- }
- }
- }
- }
-
- private void handleOpenVSwitchSpecialCase(final Node node, final String databaseName, final String tableName, final UUID uuid) {
- if (OvsVswitchdSchemaConstants.shouldConfigureController(databaseName, tableName)) {
- Runnable updateControllerRunnable = new Runnable() {
- @Override
- public void run() {
- try {
- if (ovsdbConfigurationService != null) {
- ovsdbConfigurationService.setOFController(node, uuid.toString());
- }
- } catch (InterruptedException | ExecutionException e) {
- e.printStackTrace();
- }
- }
- };
- executor.execute(updateControllerRunnable);
- }
- }
-
- @Override
- public void printCache(Node n) {
- if ((dbCache != null) && (!dbCache.isEmpty())) {
- NodeDatabase db = dbCache.get(n);
- if (db != null) {
- db.printTableCache();
- }
- }
- }
-
- @Override
- public void notifyNodeAdded(Node node, InetAddress address, int port) {
- if (!ovsdbInventoryListeners.isEmpty()) {
- for (OvsdbInventoryListener listener : ovsdbInventoryListeners) {
- listener.nodeAdded(node, address, port);
- }
- }
- }
-
- @Override
- public void removeNode(Node node) {
- if (!ovsdbInventoryListeners.isEmpty()) {
- for (OvsdbInventoryListener listener : ovsdbInventoryListeners) {
- listener.nodeRemoved(node);
- }
- }
-
- dbCache.remove(node);
- }
-
- private void listenerAdded(OvsdbInventoryListener listener) {
- this.ovsdbInventoryListeners.add(listener);
- }
-
- private void listenerRemoved(OvsdbInventoryListener listener) {
- this.ovsdbInventoryListeners.remove(listener);
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.internal;
-
-import org.apache.felix.dm.DependencyActivatorBase;
-import org.apache.felix.dm.DependencyManager;
-import org.opendaylight.ovsdb.lib.OvsdbConnection;
-import org.opendaylight.ovsdb.lib.OvsdbConnectionListener;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConfigurationService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbConnectionService;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryListener;
-import org.opendaylight.ovsdb.plugin.api.OvsdbInventoryService;
-import org.opendaylight.ovsdb.plugin.impl.ConfigurationServiceImpl;
-import org.opendaylight.ovsdb.plugin.impl.ConnectionServiceImpl;
-import org.opendaylight.ovsdb.plugin.impl.InventoryServiceImpl;
-
-import org.osgi.framework.BundleContext;
-
-/**
- * OVSDB protocol plugin Activator
- *
- *
- */
-public class Activator extends DependencyActivatorBase {
-
- @Override
- public void init(BundleContext context, DependencyManager manager) throws Exception {
- manager.add(createComponent()
- .setInterface(OvsdbConfigurationService.class.getName(), null)
- .setImplementation(ConfigurationServiceImpl.class)
- .add(createServiceDependency()
- .setService(OvsdbConnectionService.class)
- .setRequired(true))
- .add(createServiceDependency()
- .setService(OvsdbInventoryService.class)
- .setRequired(true)));
-
- manager.add(createComponent()
- .setInterface(
- new String[] {OvsdbConnectionService.class.getName(),
- OvsdbConnectionListener.class.getName()}, null)
- .setImplementation(ConnectionServiceImpl.class)
- .add(createServiceDependency()
- .setService(OvsdbInventoryService.class)
- .setRequired(true))
- .add(createServiceDependency()
- .setService(OvsdbConnection.class)
- .setRequired(true))
- );
-
- manager.add(createComponent()
- .setInterface(OvsdbInventoryService.class.getName(), null)
- .setImplementation(InventoryServiceImpl.class)
- .add(createServiceDependency()
- .setService(OvsdbInventoryListener.class)
- .setCallbacks("listenerAdded", "listenerRemoved"))
- .add(createServiceDependency()
- .setService(OvsdbConfigurationService.class)
- .setRequired(false)));
- }
-
- @Override
- public void destroy(BundleContext context, DependencyManager manager) throws Exception {
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.internal;
-
-public enum Encapsulation {
-
- VXLAN("vxlan"), GRE("gre"), CAPWAP("capwap");
-
- private final String value;
-
- private Encapsulation(final String value) {
- this.value = value;
- }
-
- public String getValue() {
- return value;
- }
-
- @Override
- public String toString() {
- return getValue();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2015 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.ovsdb.plugin.internal;
-
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-
-import org.apache.commons.collections.MapUtils;
-import org.opendaylight.ovsdb.lib.notation.Column;
-import org.opendaylight.ovsdb.lib.notation.Row;
-
-import com.google.common.collect.Maps;
-
-public class NodeDatabase {
- ConcurrentMap<String, TableDB> dbCache = Maps.newConcurrentMap();
-
- public ConcurrentMap<String, ConcurrentMap<String, Row>> getDatabase(String dbName) {
- TableDB tdb = dbCache.get(dbName);
- if (tdb == null) {
- return null;
- }
- return tdb.getTableCache();
- }
-
- public ConcurrentMap<String, Row> getTableCache(String dbName, String tableName) {
- ConcurrentMap<String, ConcurrentMap<String,Row>> tdbMap = getDatabase(dbName);
- if (tdbMap == null) {
- return null;
- }
- return tdbMap.get(tableName);
- }
-
- private void setDBCache(String dbName, TableDB table) {
- dbCache.put(dbName, table);
- }
-
- public Row getRow (String dbName, String tableName, String uuid) {
- ConcurrentMap<String, Row> tdb = this.getTableCache(dbName, tableName);
- if (tdb == null) {
- return null;
- }
- return tdb.get(uuid);
- }
-
- public void updateRow(String dbName, String tableName, String uuid, Row row) {
- TableDB db = dbCache.get(dbName);
- if (db == null) {
- db = new TableDB();
- setDBCache(dbName, db);
- }
- db.updateRow(tableName, uuid, row);
- }
-
- public void removeRow(String dbName, String tableName, String uuid) {
- TableDB db = dbCache.get(dbName);
- if (db == null) {
- return;
- }
- db.removeRow(tableName, uuid);
- }
-
- public void printTableCache() {
- for (String dbName : dbCache.keySet()) {
- System.out.println("Database "+dbName);
- ConcurrentMap<String, ConcurrentMap<String,Row>> tableDB = this.getDatabase(dbName);
- if (tableDB == null) {
- continue;
- }
- for (String tableName : tableDB.keySet()) {
- ConcurrentMap<String, Row> tableRows = this.getTableCache(dbName, tableName);
- System.out.println("\tTable "+tableName);
- for (String uuid : tableRows.keySet()) {
- Row row = tableRows.get(uuid);
- Collection<Column> columns = row.getColumns();
- System.out.print("\t\t"+uuid+ "==");
- for (Column column : columns) {
- if (column.getData() != null) {
- System.out.print(column.getSchema().getName()+" : "+ column.getData()+" ");
- }
- }
- System.out.println("");
- }
- System.out.println("-----------------------------------------------------------");
- }
- }
- }
-
- public class TableDB {
- ConcurrentMap<String, ConcurrentMap<String, Row>> cache = Maps.newConcurrentMap();
-
- public ConcurrentMap<String, ConcurrentMap<String, Row>> getTableCache() {
- return cache;
- }
-
- public ConcurrentMap<String, Row> getTableCache(String tableName) {
- return cache.get(tableName);
- }
-
- private void setTableCache(String tableName, ConcurrentMap<String, Row> tableCache) {
- cache.put(tableName, tableCache);
- }
-
- public Row getRow (String tableName, String uuid) {
- Map<String, Row> tableCache = getTableCache(tableName);
- if (tableCache != null) {
- return tableCache.get(uuid);
- }
- return null;
- }
-
- public void updateRow(String tableName, String uuid, Row row) {
- ConcurrentMap<String, Row> tableCache = getTableCache(tableName);
- if (tableCache == null) {
- tableCache = Maps.newConcurrentMap();
- setTableCache(tableName, tableCache);
- }
- tableCache.put(uuid, row);
- }
-
- public void removeRow(String tableName, String uuid) {
- Map<String, Row> tableCache = getTableCache(tableName);
- if (tableCache != null) {
- tableCache.remove(uuid);
- }
- }
-
- public void printTableCache() {
- MapUtils.debugPrint(System.out, null, cache);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- * Authors : Sam Hague
- */
-package org.opendaylight.ovsdb.plugin.impl;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.fail;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.ovsdb.plugin.api.Connection;
-
-public class ConnectionServiceImplTest {
- private static final String OVS = "OVS";
- private static final String IDENTIFIER = "192.168.120.31:45001";
- private static final String OVS_IDENTIFIER = OVS + "|" + IDENTIFIER;
- private static final String BAD_IDENTIFIER = "BAD" + "|" + IDENTIFIER;
- private static ConnectionServiceImpl connectionService;
-
- @BeforeClass
- public static void setUp () {
- connectionService = new ConnectionServiceImpl();
- Connection connection = new Connection(IDENTIFIER, null);
- connectionService.putOvsdbConnection(IDENTIFIER, connection);
- }
-
- @Test
- public void testGetNode () {
- Node node = connectionService.getNode(IDENTIFIER);
- assertNotNull("Node " + IDENTIFIER + " is null", node);
-
- node = connectionService.getNode(OVS_IDENTIFIER);
- assertNotNull("Node " + OVS_IDENTIFIER + " is null", node);
-
- node = connectionService.getNode(IDENTIFIER + "extra");
- assertNull("Node " + BAD_IDENTIFIER + " is not null", node);
- }
-
- @Test
- public void testGetConnection () {
- Node node = connectionService.getNode(IDENTIFIER);
- assertNotNull("Node " + IDENTIFIER + " is null", node);
-
- Connection connection = connectionService.getConnection(node);
- assertNotNull("Connection " + IDENTIFIER + " is null", connection);
-
- try {
- connection = connectionService.getConnection(null);
- fail("Expected a NullPointerException to be thrown");
- } catch (NullPointerException e) {
- assertSame(NullPointerException.class, e.getClass());
- }
- }
-}
</parent>
<artifactId>ovsdb</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<name>${project.artifactId}</name> <!-- Used by Sonar to set project name -->
<packaging>pom</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<!-- Parent POM files -->
<module>commons</module>
+ <module>library</module>
+ <!-- Aggregations -->
<module>features</module>
<module>karaf</module>
<!-- OVSDB Components -->
- <module>library</module>
<module>southbound</module>
<module>northbound</module>
<module>openstack</module>
- <module>plugin</module>
<module>ovsdb-artifacts</module>
- <module>ovsdb-plugin-compatibility-layer</module>
- <module>plugin-shell</module>
<module>schemas</module>
<module>utils</module>
<module>ovsdb-ui</module>
<module>routemgr</module>
+ <module>hwvtepsouthbound</module>
<!-- Integration Tests -->
<module>integrationtest</module>
</modules>
<relativePath>../commons/parent</relativePath>\r
</parent>\r
<artifactId>routemgr-aggregator</artifactId>\r
- <version>1.2.0-SNAPSHOT</version>\r
+ <version>1.2.1-SNAPSHOT</version>\r
<name>${project.artifactId}</name>\r
<packaging>pom</packaging>\r
<description>The OVSDB routemgr project is a project for OpenDaylight that will implement the routing aspects of Openstack integration, vSwitches and a network virtualization implementation.</description>\r
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>routemgr-api</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>The OVSDB router project is a project for OpenDaylight that will implement the routing aspects of Openstack integration, vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
<tag>HEAD</tag>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
</scm>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>org.opendaylight.mdsal</groupId>
+ <artifactId>mdsal-artifacts</artifactId>
+ <version>2.0.0-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>mdsal-model-artifacts</artifactId>
+ <version>0.8.0-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+
<dependencies>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-topology</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>yang-ext</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-inet-types</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-yang-types-20130715</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>opendaylight-l2-types</artifactId>
</dependency>
</dependencies>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>routemgr-artifacts</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>pom</packaging>
<description>The OVSDB routemgr project is a project for OpenDaylight that will implement the routing aspects of Openstack integration, vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
</parent>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>routemgr-config</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>jar</packaging>
<build>
<plugins>
</parent>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>routemgr-features</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<name>${project.artifactId}</name>
<modelVersion>4.0.0</modelVersion>
<description>The OVSDB routemgr project is a project for OpenDaylight that will implement the routing aspects of Openstack integration, vSwitches and a network virtualization implementation.</description>
</prerequisites>
<properties>
<mdsal.version>1.3.0-SNAPSHOT</mdsal.version>
+ <restconf.version>1.3.0-SNAPSHOT</restconf.version>
<yangtools.version>0.8.0-SNAPSHOT</yangtools.version>
+ <mdsal.model.version>0.8.0-SNAPSHOT</mdsal.model.version>
</properties>
<dependencyManagement>
<dependencies>
<type>pom</type>
<scope>import</scope>
</dependency>
+<!--
<dependency>
<groupId>org.opendaylight.neutron</groupId>
<artifactId>project-neutron</artifactId>
<type>pom</type>
<scope>import</scope>
</dependency>
+-->
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>openflowplugin-artifacts</artifactId>
<dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>southbound-artifacts</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
+<!--
+ <dependency>
+ <groupId>org.opendaylight.netconf</groupId>
+ <artifactId>restconf-artifacts</artifactId>
+ <version>${restconf.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
+-->
</dependencies>
</dependencyManagement>
<dependencies>
+<!--
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
+-->
+ <dependency>
+ <groupId>org.opendaylight.mdsal.model</groupId>
+ <artifactId>features-mdsal-model</artifactId>
+ <version>${mdsal.model.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-mdsal</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
+<!--
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-restconf</artifactId>
<type>xml</type>
<classifier>features</classifier>
</dependency>
+-->
<dependency>
<groupId>org.opendaylight.neutron</groupId>
<artifactId>features-neutron</artifactId>
<dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>southbound-features</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<type>xml</type>
<classifier>features</classifier>
</dependency>
<features name="odl-ovsdb-routemgr-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+<!--
<repository>mvn:org.opendaylight.controller/features-restconf/${mdsal.version}/xml/features</repository>
+-->
<repository>mvn:org.opendaylight.neutron/features-neutron/0.6.0-SNAPSHOT/xml/features</repository>
<repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.2.0-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.ovsdb/southbound-features/1.2.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.ovsdb/southbound-features/1.2.1-SNAPSHOT/xml/features</repository>
<feature name='odl-routemgr-plugin' description="Opendaylight :: Routemgr :: Plugin" version='${project.version}'>
<feature version="0.2.0-SNAPSHOT">odl-openflowplugin-flow-services</feature>
<feature version="0.6.0-SNAPSHOT">odl-neutron-transcriber</feature>
- <feature version="1.2.0-SNAPSHOT">odl-ovsdb-southbound-impl</feature>
+ <feature version="1.2.1-SNAPSHOT">odl-ovsdb-southbound-impl</feature>
+ <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
+ <feature version='${mdsal.model.version}'>odl-mdsal-models</feature>
<bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
<bundle>mvn:org.opendaylight.ovsdb/routemgr-impl/${project.version}</bundle>
<bundle>mvn:org.opendaylight.ovsdb/routemgr-api/${project.version}</bundle>
<configfile finalname="etc/opendaylight/karaf/97-routemgrmain.xml">mvn:org.opendaylight.ovsdb/routemgr-config/${project.version}/xml/config</configfile>
</feature>
+<!--
<feature name='odl-routemgr-rest' description="Opendaylight :: Routemgr :: Plugin :: Rest" version='${project.version}'>
<feature version="${project.version}">odl-routemgr-plugin</feature>
<feature version="${mdsal.version}">odl-restconf</feature>
</feature>
+-->
</features>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>routemgr-impl</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>The OVSDB routemgr project is a project for OpenDaylight that will implement the routing aspects of Openstack integration, vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
<dependency>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>southbound-api</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.ovsdb</groupId>
<artifactId>routemgr-karaf</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<name>${project.artifactId}</name>
<prerequisites>
<maven>3.1.1</maven>
<artifactId>utils.config</artifactId>
<name>${project.artifactId}</name>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>jar</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
<artifactId>utils.mdsal-node</artifactId>
<name>${project.artifactId}</name>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>jar</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
<artifactId>utils.mdsal-openflow</artifactId>
<name>${project.artifactId}</name>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>jar</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>
<dependencies>
<!-- Yang Models -->
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-inet-types</artifactId>
</dependency>
<dependency>
<artifactId>model-flow-base</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>ietf-yang-types</artifactId>
</dependency>
<dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
+ <groupId>org.opendaylight.mdsal.model</groupId>
<artifactId>opendaylight-l2-types</artifactId>
</dependency>
<!-- Controller Dependencies -->
}
/**
- * Create a DHCP match with pot provided
+ * Create a DHCP match with pot provided.
*
* @param matchBuilder the match builder
* @param srcPort the source port
* @param dstPort the destination port
* @return the DHCP match
*/
- public static MatchBuilder createDHCPMatch(MatchBuilder matchBuilder, int srcPort, int dstPort) {
+ public static MatchBuilder createDhcpMatch(MatchBuilder matchBuilder,
+ int srcPort, int dstPort) {
EthernetMatchBuilder ethernetMatch = new EthernetMatchBuilder();
EthernetTypeBuilder ethTypeBuilder = new EthernetTypeBuilder();
* @param dstPort the destination port
* @return the DHCP server match
*/
- public static MatchBuilder createDHCPServerMatch(MatchBuilder matchBuilder, String dhcpServerMac, int srcPort,
+ public static MatchBuilder createDhcpServerMatch(MatchBuilder matchBuilder, String dhcpServerMac, int srcPort,
int dstPort) {
EthernetMatchBuilder ethernetMatch = new EthernetMatchBuilder();
}
/**
+ * Creates a Match with src ip address mac address set.
* @param matchBuilder MatchBuilder Object
* @param srcip String containing an IPv4 prefix
* @param srcMac The source macAddress
* @return matchBuilder Map Object with a match
*/
- public static MatchBuilder createSrcL3IPv4MatchWithMac(MatchBuilder matchBuilder, Ipv4Prefix srcip, MacAddress srcMac) {
+ public static MatchBuilder createSrcL3Ipv4MatchWithMac(MatchBuilder matchBuilder, Ipv4Prefix srcip, MacAddress srcMac) {
Ipv4MatchBuilder ipv4MatchBuilder = new Ipv4MatchBuilder();
ipv4MatchBuilder.setIpv4Source(new Ipv4Prefix(srcip));
}
+    /**
+     * Creates an Ethernet match with the EtherType set to 0x0800 (IPv4).
+     *
+     * @param matchBuilder the MatchBuilder to populate
+     * @param srcMac the source MAC address, or null to leave the source unmatched
+     * @param dstMac the destination MAC address, or null to leave the destination unmatched
+     * @return matchBuilder with the Ethernet match set
+     */
+    public static MatchBuilder createEtherMatchWithType(MatchBuilder matchBuilder, String srcMac, String dstMac) {
+        EthernetTypeBuilder ethTypeBuilder = new EthernetTypeBuilder();
+        // 0x0800 is the EtherType for IPv4.
+        ethTypeBuilder.setType(new EtherType(0x0800L));
+        EthernetMatchBuilder eth = new EthernetMatchBuilder();
+        eth.setEthernetType(ethTypeBuilder.build());
+        if (null != srcMac) {
+            eth.setEthernetSource(new EthernetSourceBuilder()
+                    .setAddress(new MacAddress(srcMac)).build());
+        }
+        if (null != dstMac) {
+            eth.setEthernetDestination(new EthernetDestinationBuilder()
+                    .setAddress(new MacAddress(dstMac)).build());
+        }
+        matchBuilder.setEthernetMatch(eth.build());
+        return matchBuilder;
+    }
+    /**
+     * Adds a remote IP prefix (source and/or destination) to an existing match.
+     *
+     * @param matchBuilder the MatchBuilder to populate
+     * @param sourceIpPrefix the source IPv4 prefix, or null to leave the source unmatched
+     * @param destIpPrefix the destination IPv4 prefix, or null to leave the destination unmatched
+     * @return matchBuilder with the layer-3 match set
+     */
+    public static MatchBuilder addRemoteIpPrefix(MatchBuilder matchBuilder,
+                                                 Ipv4Prefix sourceIpPrefix, Ipv4Prefix destIpPrefix) {
+        Ipv4MatchBuilder ipv4match = new Ipv4MatchBuilder();
+        if (null != sourceIpPrefix) {
+            ipv4match.setIpv4Source(sourceIpPrefix);
+        }
+        if (null != destIpPrefix) {
+            ipv4match.setIpv4Destination(destIpPrefix);
+        }
+        // NOTE(review): this overwrites any previously set layer-3 match, even when
+        // both prefixes are null (an empty Ipv4Match is installed) — confirm intended.
+        matchBuilder.setLayer3Match(ipv4match.build());
+
+        return matchBuilder;
+    }
+    /**
+     * Adds a layer-4 (TCP or UDP) match to an existing match.
+     *
+     * @param matchBuilder Map matchBuilder MatchBuilder Object without a match
+     * @param protocol The layer4 protocol number (TCP_SHORT or UDP_SHORT)
+     * @param srcPort The src port; 0 leaves the source port unmatched
+     * @param destPort The destination port; 0 leaves the destination port unmatched
+     * @return matchBuilder Map Object with a match
+     */
+    public static MatchBuilder addLayer4Match(MatchBuilder matchBuilder,
+                                              int protocol, int srcPort, int destPort) {
+        IpMatchBuilder ipmatch = new IpMatchBuilder();
+        if (TCP_SHORT == protocol) {
+            ipmatch.setIpProtocol(TCP_SHORT);
+            TcpMatchBuilder tcpmatch = new TcpMatchBuilder();
+            if (0 != srcPort) {
+                tcpmatch.setTcpSourcePort(new PortNumber(srcPort));
+            }
+            if (0 != destPort) {
+                tcpmatch.setTcpDestinationPort(new PortNumber(destPort));
+            }
+            matchBuilder.setLayer4Match(tcpmatch.build());
+        } else if (UDP_SHORT == protocol) {
+            ipmatch.setIpProtocol(UDP_SHORT);
+            UdpMatchBuilder udpMatch = new UdpMatchBuilder();
+            if (0 != srcPort) {
+                udpMatch.setUdpSourcePort(new PortNumber(srcPort));
+            }
+            if (0 != destPort) {
+                udpMatch.setUdpDestinationPort(new PortNumber(destPort));
+            }
+            matchBuilder.setLayer4Match(udpMatch.build());
+        }
+        // NOTE(review): an unrecognized protocol still installs an IpMatch with no
+        // protocol set and no layer-4 match — confirm callers only pass TCP_SHORT/UDP_SHORT.
+        matchBuilder.setIpMatch(ipmatch.build());
+
+        return matchBuilder;
+    }
+
+
public static class RegMatch {
final Class<? extends NxmNxReg> reg;
final Long value;
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.mdsal</groupId>
+    <artifactId>binding-parent</artifactId>
+    <version>0.8.0-SNAPSHOT</version>
+    <relativePath/>
+  </parent>
+
+  <groupId>org.opendaylight.ovsdb</groupId>
+  <artifactId>utils.mdsal-utils</artifactId>
+  <!-- Keep in lockstep with the other ovsdb artifacts, which are bumped to 1.2.1-SNAPSHOT. -->
+  <version>1.2.1-SNAPSHOT</version>
+  <packaging>bundle</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-api</artifactId>
+      <version>1.3.0-SNAPSHOT</version>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-simple</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2015 Red Hat, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.ovsdb.utils.mdsal.utils;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Blocking convenience wrappers (delete/merge/put/read) around the MD-SAL
+ * {@link DataBroker} transaction API.
+ */
+public class MdsalUtils {
+    private static final Logger LOG = LoggerFactory.getLogger(MdsalUtils.class);
+
+    // Injected once at construction and never reassigned.
+    private final DataBroker databroker;
+
+    /**
+     * Class constructor setting the data broker.
+     *
+     * @param dataBroker the {@link org.opendaylight.controller.md.sal.binding.api.DataBroker}
+     */
+    public MdsalUtils(DataBroker dataBroker) {
+        this.databroker = dataBroker;
+    }
+
+    /**
+     * Executes delete as a blocking transaction.
+     *
+     * @param store {@link LogicalDatastoreType} which should be modified
+     * @param path {@link InstanceIdentifier} of the data to delete
+     * @param <D> the data object type
+     * @return true if the delete was committed successfully
+     */
+    public <D extends org.opendaylight.yangtools.yang.binding.DataObject> boolean delete(
+            final LogicalDatastoreType store, final InstanceIdentifier<D> path) {
+        boolean result = false;
+        final WriteTransaction transaction = databroker.newWriteOnlyTransaction();
+        transaction.delete(store, path);
+        CheckedFuture<Void, TransactionCommitFailedException> future = transaction.submit();
+        try {
+            future.checkedGet();
+            result = true;
+        } catch (TransactionCommitFailedException e) {
+            LOG.warn("Failed to delete {} ", path, e);
+        }
+        return result;
+    }
+
+    /**
+     * Executes merge as a blocking transaction.
+     *
+     * @param logicalDatastoreType {@link LogicalDatastoreType} which should be modified
+     * @param path {@link InstanceIdentifier} for the data to merge
+     * @param data the data object to merge (parent structure is created if missing)
+     * @param <D> the data object type
+     * @return true if the merge was committed successfully
+     */
+    public <D extends org.opendaylight.yangtools.yang.binding.DataObject> boolean merge(
+            final LogicalDatastoreType logicalDatastoreType, final InstanceIdentifier<D> path, D data) {
+        boolean result = false;
+        final WriteTransaction transaction = databroker.newWriteOnlyTransaction();
+        // true: create missing parent nodes along the path.
+        transaction.merge(logicalDatastoreType, path, data, true);
+        CheckedFuture<Void, TransactionCommitFailedException> future = transaction.submit();
+        try {
+            future.checkedGet();
+            result = true;
+        } catch (TransactionCommitFailedException e) {
+            LOG.warn("Failed to merge {} ", path, e);
+        }
+        return result;
+    }
+
+    /**
+     * Executes put as a blocking transaction.
+     *
+     * @param logicalDatastoreType {@link LogicalDatastoreType} which should be modified
+     * @param path {@link InstanceIdentifier} for the data to put
+     * @param data the data object to store, replacing any existing data at the path
+     * @param <D> the data object type
+     * @return true if the put was committed successfully
+     */
+    public <D extends org.opendaylight.yangtools.yang.binding.DataObject> boolean put(
+            final LogicalDatastoreType logicalDatastoreType, final InstanceIdentifier<D> path, D data) {
+        boolean result = false;
+        final WriteTransaction transaction = databroker.newWriteOnlyTransaction();
+        // true: create missing parent nodes along the path.
+        transaction.put(logicalDatastoreType, path, data, true);
+        CheckedFuture<Void, TransactionCommitFailedException> future = transaction.submit();
+        try {
+            future.checkedGet();
+            result = true;
+        } catch (TransactionCommitFailedException e) {
+            LOG.warn("Failed to put {} ", path, e);
+        }
+        return result;
+    }
+
+    /**
+     * Executes read as a blocking transaction.
+     *
+     * @param store {@link LogicalDatastoreType} to read
+     * @param path {@link InstanceIdentifier} for path to read
+     * @param <D> the data object type
+     * @return the data object requested, or null if absent or the read failed
+     */
+    public <D extends org.opendaylight.yangtools.yang.binding.DataObject> D read(
+            final LogicalDatastoreType store, final InstanceIdentifier<D> path) {
+        D result = null;
+        final ReadOnlyTransaction transaction = databroker.newReadOnlyTransaction();
+        try {
+            CheckedFuture<Optional<D>, ReadFailedException> future = transaction.read(store, path);
+            try {
+                Optional<D> optionalDataObject = future.checkedGet();
+                if (optionalDataObject.isPresent()) {
+                    result = optionalDataObject.get();
+                } else {
+                    LOG.debug("{}: Failed to read {}",
+                            Thread.currentThread().getStackTrace()[1], path);
+                }
+            } catch (ReadFailedException e) {
+                LOG.warn("Failed to read {} ", path, e);
+            }
+        } finally {
+            // Release the transaction even if an unexpected runtime exception escapes,
+            // so the read transaction is never leaked.
+            transaction.close();
+        }
+        return result;
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Inocybe and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.ovsdb.utils.mdsal.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+
+/**
+ * Unit test for {@link MdsalUtils}.
+ */
+@RunWith(MockitoJUnitRunner.class)
+@SuppressWarnings({ "unchecked", "rawtypes" })
+public class MdsalUtilsTest {
+
+    @InjectMocks private MdsalUtils mdsalUtils;
+
+    @Mock private DataBroker databroker;
+
+    @Test
+    public void testDelete() {
+        WriteTransaction writeTransaction = mock(WriteTransaction.class);
+        when(databroker.newWriteOnlyTransaction()).thenReturn(writeTransaction);
+        CheckedFuture<Void, TransactionCommitFailedException> future = mock(CheckedFuture.class);
+        when(writeTransaction.submit()).thenReturn(future);
+
+        boolean result = mdsalUtils.delete(LogicalDatastoreType.CONFIGURATION, mock(InstanceIdentifier.class));
+
+        verify(writeTransaction, times(1)).delete(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+        verify(writeTransaction, times(1)).submit();
+
+        assertTrue("Error, the delete transaction failed", result);
+    }
+
+    @Test
+    public void testMerge() {
+        WriteTransaction writeTransaction = mock(WriteTransaction.class);
+        when(databroker.newWriteOnlyTransaction()).thenReturn(writeTransaction);
+        CheckedFuture<Void, TransactionCommitFailedException> future = mock(CheckedFuture.class);
+        when(writeTransaction.submit()).thenReturn(future);
+
+        boolean result = mdsalUtils.merge(LogicalDatastoreType.CONFIGURATION, mock(InstanceIdentifier.class), mock(DataObject.class));
+
+        verify(writeTransaction, times(1)).merge(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(DataObject.class), anyBoolean());
+        verify(writeTransaction, times(1)).submit();
+
+        assertTrue("Error, the merge transaction failed", result);
+    }
+
+    @Test
+    public void testPut() {
+        WriteTransaction writeTransaction = mock(WriteTransaction.class);
+        when(databroker.newWriteOnlyTransaction()).thenReturn(writeTransaction);
+        CheckedFuture<Void, TransactionCommitFailedException> future = mock(CheckedFuture.class);
+        when(writeTransaction.submit()).thenReturn(future);
+
+        boolean result = mdsalUtils.put(LogicalDatastoreType.CONFIGURATION, mock(InstanceIdentifier.class), mock(DataObject.class));
+
+        verify(writeTransaction, times(1)).put(any(LogicalDatastoreType.class), any(InstanceIdentifier.class), any(DataObject.class), anyBoolean());
+        verify(writeTransaction, times(1)).submit();
+
+        assertTrue("Error, the put transaction failed", result);
+    }
+
+    @Test
+    public void testRead() throws ReadFailedException {
+        ReadOnlyTransaction readOnlyTransaction = mock(ReadOnlyTransaction.class);
+        when(databroker.newReadOnlyTransaction()).thenReturn(readOnlyTransaction);
+        CheckedFuture<Optional, ReadFailedException> future = mock(CheckedFuture.class);
+        Optional opt = mock(Optional.class);
+        when(opt.isPresent()).thenReturn(true);
+        DataObject obj = mock(DataObject.class);
+        when(opt.get()).thenReturn(obj);
+        when(future.checkedGet()).thenReturn(opt);
+        when(readOnlyTransaction.read(any(LogicalDatastoreType.class), any(InstanceIdentifier.class))).thenReturn(future);
+
+        DataObject result = mdsalUtils.read(LogicalDatastoreType.CONFIGURATION, mock(InstanceIdentifier.class));
+
+        verify(readOnlyTransaction, times(1)).read(any(LogicalDatastoreType.class), any(InstanceIdentifier.class));
+        verify(readOnlyTransaction, times(1)).close();
+
+        assertEquals("Error, the read transaction failed", obj, result);
+    }
+}
<module>mdsal-node</module>
<module>mdsal-openflow</module>
<module>servicehelper</module>
+ <module>mdsal-utils</module>
</modules>
</project>
</parent>
<artifactId>utils.servicehelper</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>The OVSDB Plugin integration project is a project for OpenDaylight that will implement the Open vSwitch Database RFC 7047 management protocol allowing the Southbound configuration of vSwitches and a network virtualization implementation.</description>
<url>https://wiki.opendaylight.org/view/OVSDB_Integration:Main</url>