<version>0.2.5-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
- <artifactId>config-netty-features</artifactId>
+ <artifactId>features-config-netty</artifactId>
<packaging>pom</packaging>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-features</artifactId>
+ <artifactId>features-config-persister</artifactId>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-event-executor-config</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-threadgroup-config</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-timer-config</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>threadpool-config-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-netty-config</artifactId>
+ </dependency>
</dependencies>
<build>
<features name="odl-config-persister-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
- <repository>mvn:org.opendaylight.controller/config-persister-features/${config.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-config-persister/${config.version}/xml/features</repository>
<feature name='odl-config-netty' version='${project.version}'>
<feature version='${project.version}'>odl-config-netty-config-api</feature>
<bundle>mvn:org.opendaylight.controller/netty-event-executor-config/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/threadpool-config-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/threadpool-config-impl/${project.version}</bundle>
<feature version='${project.version}'>odl-config-startup</feature>
- <configfile finalname="configuration/initial/00-netty.xml">mvn:org.opendaylight.controller/config-netty-config/${config.version}/xml/config</configfile>
+ <configfile finalname="${config.configfile.directory}/${config.netty.configfile}">mvn:org.opendaylight.controller/config-netty-config/${config.version}/xml/config</configfile>
</feature>
</features>
\ No newline at end of file
<version>0.2.5-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
- <artifactId>config-persister-features</artifactId>
+ <artifactId>features-config-persister</artifactId>
<packaging>pom</packaging>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-features</artifactId>
+ <artifactId>features-netconf</artifactId>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>config-features</artifactId>
+ <artifactId>features-config</artifactId>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-file-xml-adapter</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-feature-adapter</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-mapping-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.persistence</groupId>
+ <artifactId>org.eclipse.persistence.moxy</artifactId>
+ </dependency>
</dependencies>
<build>
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/netconf-features/${netconf.version}/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/config-features/${config.version}/xml/features</repository>
- <feature name='odl-config-startup' version='${project.version}'>
- <feature version='${project.version}'>odl-config-netconf-connector</feature>
+ <repository>mvn:org.opendaylight.controller/features-netconf/${netconf.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
+ <feature name='odl-config-all' version='${project.version}'>
<feature version='${project.version}'>odl-config-persister</feature>
- <feature version='${project.version}'>odl-netconf-impl</feature>
+ <feature version='${project.version}'>odl-config-startup</feature>
</feature>
<feature name='odl-config-persister' version='${project.version}'>
<feature version='${netconf.version}'>odl-netconf-api</feature>
<feature version='${project.version}'>odl-config-api</feature>
- <feature version='${yangtools.version}'>yangtools-binding-generator</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-binding-generator</feature>
<bundle>mvn:org.opendaylight.controller/config-persister-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/config-persister-file-xml-adapter/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/config-persister-directory-xml-adapter/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/config-persister-impl/${project.version}</bundle>
-
+ <bundle>mvn:org.opendaylight.controller/config-persister-feature-adapter/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-util/${netconf.version}</bundle>
<bundle>mvn:org.opendaylight.controller/netconf-mapping-api/${netconf.version}</bundle>
<bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
<bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
</feature>
-</features>
\ No newline at end of file
+ <feature name='odl-config-startup' version='${project.version}'>
+ <feature version='${project.version}'>odl-config-netconf-connector</feature>
+ <feature version='${project.version}'>odl-config-persister</feature>
+ <feature version='${project.version}'>odl-netconf-impl</feature>
+ </feature>
+</features>
<version>0.2.5-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
- <artifactId>config-features</artifactId>
+ <artifactId>features-config</artifactId>
<packaging>pom</packaging>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-common-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netty-config-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-buffer</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>yang-jmx-generator</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>shutdown-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>shutdown-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.javassist</groupId>
+ <artifactId>javassist</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-manager</artifactId>
+ </dependency>
</dependencies>
<build>
xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
- <feature name='odl-config-core' version='${project.version}'>
- <feature version='${yangtools.version}'>yangtools-concepts</feature>
- <feature version='${yangtools.version}'>yangtools-binding</feature>
- <feature version='${yangtools.version}'>yangtools-binding-generator</feature>
- <feature version='${mdsal.version}'>odl-mdsal-commons</feature>
- <feature version='${project.version}'>odl-config-api</feature>
- <bundle>mvn:org.opendaylight.controller/config-util/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/yang-jmx-generator/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/shutdown-api/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/shutdown-impl/${project.version}</bundle>
- <bundle>mvn:org.osgi/org.osgi.core/${osgi.core.version}</bundle>
- <bundle>mvn:com.google.guava/guava/${guava.version}</bundle>
- <bundle>mvn:org.javassist/javassist/${javassist.version}</bundle>
+ <feature name='odl-config-all' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-common</feature>
+ <feature version='${project.version}'>odl-config-api</feature>
+ <feature version='${project.version}'>odl-config-netty-config-api</feature>
+ <feature version='${project.version}'>odl-config-core</feature>
+ <feature version='${project.version}'>odl-config-manager</feature>
</feature>
- <feature name='odl-config-manager' version='${project.version}'>
- <feature version='${project.version}'>odl-config-core</feature>
- <bundle>mvn:org.opendaylight.controller/config-manager/${project.version}</bundle>
+
+ <feature name='odl-mdsal-common' version='${mdsal.version}'>
+ <feature version='${yangtools.version}'>odl-yangtools-data-binding</feature>
+ <bundle>mvn:org.opendaylight.controller/sal-common/${mdsal.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-common-api/${mdsal.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-common-impl/${mdsal.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-common-util/${mdsal.version}</bundle>
</feature>
<feature name='odl-config-api' version='${project.version}'>
<bundle>mvn:org.opendaylight.controller/config-api/${project.version}</bundle>
-
- <!-- yangtools features -->
- <feature version='${yangtools.version}'>yangtools-concepts</feature>
- <feature version='${yangtools.version}'>yangtools-binding</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-common</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
</feature>
<feature name='odl-config-netty-config-api' version='${project.version}'>
+ <feature version='${project.version}'>odl-config-api</feature>
<bundle>mvn:org.opendaylight.controller/netty-config-api/${project.version}</bundle>
-
- <!-- netty bundles -->
<bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
<bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
<bundle>mvn:io.netty/netty-buffer/${netty.version}</bundle>
+ </feature>
+ <feature name='odl-config-core' version='${project.version}'>
+ <feature version='${yangtools.version}'>odl-yangtools-common</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-binding-generator</feature>
+ <feature version='${mdsal.version}'>odl-mdsal-common</feature>
<feature version='${project.version}'>odl-config-api</feature>
+ <bundle>mvn:org.opendaylight.controller/config-util/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/yang-jmx-generator/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/shutdown-api/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/shutdown-impl/${project.version}</bundle>
+ <bundle>mvn:org.osgi/org.osgi.core/${osgi.core.version}</bundle>
+ <bundle>mvn:com.google.guava/guava/${guava.version}</bundle>
+ <bundle>mvn:org.javassist/javassist/${javassist.version}</bundle>
</feature>
- <feature name='odl-config-dispatcher' version='${project.version}'>
- <bundle>mvn:org.opendaylight.controller/netconf-config-dispatcher/${project.version}</bundle>
+ <feature name='odl-config-manager' version='${project.version}'>
+ <feature version='${project.version}'>odl-config-core</feature>
+ <bundle>mvn:org.opendaylight.controller/config-manager/${project.version}</bundle>
</feature>
-
</features>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Features definition POM for the MD-SAL flow services Karaf feature repository. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>sal-parent</artifactId>
+    <version>1.1-SNAPSHOT</version>
+    <relativePath>../../opendaylight/md-sal</relativePath>
+  </parent>
+  <artifactId>features-flow</artifactId>
+
+  <packaging>pom</packaging>
+
+  <properties>
+    <features.file>features.xml</features.file>
+  </properties>
+
+  <!-- Artifacts referenced from features.xml; versions are presumably managed
+       by the parent's dependencyManagement (none are declared here) - confirm. -->
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-mdsal</artifactId>
+      <version>${mdsal.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-flow-base</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-flow-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-flow-statistics</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-inventory</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-topology</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>topology-manager</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>topology-lldp-discovery</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>statistics-manager</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>inventory-manager</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>forwardingrules-manager</artifactId>
+    </dependency>
+  </dependencies>
+
+  <!-- Filter ${...} placeholders in features.xml, then attach the filtered
+       file as the xml/features artifact consumed by Karaf. -->
+  <build>
+    <resources>
+      <resource>
+        <filtering>true</filtering>
+        <directory>src/main/resources</directory>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>filter</id>
+            <goals>
+              <goal>resources</goal>
+            </goals>
+            <phase>generate-resources</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-artifacts</id>
+            <goals>
+              <goal>attach-artifact</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <artifacts>
+                <artifact>
+                  <file>${project.build.directory}/classes/${features.file}</file>
+                  <type>xml</type>
+                  <classifier>features</classifier>
+                </artifact>
+              </artifacts>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <scm>
+    <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+    <tag>HEAD</tag>
+    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+  </scm>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!-- Karaf feature definitions for the MD-SAL flow model and flow services. -->
+<features name="odl-flow-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+   xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+  <repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
+  <!-- YANG model bundles for flows, inventory and topology. -->
+  <feature name='odl-flow-model' version='${project.version}'>
+    <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+    <bundle>mvn:org.opendaylight.controller.model/model-flow-base/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller.model/model-flow-service/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller.model/model-flow-statistics/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller.model/model-inventory/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller.model/model-topology/${project.version}</bundle>
+  </feature>
+  <!-- Runtime flow services layered on the MD-SAL broker and the flow models. -->
+  <feature name='odl-flow-services' version='${project.version}'>
+    <feature version='${project.version}'>odl-mdsal-broker</feature>
+    <feature version='${project.version}'>odl-flow-model</feature>
+    <bundle>mvn:org.opendaylight.controller.md/topology-manager/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller.md/topology-lldp-discovery/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller.md/statistics-manager/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller.md/inventory-manager/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller.md/forwardingrules-manager/${project.version}</bundle>
+    <!-- NOTE(review): liblldp uses ${sal.version} and is not declared in this
+         repository's POM <dependencies>; confirm the property is defined so
+         resource filtering resolves it. -->
+    <bundle>mvn:org.opendaylight.controller/liblldp/${sal.version}</bundle>
+  </feature>
+
+</features>
<version>1.1-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal</relativePath>
</parent>
- <artifactId>mdsal-features</artifactId>
+ <artifactId>features-mdsal</artifactId>
<packaging>pom</packaging>
<features.file>features.xml</features.file>
</properties>
- <dependencies></dependencies>
+  <!-- Feature repositories and bundles referenced from features.xml.
+       Fix: the original hunk declared sal-core-api twice; Maven warns on
+       duplicate dependency declarations, so the redundant entry is removed. -->
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>features-yangtools</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-config</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-config-persister</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-config-netty</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-core-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-core-spi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-broker-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-broker-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-connector-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-inmemory-datastore</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>md-sal-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-netconf-connector</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-inventory</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-config-dispatcher</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-connector-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-rest-connector</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.thirdparty</groupId>
+      <artifactId>com.sun.jersey.jersey-servlet</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-buffer</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-codec-http</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-handler</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-transport</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-remote</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-rest-connector-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>sample-toaster</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>sample-toaster-provider</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>sample-toaster-consumer</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>toaster-config</artifactId>
+    </dependency>
+  </dependencies>
<build>
<resources>
<?xml version="1.0" encoding="UTF-8"?>
-<features name="mdsal-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+<features name="odl-mdsal-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-config-persister/${config.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-config-netty/${config.version}/xml/features</repository>
<feature name='odl-mdsal-all' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-commons</feature>
<feature version='${project.version}'>odl-mdsal-broker</feature>
- <feature version='${project.version}'>odl-mdsal-restconf</feature>
- </feature>
- <feature name='odl-mdsal-commons' version='${project.version}'>
- <feature version='${yangtools.version}'>yangtools-data-binding</feature>
- <bundle>mvn:org.opendaylight.controller/sal-common/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-common-api/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-common-impl/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-common-util/${project.version}</bundle>
+ <feature version='${project.version}'>odl-mdsal-netconf-connector</feature>
+ <feature version='${project.version}'>odl-restconf</feature>
+ <feature version='${project.version}'>odl-toaster</feature>
</feature>
<feature name='odl-mdsal-broker' version='${project.version}'>
- <feature version='${yangtools.version}'>yangtools-concepts</feature>
- <feature version='${yangtools.version}'>yangtools-binding</feature>
- <feature version='${mdsal.version}'>odl-mdsal-commons</feature>
- <feature version='${config.version}'>odl-config-core</feature>
- <feature version='${config.version}'>odl-config-manager</feature>
- <feature version='${config.version}'>odl-config-api</feature>
- <feature version='${config.version}'>odl-config-persister</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-common</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
+ <feature version='${mdsal.version}'>odl-mdsal-common</feature>
+ <feature version='${config.version}'>odl-config-startup</feature>
+ <feature version='${config.version}'>odl-config-netty</feature>
<bundle>mvn:org.opendaylight.controller/sal-core-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-core-spi/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-broker-impl/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-binding-util/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-connector-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/sal-inmemory-datastore/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/${config.mdsal.configfile}">mvn:org.opendaylight.controller/md-sal-config/${mdsal.version}/xml/config</configfile>
+ </feature>
+ <feature name='odl-mdsal-netconf-connector' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-broker</feature>
+ <feature version='${netconf.version}'>odl-netconf-client</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+ <bundle>mvn:org.opendaylight.controller/sal-netconf-connector/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller.model/model-inventory/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/netconf-config-dispatcher/${config.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/${config.netconf.connector.configfile}">mvn:org.opendaylight.controller/netconf-connector-config/${netconf.version}/xml/config</configfile>
</feature>
- <feature name='odl-mdsal-restconf' version='${project.version}'>
+ <feature name='odl-restconf' version='${project.version}'>
<feature version='${mdsal.version}'>odl-mdsal-broker</feature>
+ <feature>war</feature>
<bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
- <bundle>wrap:mvn:com.google.code.gson/gson/${gson.version}</bundle>
- <bundle>wrap:mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
- <bundle>wrap:mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
+ <bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
+ <bundle>mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
+ <bundle>mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
<bundle>mvn:org.opendaylight.controller.thirdparty/com.sun.jersey.jersey-servlet/${jersey.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-buffer/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-codec/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-codec-http/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-common/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-handler/${netty.version}</bundle>
- <bundle>wrap:mvn:io.netty/netty-transport/${netty.version}</bundle>
- </feature>
- <feature name='odl-mdsal-model' version='${project.version}'>
- <bundle>mvn:org.opendaylight.controller.model/model-flow-base/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.model/model-flow-management/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.model/model-flow-service/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.model/model-flow-statistics/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.model/model-inventory/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.model/model-topology/${project.version}</bundle>
+ <bundle>mvn:io.netty/netty-buffer/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-codec/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-codec-http/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-handler/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
</feature>
- <feature name='odl-mdsal-toaster' version='${project.version}'>
- <feature version='${yangtools.version}'>yangtools-concepts</feature>
- <feature version='${yangtools.version}'>yangtools-binding</feature>
+ <feature name='odl-toaster' version='${project.version}'>
+ <feature version='${yangtools.version}'>odl-yangtools-common</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
<feature version='${project.version}'>odl-mdsal-broker</feature>
- <feature version='${project.version}'>odl-mdsal-all</feature>
<bundle>mvn:org.opendaylight.controller.samples/sample-toaster/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller.samples/sample-toaster-consumer/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller.samples/sample-toaster-provider/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/${config.toaster.configfile}">mvn:org.opendaylight.controller.samples/toaster-config/${project.version}/xml/config</configfile>
</feature>
- <feature name='odl-mdsal-misc' version='${project.version}'>
- <bundle>mvn:org.opendaylight.controller/sal-netconf-connector/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-restconf-broker/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.md/topology-manager/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.md/topology-lldp-discovery/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.md/statistics-manager/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.md/inventory-manager/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.md/forwardingrules-manager/${project.version}</bundle>
- </feature>
-
</features>
<version>0.2.5-SNAPSHOT</version>
<relativePath>../../opendaylight/netconf</relativePath>
</parent>
- <artifactId>netconf-features</artifactId>
+ <artifactId>features-netconf</artifactId>
<packaging>pom</packaging>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>config-features</artifactId>
+ <artifactId>features-config</artifactId>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>features-odl-protocol-framework</artifactId>
+ <artifactId>features-protocol-framework</artifactId>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-monitoring</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-monitoring-extension</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-mapping-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-netconf-connector</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-netty-util</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.thirdparty</groupId>
+ <artifactId>ganymed</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.openexi</groupId>
+ <artifactId>nagasena</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-codec</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-handler</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-buffer</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-client</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-config</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-monitoring</artifactId>
+ </dependency>
</dependencies>
<build>
<features name="odl-netconf-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
- <repository>mvn:org.opendaylight.controller/features-odl-protocol-framework/${protocol-framework.version}/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/config-features/${config.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-protocol-framework/${protocol-framework.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
+ <feature name='odl-netconf-all' version='${project.version}'>
+ <feature version='${project.version}'>odl-netconf-api</feature>
+ <feature version='${project.version}'>odl-netconf-mapping-api</feature>
+ <feature version='${project.version}'>odl-netconf-util</feature>
+ <feature version='${project.version}'>odl-netconf-impl</feature>
+ <feature version='${project.version}'>odl-config-netconf-connector</feature>
+ <feature version='${project.version}'>odl-netconf-netty-util</feature>
+ <feature version='${project.version}'>odl-netconf-client</feature>
+ <feature version='${project.version}'>odl-netconf-monitoring</feature>
+ </feature>
<feature name='odl-netconf-api' version='${project.version}'>
+ <feature version='${protocol-framework.version}'>odl-protocol-framework</feature>
<bundle>mvn:org.opendaylight.controller/netconf-api/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring-extension/${project.version}</bundle>
- <feature version='${protocol-framework.version}'>odl-protocol-framework</feature>
<bundle>mvn:org.opendaylight.yangtools.model/ietf-inet-types/${ietf-inet-types.version}</bundle>
<bundle>mvn:org.opendaylight.yangtools.model/ietf-yang-types/${ietf-yang-types.version}</bundle>
</feature>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<bundle>mvn:org.opendaylight.controller/netconf-util/${project.version}</bundle>
</feature>
- <feature name='odl-config-netconf-connector' version='${project.version}'>
- <feature version='${config.version}'>odl-config-manager</feature>
- <bundle>mvn:org.opendaylight.controller/config-netconf-connector/${project.version}</bundle>
+ <feature name='odl-netconf-impl' version='${project.version}'>
<feature version='${project.version}'>odl-netconf-api</feature>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
- </feature>
-
- <feature name='odl-netconf-impl' version='${project.version}'>
+ <feature version='${project.version}'>odl-netconf-netty-util</feature>
<bundle>mvn:org.opendaylight.controller/netconf-impl/${project.version}</bundle>
+ </feature>
+ <feature name='odl-config-netconf-connector' version='${project.version}'>
+ <feature version='${config.version}'>odl-config-manager</feature>
<feature version='${project.version}'>odl-netconf-api</feature>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
- <feature version='${project.version}'>odl-netconf-netty-util</feature>
+ <bundle>mvn:org.opendaylight.controller/config-netconf-connector/${project.version}</bundle>
</feature>
<feature name='odl-netconf-netty-util' version='${project.version}'>
- <bundle>mvn:org.opendaylight.controller/netconf-netty-util/${project.version}</bundle>
<feature version='${project.version}'>odl-netconf-api</feature>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
+ <bundle>mvn:org.opendaylight.controller/netconf-netty-util/${project.version}</bundle>
<bundle>mvn:org.opendaylight.controller.thirdparty/ganymed/${ganymed.version}</bundle>
<bundle>mvn:org.openexi/nagasena/${exi.nagasena.version}</bundle>
<bundle>mvn:io.netty/netty-codec/${netty.version}</bundle>
<bundle>mvn:io.netty/netty-buffer/${netty.version}</bundle>
<bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
</feature>
- <feature name='odl-netconf-misc' version='${project.version}'>
+ <feature name='odl-netconf-client' version="${project.version}">
+ <feature version='${project.version}'>odl-netconf-netty-util</feature>
<bundle>mvn:org.opendaylight.controller/netconf-client/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/${config.netconf.client.configfile}">mvn:org.opendaylight.controller/netconf-config/${netconf.version}/xml/config</configfile>
+ </feature>
+ <feature name='odl-netconf-monitoring' version='${project.version}'>
+ <feature version='${project.version}'>odl-netconf-util</feature>
<bundle>mvn:org.opendaylight.controller/netconf-monitoring/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/netconf-tcp/${project.version}</bundle>
</feature>
</features>
\ No newline at end of file
<module>config-persister</module>
<module>config-netty</module>
<module>mdsal</module>
+ <module>flow</module>
<module>netconf</module>
<module>protocol-framework</module>
</modules>
<version>1.4.2-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
- <artifactId>features-odl-protocol-framework</artifactId>
+ <artifactId>features-protocol-framework</artifactId>
<version>${protocol-framework.version}</version>
<packaging>pom</packaging>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>config-features</artifactId>
+ <artifactId>features-config</artifactId>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>protocol-framework</artifactId>
+ </dependency>
</dependencies>
<build>
<features name="odl-protocol-framework-${protocol-framework.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
- <repository>mvn:org.opendaylight.controller/config-features/${config.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
<feature name='odl-protocol-framework' version='${project.version}'>
+ <feature version='${config.version}'>odl-config-api</feature>
+ <feature version='${config.version}'>odl-config-netty-config-api</feature>
<bundle>mvn:org.opendaylight.controller/protocol-framework/${protocol-framework.version}</bundle>
- <feature version='${config.version}'>odl-config-api</feature> <!-- needed by netty-config-api -->
- <feature version='${config.version}'>odl-config-netty-config-api</feature> <!-- needed by netty-config-api -->
</feature>
</features>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>commons.opendaylight</artifactId>
+ <version>1.4.2-SNAPSHOT</version>
+ <relativePath>../opendaylight</relativePath>
+ </parent>
+
+ <artifactId>liblldp</artifactId>
+ <version>0.8.1-SNAPSHOT</version>
+ <packaging>bundle</packaging>
+ <dependencies>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Import-Package>org.slf4j,
+ org.apache.commons.lang3.builder,
+ org.apache.commons.lang3.tuple
+ </Import-Package>
+ <Export-Package>
+ org.opendaylight.controller.liblldp</Export-Package>
+ </instructions>
+ <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
+ </scm>
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/**
+ *
+ */
+package org.opendaylight.controller.liblldp;
+
+import java.util.Arrays;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * BitBufferHelper class that provides utility methods to
+ * - fetch specific bits from a serialized stream of bits
+ * - convert bits to primitive data type - like short, int, long
+ * - store bits in specified location in stream of bits
+ * - convert primitive data types to stream of bits
+ */
+public abstract class BitBufferHelper {
+ protected static final Logger logger = LoggerFactory
+ .getLogger(BitBufferHelper.class);
+
+ public static final long ByteMask = 0xFF;
+
+ // Getters
+ // data: array where data are stored
+ // startOffset: bit from where to start reading
+ // numBits: number of bits to read
+ // All this function return an exception if overflow or underflow
+
+ /**
+ * Returns the first byte from the byte array
+ * @param byte[] data
+ * @return byte value
+ */
+ public static byte getByte(byte[] data) {
+ // NOTE(review): the size check throws-and-catches locally, so an oversized
+ // buffer is only logged and data[0] is still returned to the caller.
+ if ((data.length * NetUtils.NumBitsInAByte) > Byte.SIZE) {
+ try {
+ throw new BufferException(
+ "Container is too small for the number of requested bits");
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ }
+ return (data[0]);
+ }
+
+ /**
+ * Returns the short value for the byte array passed.
+ * Size of byte array is restricted to Short.SIZE
+ * @param byte[] data
+ * @return short value
+ */
+ public static short getShort(byte[] data) {
+ // NOTE(review): this compares the BYTE count against Short.SIZE (16, a BIT
+ // count), unlike getByte() which scales by NetUtils.NumBitsInAByte first —
+ // presumably data.length * NumBitsInAByte was intended; confirm.
+ if (data.length > Short.SIZE) {
+ try {
+ throw new BufferException(
+ "Container is too small for the number of requested bits");
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ }
+ return (short) toNumber(data);
+ }
+
+ /**
+ * Returns the int value for the byte array passed.
+ * Size of byte array is restricted to Integer.SIZE
+ * @param byte[] data
+ * @return int - the integer value of byte array
+ */
+ public static int getInt(byte[] data) {
+ // NOTE(review): byte count compared against Integer.SIZE (a bit count);
+ // same inconsistency as getShort(byte[]) — confirm intended units.
+ if (data.length > Integer.SIZE) {
+ try {
+ throw new BufferException(
+ "Container is too small for the number of requested bits");
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ }
+ return (int) toNumber(data);
+ }
+
+ /**
+ * Returns the long value for the byte array passed.
+ * Size of byte array is restricted to Long.SIZE
+ * @param byte[] data
+ * @return long - the integer value of byte array
+ */
+ public static long getLong(byte[] data) {
+ // NOTE(review): byte count vs Long.SIZE (bit count) — same unit mismatch as
+ // the other no-arg getters. Also catches the broad Exception here, unlike
+ // the sibling getters which catch BufferException; confirm intent.
+ if (data.length > Long.SIZE) {
+ try {
+ throw new BufferException(
+ "Container is too small for the number of requested bits");
+ } catch (Exception e) {
+ logger.error("", e);
+ }
+ }
+ return (long) toNumber(data);
+ }
+
+ /**
+ * Returns the short value for the last numBits of the byte array passed.
+ * Size of numBits is restricted to Short.SIZE
+ * @param byte[] data
+ * @param int - numBits
+ * @return short - the short value of byte array
+ */
+ public static short getShort(byte[] data, int numBits) {
+ if (numBits > Short.SIZE) {
+ try {
+ throw new BufferException(
+ "Container is too small for the number of requested bits");
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ }
+ // Read the trailing numBits of the buffer, LSB aligned.
+ int startOffset = data.length * NetUtils.NumBitsInAByte - numBits;
+ byte[] bits = null;
+ try {
+ bits = BitBufferHelper.getBits(data, startOffset, numBits);
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ // NOTE(review): if getBits() threw, bits is still null and the call below
+ // will raise a NullPointerException.
+ return (short) toNumber(bits, numBits);
+ }
+
+ /**
+ * Returns the int value for the last numBits of the byte array passed.
+ * Size of numBits is restricted to Integer.SIZE
+ * @param byte[] data
+ * @param int - numBits
+ * @return int - the integer value of byte array
+ */
+ public static int getInt(byte[] data, int numBits) {
+ if (numBits > Integer.SIZE) {
+ try {
+ throw new BufferException(
+ "Container is too small for the number of requested bits");
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ }
+ // Read the trailing numBits of the buffer, LSB aligned.
+ int startOffset = data.length * NetUtils.NumBitsInAByte - numBits;
+ byte[] bits = null;
+ try {
+ bits = BitBufferHelper.getBits(data, startOffset, numBits);
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ // NOTE(review): bits may be null here if getBits() threw (see getShort).
+ return (int) toNumber(bits, numBits);
+ }
+
+ /**
+ * Returns the long value for the last numBits of the byte array passed.
+ * Size of numBits is restricted to Long.SIZE
+ * @param byte[] data
+ * @param int - numBits
+ * @return long - the integer value of byte array
+ */
+ public static long getLong(byte[] data, int numBits) {
+ if (numBits > Long.SIZE) {
+ try {
+ throw new BufferException(
+ "Container is too small for the number of requested bits");
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ }
+ // NOTE(review): only this overload also verifies numBits against the buffer
+ // size; getShort/getInt(byte[], int) omit this check — confirm whether they
+ // should have it too.
+ if (numBits > data.length * NetUtils.NumBitsInAByte) {
+ try {
+ throw new BufferException(
+ "Trying to read more bits than contained in the data buffer");
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ }
+ int startOffset = data.length * NetUtils.NumBitsInAByte - numBits;
+ byte[] bits = null;
+ try {
+ bits = BitBufferHelper.getBits(data, startOffset, numBits);
+ } catch (BufferException e) {
+ logger.error("", e);
+ }
+ // NOTE(review): bits may be null here if getBits() threw (see getShort).
+ return (long) toNumber(bits, numBits);
+ }
+
+ /**
+ * Reads the specified number of bits from the passed byte array
+ * starting to read from the specified offset
+ * The bits read are stored in a byte array which size is dictated
+ * by the number of bits to be stored.
+ * The bits are stored in the byte array LSB aligned.
+ *
+ * Ex.
+ * Read 7 bits at offset 10
+ * 0 9 10 16 17
+ * 0101000010 | 0000101 | 1111001010010101011
+ * will be returned as {0,0,0,0,0,1,0,1}
+ *
+ * @param byte[] data
+ * @param int startOffset - offset to start fetching bits from data from
+ * @param int numBits - number of bits to be fetched from data
+ * @return byte [] - LSB aligned bits
+ *
+ * @throws BufferException
+ * when the startOffset and numBits parameters are not congruent
+ * with the data buffer size
+ */
+ public static byte[] getBits(byte[] data, int startOffset, int numBits)
+ throws BufferException {
+
+ int startByteOffset = 0;
+ int valfromcurr, valfromnext;
+ int extranumBits = numBits % NetUtils.NumBitsInAByte;
+ int extraOffsetBits = startOffset % NetUtils.NumBitsInAByte;
+ // numBytes = ceil(numBits / 8): size of the result array.
+ int numBytes = (numBits % NetUtils.NumBitsInAByte != 0) ? 1 + numBits
+ / NetUtils.NumBitsInAByte : numBits / NetUtils.NumBitsInAByte;
+ // NOTE(review): this initial allocation is dead — shiftedBytes is
+ // reassigned from shiftBitsToLSB() before it is ever read.
+ byte[] shiftedBytes = new byte[numBytes];
+ startByteOffset = startOffset / NetUtils.NumBitsInAByte;
+ byte[] bytes = new byte[numBytes];
+ if (numBits == 0) {
+ return bytes;
+ }
+
+ checkExceptions(data, startOffset, numBits);
+
+ if (extraOffsetBits == 0) {
+ // Byte-aligned read: straight copy, masking the tail byte if needed.
+ if (extranumBits == 0) {
+ System.arraycopy(data, startByteOffset, bytes, 0, numBytes);
+ return bytes;
+ } else {
+ System.arraycopy(data, startByteOffset, bytes, 0, numBytes - 1);
+ bytes[numBytes - 1] = (byte) ((int) data[startByteOffset
+ + numBytes - 1] & getMSBMask(extranumBits));
+ }
+ } else {
+ int i;
+ for (i = 0; i < numBits / NetUtils.NumBitsInAByte; i++) {
+ // Reading numBytes starting from offset
+ valfromcurr = (data[startByteOffset + i])
+ & getLSBMask(NetUtils.NumBitsInAByte - extraOffsetBits);
+ valfromnext = (data[startByteOffset + i + 1])
+ & getMSBMask(extraOffsetBits);
+ bytes[i] = (byte) (valfromcurr << (extraOffsetBits) | (valfromnext >> (NetUtils.NumBitsInAByte - extraOffsetBits)));
+ }
+ // Now adding the rest of the bits if any
+ if (extranumBits != 0) {
+ if (extranumBits < (NetUtils.NumBitsInAByte - extraOffsetBits)) {
+ valfromnext = (byte) (data[startByteOffset + i] & ((getMSBMask(extranumBits)) >> extraOffsetBits));
+ bytes[i] = (byte) (valfromnext << extraOffsetBits);
+ } else if (extranumBits == (NetUtils.NumBitsInAByte - extraOffsetBits)) {
+ valfromcurr = (data[startByteOffset + i])
+ & getLSBMask(NetUtils.NumBitsInAByte
+ - extraOffsetBits);
+ bytes[i] = (byte) (valfromcurr << extraOffsetBits);
+ } else {
+ valfromcurr = (data[startByteOffset + i])
+ & getLSBMask(NetUtils.NumBitsInAByte
+ - extraOffsetBits);
+ valfromnext = (data[startByteOffset + i + 1])
+ & (getMSBMask(extranumBits
+ - (NetUtils.NumBitsInAByte - extraOffsetBits)));
+ bytes[i] = (byte) (valfromcurr << (extraOffsetBits) | (valfromnext >> (NetUtils.NumBitsInAByte - extraOffsetBits)));
+ }
+
+ }
+ }
+ // Aligns the bits to LSB
+ shiftedBytes = shiftBitsToLSB(bytes, numBits);
+ return shiftedBytes;
+ }
+
+ // Setters
+ // data: array where data will be stored
+ // input: the data that need to be stored in the data array
+ // startOffset: bit from where to start writing
+ // numBits: number of bits to read
+
+ /**
+ * Bits are expected to be stored in the input byte array from LSB
+ * @param byte[] - data to set the input byte
+ * @param byte - input byte to be inserted
+ * @param startOffset - offset of data[] to start inserting byte from
+ * @param numBits - number of bits of input to be inserted into data[]
+ *
+ * @throws BufferException
+ * when the input, startOffset and numBits are not congruent
+ * with the data buffer size
+ */
+ public static void setByte(byte[] data, byte input, int startOffset,
+ int numBits) throws BufferException {
+ // Wrap the single byte in a one-element array and delegate to setBytes().
+ byte[] inputByteArray = new byte[1];
+ Arrays.fill(inputByteArray, 0, 1, input);
+ setBytes(data, inputByteArray, startOffset, numBits);
+ }
+
+ /**
+ * Bits are expected to be stored in the input byte array from LSB
+ * @param byte[] - data to set the input byte
+ * @param byte[] - input bytes to be inserted
+ * @param startOffset - offset of data[] to start inserting byte from
+ * @param numBits - number of bits of input to be inserted into data[]
+ * @return void
+ * @throws BufferException
+ * when the startOffset and numBits parameters are not congruent
+ * with data and input buffers' size
+ */
+ public static void setBytes(byte[] data, byte[] input, int startOffset,
+ int numBits) throws BufferException {
+ // Validate the requested range first; insertBits() performs the write.
+ checkExceptions(data, startOffset, numBits);
+ insertBits(data, input, startOffset, numBits);
+ }
+
+ /**
+ * Returns numBits 1's in the MSB position
+ *
+ * @param numBits
+ * @return
+ */
+ public static int getMSBMask(int numBits) {
+ // NOTE(review): '7 - i' hard-codes an 8-bit byte width, so the mask is only
+ // meaningful for numBits <= 8 (bit 7 down to bit 8 - numBits).
+ int mask = 0;
+ for (int i = 0; i < numBits; i++) {
+ mask = mask | (1 << (7 - i));
+ }
+ return mask;
+ }
+
+ /**
+ * Returns numBits 1's in the LSB position
+ *
+ * @param numBits
+ * @return
+ */
+ public static int getLSBMask(int numBits) {
+ // Builds numBits consecutive 1-bits starting from bit 0.
+ int mask = 0;
+ for (int i = 0; i < numBits; i++) {
+ mask = mask | (1 << i);
+ }
+ return mask;
+ }
+
+ /**
+ * Returns the numerical value of the byte array passed
+ *
+ * @param byte[] - array
+ * @return long - numerical value of byte array passed
+ */
+ static public long toNumber(byte[] array) {
+ long ret = 0;
+ long length = array.length;
+ int value = 0;
+ // Big-endian accumulation: array[0] becomes the most significant byte.
+ for (int i = 0; i < length; i++) {
+ value = array[i];
+ // '+= 256' converts the signed byte to its unsigned value before OR-ing.
+ if (value < 0)
+ value += 256;
+ ret = ret
+ | (long) ((long) value << ((length - i - 1) * NetUtils.NumBitsInAByte));
+ }
+ return ret;
+ }
+
+ /**
+ * Returns the numerical value of the last numBits (LSB bits) of the byte
+ * array passed
+ *
+ * @param byte[] - array
+ * @param int - numBits
+ * @return long - numerical value of byte array passed
+ */
+ static public long toNumber(byte[] array, int numBits) {
+ int length = numBits / NetUtils.NumBitsInAByte;
+ int bitsRest = numBits % NetUtils.NumBitsInAByte;
+ int startOffset = array.length - length;
+ long ret = 0;
+ int value = 0;
+
+ // NOTE(review): the masked value computed on the next line is immediately
+ // overwritten by the assignment after it, so getLSBMask(bitsRest) has no
+ // effect — confirm which of the two assignments was intended.
+ // NOTE(review): when numBits is a multiple of NumBitsInAByte and the array
+ // holds exactly numBits/8 bytes, startOffset is 0 and array[startOffset - 1]
+ // indexes -1 — verify callers never hit this case.
+ value = array[startOffset - 1] & getLSBMask(bitsRest);
+ value = (array[startOffset - 1] < 0) ? (array[startOffset - 1] + 256)
+ : array[startOffset - 1];
+ ret = ret
+ | (value << ((array.length - startOffset) * NetUtils.NumBitsInAByte));
+
+ for (int i = startOffset; i < array.length; i++) {
+ value = array[i];
+ if (value < 0)
+ value += 256;
+ ret = ret
+ | (long) ((long) value << ((array.length - i - 1) * NetUtils.NumBitsInAByte));
+ }
+
+ return ret;
+ }
+
+ /**
+ * Accepts a number as input and returns its value in byte form in LSB
+ * aligned form example: input = 5000 [1001110001000] bytes = 19, -120
+ * [00010011] [10001000]
+ *
+ * @param Number
+ * @return byte[]
+ *
+ */
+
+ public static byte[] toByteArray(Number input) {
+ Class<? extends Number> dataType = input.getClass();
+ short size = 0;
+ long longValue = input.longValue();
+
+ // NOTE(review): getClass() on a boxed Number returns the wrapper class
+ // (Byte.class etc.), so the byte.class/short.class/... comparisons appear
+ // unreachable — confirm they can be dropped.
+ if (dataType == Byte.class || dataType == byte.class) {
+ size = Byte.SIZE;
+ } else if (dataType == Short.class || dataType == short.class) {
+ size = Short.SIZE;
+ } else if (dataType == Integer.class || dataType == int.class) {
+ size = Integer.SIZE;
+ } else if (dataType == Long.class || dataType == long.class) {
+ size = Long.SIZE;
+ } else {
+ throw new IllegalArgumentException(
+ "Parameter must one of the following: Short/Int/Long\n");
+ }
+
+ int length = size / NetUtils.NumBitsInAByte;
+ byte bytes[] = new byte[length];
+
+ // Getting the bytes from input value
+ for (int i = 0; i < length; i++) {
+ bytes[i] = (byte) ((longValue >> (NetUtils.NumBitsInAByte * (length
+ - i - 1))) & ByteMask);
+ }
+ return bytes;
+ }
+
+ /**
+ * Accepts a number as input and returns its value in byte form in MSB
+ * aligned form example: input = 5000 [1001110001000] bytes = -114, 64
+ * [10011100] [01000000]
+ *
+ * @param Number
+ * input
+ * @param int numBits - the number of bits to be returned
+ * @return byte[]
+ *
+ */
+ public static byte[] toByteArray(Number input, int numBits) {
+ Class<? extends Number> dataType = input.getClass();
+ short size = 0;
+ long longValue = input.longValue();
+
+ if (dataType == Short.class) {
+ size = Short.SIZE;
+ } else if (dataType == Integer.class) {
+ size = Integer.SIZE;
+ } else if (dataType == Long.class) {
+ size = Long.SIZE;
+ } else {
+ throw new IllegalArgumentException(
+ "Parameter must one of the following: Short/Int/Long\n");
+ }
+
+ int length = size / NetUtils.NumBitsInAByte;
+ byte bytes[] = new byte[length];
+ byte[] inputbytes = new byte[length];
+ byte shiftedBytes[];
+
+ // Getting the bytes from input value
+ for (int i = 0; i < length; i++) {
+ bytes[i] = (byte) ((longValue >> (NetUtils.NumBitsInAByte * (length
+ - i - 1))) & ByteMask);
+ }
+
+ // Strip leading zero bytes for Integer/Long inputs before MSB-shifting.
+ if ((bytes[0] == 0 && dataType == Long.class)
+ || (bytes[0] == 0 && dataType == Integer.class)) {
+ int index = 0;
+ for (index = 0; index < length; ++index) {
+ if (bytes[index] != 0) {
+ bytes[0] = bytes[index];
+ break;
+ }
+ }
+ System.arraycopy(bytes, index, inputbytes, 0, length - index);
+ // NOTE(review): this fill targets 'bytes' (not 'inputbytes') over the
+ // range [length-index+1, length-1), which looks inconsistent with the
+ // arraycopy above — confirm the intended array and bounds.
+ Arrays.fill(bytes, length - index + 1, length - 1, (byte) 0);
+ } else {
+ System.arraycopy(bytes, 0, inputbytes, 0, length);
+ }
+
+ shiftedBytes = shiftBitsToMSB(inputbytes, numBits);
+
+ return shiftedBytes;
+ }
+
+ /**
+ * Takes an LSB aligned byte array and returned the LSB numBits in a MSB
+ * aligned byte array
+ *
+ * @param inputbytes
+ * @param numBits
+ * @return
+ */
+ /**
+ * It aligns the last numBits bits to the head of the byte array following
+ * them with numBits % 8 zero bits.
+ *
+ * Example: For inputbytes = [00000111][01110001] and numBits = 12 it
+ * returns: shiftedBytes = [01110111][00010000]
+ *
+ * @param byte[] inputBytes
+ * @param int numBits - number of bits to be left aligned
+ * @return byte[]
+ */
+ public static byte[] shiftBitsToMSB(byte[] inputBytes, int numBits) {
+ int numBitstoShiftBy = 0, leadZeroesMSB = 8, numEndRestBits = 0;
+ int size = inputBytes.length;
+ byte[] shiftedBytes = new byte[size];
+ int i;
+
+ // Count leading zero bits in the first byte (defaults to 8 if all zero).
+ for (i = 0; i < Byte.SIZE; i++) {
+ if (((byte) (inputBytes[0] & getMSBMask(i + 1))) != 0) {
+ leadZeroesMSB = i;
+ break;
+ }
+ }
+
+ if (numBits % NetUtils.NumBitsInAByte == 0) {
+ numBitstoShiftBy = 0;
+ } else {
+ numBitstoShiftBy = ((NetUtils.NumBitsInAByte - (numBits % NetUtils.NumBitsInAByte)) < leadZeroesMSB) ? (NetUtils.NumBitsInAByte - (numBits % NetUtils.NumBitsInAByte))
+ : leadZeroesMSB;
+ }
+ if (numBitstoShiftBy == 0) {
+ return inputBytes;
+ }
+
+ if (numBits < NetUtils.NumBitsInAByte) {
+ // inputbytes.length = 1 OR read less than a byte
+ shiftedBytes[0] = (byte) ((inputBytes[0] & getLSBMask(numBits)) << numBitstoShiftBy);
+ } else {
+ // # of bits to read from last byte
+ numEndRestBits = NetUtils.NumBitsInAByte
+ - (inputBytes.length * NetUtils.NumBitsInAByte - numBits - numBitstoShiftBy);
+
+ for (i = 0; i < (size - 1); i++) {
+ if ((i + 1) == (size - 1)) {
+ if (numEndRestBits > numBitstoShiftBy) {
+ shiftedBytes[i] = (byte) ((inputBytes[i] << numBitstoShiftBy) | ((inputBytes[i + 1] & getMSBMask(numBitstoShiftBy)) >> (numEndRestBits - numBitstoShiftBy)));
+ shiftedBytes[i + 1] = (byte) ((inputBytes[i + 1] & getLSBMask(numEndRestBits
+ - numBitstoShiftBy)) << numBitstoShiftBy);
+ } else
+ shiftedBytes[i] = (byte) ((inputBytes[i] << numBitstoShiftBy) | ((inputBytes[i + 1] & getMSBMask(numEndRestBits)) >> (NetUtils.NumBitsInAByte - numEndRestBits)));
+ }
+ // NOTE(review): this unconditional assignment runs even when the
+ // (i + 1) == (size - 1) branch above already set shiftedBytes[i],
+ // overwriting that special-case value — confirm whether an
+ // else/continue was intended.
+ shiftedBytes[i] = (byte) ((inputBytes[i] << numBitstoShiftBy) | (inputBytes[i + 1] & getMSBMask(numBitstoShiftBy)) >> (NetUtils.NumBitsInAByte - numBitstoShiftBy));
+ }
+
+ }
+ return shiftedBytes;
+ }
+
+ /**
+ * It aligns the first numBits bits to the right end of the byte array
+ * preceding them with numBits % 8 zero bits.
+ *
+ * Example: For inputbytes = [01110111][00010000] and numBits = 12 it
+ * returns: shiftedBytes = [00000111][01110001]
+ *
+ * @param byte[] inputBytes
+ * @param int numBits - number of bits to be right aligned
+ * @return byte[]
+ */
+ public static byte[] shiftBitsToLSB(byte[] inputBytes, int numBits) {
+ int numBytes = inputBytes.length;
+ int numBitstoShift = numBits % NetUtils.NumBitsInAByte;
+ byte[] shiftedBytes = new byte[numBytes];
+ int inputLsb = 0, inputMsb = 0;
+
+ if (numBitstoShift == 0) {
+ return inputBytes;
+ }
+
+ for (int i = 1; i < numBytes; i++) {
+ inputLsb = inputBytes[i - 1]
+ & getLSBMask(NetUtils.NumBitsInAByte - numBitstoShift);
+ inputLsb = (inputLsb < 0) ? (inputLsb + 256) : inputLsb;
+ // NOTE(review): the masked value computed on the next line is discarded —
+ // the following assignment overwrites inputMsb with the unmasked byte.
+ // Confirm which of the two was intended.
+ inputMsb = inputBytes[i] & getMSBMask(numBitstoShift);
+ inputMsb = (inputBytes[i] < 0) ? (inputBytes[i] + 256)
+ : inputBytes[i];
+ shiftedBytes[i] = (byte) ((inputLsb << numBitstoShift) | (inputMsb >> (NetUtils.NumBitsInAByte - numBitstoShift)));
+ }
+ // First output byte keeps only the bits shifted out of inputBytes[0].
+ inputMsb = inputBytes[0] & (getMSBMask(numBitstoShift));
+ inputMsb = (inputMsb < 0) ? (inputMsb + 256) : inputMsb;
+ shiftedBytes[0] = (byte) (inputMsb >> (NetUtils.NumBitsInAByte - numBitstoShift));
+ return shiftedBytes;
+ }
+
+ /**
+ * Insert in the data buffer at position dictated by the offset the number
+ * of bits specified from the input data byte array. The input byte array
+ * has the bits stored starting from the LSB
+ *
+ * @param byte[] data
+ * @param byte[] inputdata
+ * @param int startOffset
+ * @param int numBits
+ */
+ public static void insertBits(byte[] data, byte[] inputdataLSB,
+ int startOffset, int numBits) {
+ byte[] inputdata = shiftBitsToMSB(inputdataLSB, numBits); // Align to
+ // MSB the
+ // passed byte
+ // array
+ int numBytes = numBits / NetUtils.NumBitsInAByte;
+ int startByteOffset = startOffset / NetUtils.NumBitsInAByte;
+ int extraOffsetBits = startOffset % NetUtils.NumBitsInAByte;
+ int extranumBits = numBits % NetUtils.NumBitsInAByte;
+ int RestBits = numBits % NetUtils.NumBitsInAByte;
+ int InputMSBbits = 0, InputLSBbits = 0;
+ int i;
+
+ if (numBits == 0) {
+ return;
+ }
+
+ if (extraOffsetBits == 0) {
+ if (extranumBits == 0) {
+ numBytes = numBits / NetUtils.NumBitsInAByte;
+ System.arraycopy(inputdata, 0, data, startByteOffset, numBytes);
+ } else {
+ System.arraycopy(inputdata, 0, data, startByteOffset, numBytes);
+ data[startByteOffset + numBytes] = (byte) (data[startByteOffset
+ + numBytes] | (inputdata[numBytes] & getMSBMask(extranumBits)));
+ }
+ } else {
+ for (i = 0; i < numBytes; i++) {
+ if (i != 0)
+ InputLSBbits = (inputdata[i - 1] & getLSBMask(extraOffsetBits));
+ InputMSBbits = (byte) (inputdata[i] & (getMSBMask(NetUtils.NumBitsInAByte
+ - extraOffsetBits)));
+ InputMSBbits = (InputMSBbits >= 0) ? InputMSBbits
+ : InputMSBbits + 256;
+ data[startByteOffset + i] = (byte) (data[startByteOffset + i]
+ | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)) | (InputMSBbits >> extraOffsetBits));
+ InputMSBbits = InputLSBbits = 0;
+ }
+ if (RestBits < (NetUtils.NumBitsInAByte - extraOffsetBits)) {
+ if (numBytes != 0)
+ InputLSBbits = (inputdata[i - 1] & getLSBMask(extraOffsetBits));
+ InputMSBbits = (byte) (inputdata[i] & (getMSBMask(RestBits)));
+ InputMSBbits = (InputMSBbits >= 0) ? InputMSBbits
+ : InputMSBbits + 256;
+ data[startByteOffset + i] = (byte) ((data[startByteOffset + i])
+ | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)) | (InputMSBbits >> extraOffsetBits));
+ } else if (RestBits == (NetUtils.NumBitsInAByte - extraOffsetBits)) {
+ if (numBytes != 0)
+ InputLSBbits = (inputdata[i - 1] & getLSBMask(extraOffsetBits));
+ InputMSBbits = (byte) (inputdata[i] & (getMSBMask(NetUtils.NumBitsInAByte
+ - extraOffsetBits)));
+ InputMSBbits = (InputMSBbits >= 0) ? InputMSBbits
+ : InputMSBbits + 256;
+ data[startByteOffset + i] = (byte) (data[startByteOffset + i]
+ | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)) | (InputMSBbits >> extraOffsetBits));
+ } else {
+ if (numBytes != 0)
+ InputLSBbits = (inputdata[i - 1] & getLSBMask(extraOffsetBits));
+ InputMSBbits = (byte) (inputdata[i] & (getMSBMask(NetUtils.NumBitsInAByte
+ - extraOffsetBits)));
+ InputMSBbits = (InputMSBbits >= 0) ? InputMSBbits
+ : InputMSBbits + 256;
+ data[startByteOffset + i] = (byte) (data[startByteOffset + i]
+ | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)) | (InputMSBbits >> extraOffsetBits));
+
+ InputLSBbits = (inputdata[i] & (getLSBMask(RestBits
+ - (NetUtils.NumBitsInAByte - extraOffsetBits)) << (NetUtils.NumBitsInAByte - RestBits)));
+ data[startByteOffset + i + 1] = (byte) (data[startByteOffset
+ + i + 1] | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)));
+ }
+ }
+ }
+
+ /**
+ * Checks for overflow and underflow exceptions
+ * @param data
+ * @param startOffset
+ * @param numBits
+ * @throws BufferException when the startOffset and numBits parameters
+ * are not congruent with the data buffer's size
+ */
+ public static void checkExceptions(byte[] data, int startOffset, int numBits)
+ throws BufferException {
+ int endOffsetByte;
+ int startByteOffset;
+ endOffsetByte = startOffset
+ / NetUtils.NumBitsInAByte
+ + numBits
+ / NetUtils.NumBitsInAByte
+ + ((numBits % NetUtils.NumBitsInAByte != 0) ? 1 : ((startOffset
+ % NetUtils.NumBitsInAByte != 0) ? 1 : 0));
+ startByteOffset = startOffset / NetUtils.NumBitsInAByte;
+
+ if (data == null) {
+ throw new BufferException("data[] is null\n");
+ }
+
+ if ((startOffset < 0) || (startByteOffset >= data.length)
+ || (endOffsetByte > data.length) || (numBits < 0)
+ || (numBits > NetUtils.NumBitsInAByte * data.length)) {
+ throw new BufferException(
+ "Illegal arguement/out of bound exception - data.length = "
+ + data.length + " startOffset = " + startOffset
+ + " numBits " + numBits);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.liblldp;
+
+/**
+ * Describes an exception that is raised during BitBufferHelper operations.
+ */
+public class BufferException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ public BufferException(String message) {
+ super(message);
+ }
+}
--- /dev/null
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/**
+ * @file ConstructionException.java
+ *
+ *
+ * @brief Describes an exception that is raised when construction
+ * of a Node/NodeConnector/Edge or any of the SAL basic objects fails
+ * because the input passed is not valid or compatible
+ *
+ *
+ */
+package org.opendaylight.controller.liblldp;
+
+public class ConstructionException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ public ConstructionException(String message) {
+ super(message);
+ }
+}
--- /dev/null
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * @file DataLinkAddress.java
+ *
+ * @brief Abstract base class for a Datalink Address
+ *
+ */
+
+/**
+ * Abstract base class for a Datalink Address
+ *
+ */
+@XmlRootElement
+abstract public class DataLinkAddress implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private String name;
+
+ public DataLinkAddress() {
+
+ }
+
+ /**
+ * Constructor of super class
+ *
+ * @param name name of the new DataLink address; not for general use,
+ * available only to subclasses
+ *
+ * @return constructed object
+ */
+ protected DataLinkAddress(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Used to copy the DataLinkAddress in a polymorphic way
+ *
+ *
+ * @return A clone of this DataLinkAddress
+ */
+ @Override
+ abstract public DataLinkAddress clone();
+
+ /**
+ * Allow to distinguish among different data link addresses
+ *
+ *
+ * @return Name of the DataLinkAddress we are working on
+ */
+ public String getName() {
+ return this.name;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((name == null) ? 0 : name.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ DataLinkAddress other = (DataLinkAddress) obj;
+ if (name == null) {
+ if (other.name != null)
+ return false;
+ } else if (!name.equals(other.name))
+ return false;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "DataLinkAddress [name=" + name + "]";
+ }
+}
--- /dev/null
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * The enum contains the most common 802.3 ethernet types and 802.2 + SNAP protocol ids
+ *
+ *
+ *
+ */
+public enum EtherTypes {
+ PVSTP("PVSTP", 0x010B), // 802.2 + SNAP (Spanning Tree)
+ CDP("CDP", 0x2000), // 802.2 + SNAP
+ VTP("VTP", 0x2003), // 802.2 + SNAP
+ IPv4("IPv4", 0x800), ARP("ARP", 0x806), RARP("Reverse ARP", 0x8035), VLANTAGGED(
+ "VLAN Tagged", 0x8100), // 802.1Q
+ IPv6("IPv6", 0x86DD), MPLSUCAST("MPLS Unicast", 0x8847), MPLSMCAST(
+ "MPLS Multicast", 0x8848), QINQ("QINQ", 0x88A8), // Standard 802.1ad QinQ
+ LLDP("LLDP", 0x88CC), OLDQINQ("Old QINQ", 0x9100), // Old non-standard QinQ
+ CISCOQINQ("Cisco QINQ", 0x9200); // Cisco non-standard QinQ
+
+ private static final String regexNumberString = "^[0-9]+$";
+ private String description;
+ private int number;
+
+ private EtherTypes(String description, int number) {
+ this.description = description;
+ this.number = number;
+ }
+
+ public String toString() {
+ return description;
+ }
+
+ public int intValue() {
+ return number;
+ }
+
+ public short shortValue() {
+ return ((Integer) number).shortValue();
+ }
+
+ public static String getEtherTypeName(int number) {
+ return getEtherTypeInternal(number);
+ }
+
+ public static String getEtherTypeName(short number) {
+ return getEtherTypeInternal((int) number & 0xffff);
+ }
+
+ public static String getEtherTypeName(byte number) {
+ return getEtherTypeInternal((int) number & 0xff);
+ }
+
+ private static String getEtherTypeInternal(int number) {
+ for (EtherTypes type : EtherTypes.values()) {
+ if (type.number == number) {
+ return type.toString();
+ }
+ }
+ return "0x" + Integer.toHexString(number);
+ }
+
+ public static short getEtherTypeNumberShort(String name) {
+ if (name.matches(regexNumberString)) {
+ return Short.valueOf(name);
+ }
+ for (EtherTypes type : EtherTypes.values()) {
+ if (type.description.equalsIgnoreCase(name)) {
+ return type.shortValue();
+ }
+ }
+ return 0;
+ }
+
+ public static int getEtherTypeNumberInt(String name) {
+ if (name.matches(regexNumberString)) {
+ return Integer.valueOf(name);
+ }
+ for (EtherTypes type : EtherTypes.values()) {
+ if (type.description.equalsIgnoreCase(name)) {
+ return type.intValue();
+ }
+ }
+ return 0;
+ }
+
+ public static List<String> getEtherTypesNameList() {
+ List<String> ethertypesList = new ArrayList<String>();
+ for (EtherTypes type : EtherTypes.values()) {
+ ethertypesList.add(type.toString());
+ }
+ return ethertypesList;
+ }
+
+ public static EtherTypes loadFromString(String string) {
+ int intType = Integer.parseInt(string);
+
+ for (EtherTypes type : EtherTypes.values()) {
+ if (type.number == intType) {
+ return type;
+ }
+ }
+ return null;
+ }
+
+}
--- /dev/null
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+
+/**
+ * Class that represents the Ethernet frame objects
+ */
+public class Ethernet extends Packet {
+ private static final String DMAC = "DestinationMACAddress";
+ private static final String SMAC = "SourceMACAddress";
+ private static final String ETHT = "EtherType";
+
+ // TODO: This has to be outside and it should be possible for osgi
+ // to add new coming packet classes
+ public static final Map<Short, Class<? extends Packet>> etherTypeClassMap;
+ static {
+ etherTypeClassMap = new HashMap<Short, Class<? extends Packet>>();
+ etherTypeClassMap.put(EtherTypes.LLDP.shortValue(), LLDP.class);
+ }
+ private static Map<String, Pair<Integer, Integer>> fieldCoordinates = new LinkedHashMap<String, Pair<Integer, Integer>>() {
+ private static final long serialVersionUID = 1L;
+ {
+ put(DMAC, new ImmutablePair<Integer, Integer>(0, 48));
+ put(SMAC, new ImmutablePair<Integer, Integer>(48, 48));
+ put(ETHT, new ImmutablePair<Integer, Integer>(96, 16));
+ }
+ };
+ private final Map<String, byte[]> fieldValues;
+
+ /**
+ * Default constructor that creates and sets the HashMap
+ */
+ public Ethernet() {
+ super();
+ fieldValues = new HashMap<String, byte[]>();
+ hdrFieldCoordMap = fieldCoordinates;
+ hdrFieldsMap = fieldValues;
+ }
+
+ /**
+ * Constructor that sets the access level for the packet and
+ * creates and sets the HashMap
+ */
+ public Ethernet(boolean writeAccess) {
+ super(writeAccess);
+ fieldValues = new HashMap<String, byte[]>();
+ hdrFieldCoordMap = fieldCoordinates;
+ hdrFieldsMap = fieldValues;
+ }
+
+ @Override
+ public void setHeaderField(String headerField, byte[] readValue) {
+ if (headerField.equals(ETHT)) {
+ payloadClass = etherTypeClassMap.get(BitBufferHelper
+ .getShort(readValue));
+ }
+ hdrFieldsMap.put(headerField, readValue);
+ }
+
+ /**
+ * Gets the destination MAC address stored
+ * @return byte[] - the destinationMACAddress
+ */
+ public byte[] getDestinationMACAddress() {
+ return fieldValues.get(DMAC);
+ }
+
+ /**
+ * Gets the source MAC address stored
+ * @return byte[] - the sourceMACAddress
+ */
+ public byte[] getSourceMACAddress() {
+ return fieldValues.get(SMAC);
+ }
+
+ /**
+ * Gets the etherType stored
+ * @return short - the etherType
+ */
+ public short getEtherType() {
+ return BitBufferHelper.getShort(fieldValues.get(ETHT));
+ }
+
+ public boolean isBroadcast(){
+ return NetUtils.isBroadcastMACAddr(getDestinationMACAddress());
+ }
+
+ public boolean isMulticast(){
+ return NetUtils.isMulticastMACAddr(getDestinationMACAddress());
+ }
+
+ /**
+ * Sets the destination MAC address for the current Ethernet object instance
+ * @param byte[] - the destinationMACAddress to set
+ */
+ public Ethernet setDestinationMACAddress(byte[] destinationMACAddress) {
+ fieldValues.put(DMAC, destinationMACAddress);
+ return this;
+ }
+
+ /**
+ * Sets the source MAC address for the current Ethernet object instance
+ * @param byte[] - the sourceMACAddress to set
+ */
+ public Ethernet setSourceMACAddress(byte[] sourceMACAddress) {
+ fieldValues.put(SMAC, sourceMACAddress);
+ return this;
+ }
+
+ /**
+ * Sets the etherType for the current Ethernet object instance
+ * @param short - the etherType to set
+ */
+ public Ethernet setEtherType(short etherType) {
+ byte[] ethType = BitBufferHelper.toByteArray(etherType);
+ fieldValues.put(ETHT, ethType);
+ return this;
+ }
+
+}
--- /dev/null
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.Arrays;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+public class EthernetAddress extends DataLinkAddress {
+ private static final long serialVersionUID = 1L;
+ @XmlTransient
+ private byte[] macAddress;
+
+ public static final EthernetAddress BROADCASTMAC = createWellKnownAddress(new byte[] {
+ (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
+ (byte) 0xff });
+
+ public static final EthernetAddress INVALIDHOST = BROADCASTMAC;
+
+ public static final String addressName = "Ethernet MAC Address";
+ public static final int SIZE = 6;
+
+ private static final EthernetAddress createWellKnownAddress(byte[] mac) {
+ try {
+ return new EthernetAddress(mac);
+ } catch (ConstructionException ce) {
+ return null;
+ }
+ }
+
+ /* Private constructor to satisfy JAXB */
+ @SuppressWarnings("unused")
+ private EthernetAddress() {
+ }
+
+ /**
+ * Public constructor for an Ethernet MAC address starting from
+ * the bytes constituting the address; the constructor validates
+ * the size of the array to make sure it meets the expected size
+ *
+ * @param macAddress A byte array in big endian format
+ * representing the Ethernet MAC Address
+ *
+ * @return The constructed object if valid
+ */
+ public EthernetAddress(byte[] macAddress) throws ConstructionException {
+ super(addressName);
+
+ if (macAddress == null) {
+ throw new ConstructionException("Null input parameter passed");
+ }
+
+ if (macAddress.length != SIZE) {
+ throw new ConstructionException(
+ "Wrong size of passed byte array, expected:" + SIZE
+ + " got:" + macAddress.length);
+ }
+ this.macAddress = new byte[SIZE];
+ System.arraycopy(macAddress, 0, this.macAddress, 0, SIZE);
+ }
+
+ public EthernetAddress clone() {
+ try {
+ return new EthernetAddress(this.macAddress.clone());
+ } catch (ConstructionException ce) {
+ return null;
+ }
+ }
+
+ /**
+ * Return the Ethernet Mac address in byte array format
+ *
+ * @return The Ethernet Mac address in byte array format
+ */
+ public byte[] getValue() {
+ return this.macAddress;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = super.hashCode();
+ result = prime * result + Arrays.hashCode(macAddress);
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (!super.equals(obj))
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ EthernetAddress other = (EthernetAddress) obj;
+ if (!Arrays.equals(macAddress, other.macAddress))
+ return false;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "EthernetAddress [macAddress=" + HexEncode.bytesToHexStringFormat(macAddress)
+ + "]";
+ }
+
+ @XmlElement(name = "macAddress")
+ public String getMacAddress() {
+ return HexEncode.bytesToHexStringFormat(macAddress);
+ }
+}
--- /dev/null
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.math.BigInteger;
+
+/**
+ * The class provides methods to convert hex encode strings
+ *
+ *
+ */
+public class HexEncode {
+ /**
+ * This method converts byte array into String format without ":" inserted.
+ *
+ * @param bytes
+ * The byte array to convert to string
+ * @return The hexadecimal representation of the byte array. If bytes is
+ * null, "null" string is returned
+ */
+ public static String bytesToHexString(byte[] bytes) {
+
+ if (bytes == null) {
+ return "null";
+ }
+
+ String ret = "";
+ StringBuffer buf = new StringBuffer();
+ for (int i = 0; i < bytes.length; i++) {
+ if (i > 0) {
+ ret += ":";
+ }
+ short u8byte = (short) (bytes[i] & 0xff);
+ String tmp = Integer.toHexString(u8byte);
+ if (tmp.length() == 1) {
+ buf.append("0");
+ }
+ buf.append(tmp);
+ }
+ ret = buf.toString();
+ return ret;
+ }
+
+ public static String longToHexString(long val) {
+ char arr[] = Long.toHexString(val).toCharArray();
+ StringBuffer buf = new StringBuffer();
+ // prepend the right number of leading zeros
+ int i = 0;
+ for (; i < (16 - arr.length); i++) {
+ buf.append("0");
+ if ((i & 0x01) == 1) {
+ buf.append(":");
+ }
+ }
+ for (int j = 0; j < arr.length; j++) {
+ buf.append(arr[j]);
+ if ((((i + j) & 0x01) == 1) && (j < (arr.length - 1))) {
+ buf.append(":");
+ }
+ }
+ return buf.toString();
+ }
+
+
+ public static byte[] bytesFromHexString(String values) {
+ String target = "";
+ if (values != null) {
+ target = values;
+ }
+ String[] octets = target.split(":");
+
+ byte[] ret = new byte[octets.length];
+ for (int i = 0; i < octets.length; i++) {
+ ret[i] = Integer.valueOf(octets[i], 16).byteValue();
+ }
+ return ret;
+ }
+
+ public static long stringToLong(String values) {
+ long value = new BigInteger(values.replaceAll(":", ""), 16).longValue();
+ return value;
+ }
+
+ /**
+ * This method converts byte array into HexString format with ":" inserted.
+ */
+ public static String bytesToHexStringFormat(byte[] bytes) {
+ if (bytes == null) {
+ return "null";
+ }
+ String ret = "";
+ StringBuffer buf = new StringBuffer();
+ for (int i = 0; i < bytes.length; i++) {
+ if (i > 0) {
+ buf.append(":");
+ }
+ short u8byte = (short) (bytes[i] & 0xff);
+ String tmp = Integer.toHexString(u8byte);
+ if (tmp.length() == 1) {
+ buf.append("0");
+ }
+ buf.append(tmp);
+ }
+ ret = buf.toString();
+ return ret;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Class that represents the LLDP frame objects
+ */
+
+public class LLDP extends Packet {
+ private static final String CHASSISID = "ChassisId";
+ private static final String SYSTEMNAMEID = "SystemNameID";
+ private static final String PORTID = "PortId";
+ private static final String TTL = "TTL";
+ private static final int LLDPDefaultTlvs = 4;
+ private static LLDPTLV emptyTLV = new LLDPTLV().setLength((short) 0)
+ .setType((byte) 0);
+ public static final byte[] LLDPMulticastMac = { 1, (byte) 0x80,
+ (byte) 0xc2, 0, 0, (byte) 0xe };
+ private Map<Byte, LLDPTLV> tlvList;
+
+ /**
+ * Default constructor that creates the tlvList LinkedHashMap
+ */
+ public LLDP() {
+ super();
+ tlvList = new LinkedHashMap<Byte, LLDPTLV>(LLDPDefaultTlvs);
+ }
+
+ /**
+ * Constructor that creates the tlvList LinkedHashMap and sets the write
+ * access for the same
+ */
+ public LLDP(boolean writeAccess) {
+ super(writeAccess);
+ tlvList = new LinkedHashMap<Byte, LLDPTLV>(LLDPDefaultTlvs); // Mandatory
+ // TLVs
+ }
+
+ /**
+ * @param String
+ * - description of the type of TLV
+ * @return byte - type of TLV
+ */
+ private byte getType(String typeDesc) {
+ if (typeDesc.equals(CHASSISID)) {
+ return LLDPTLV.TLVType.ChassisID.getValue();
+ } else if (typeDesc.equals(PORTID)) {
+ return LLDPTLV.TLVType.PortID.getValue();
+ } else if (typeDesc.equals(TTL)) {
+ return LLDPTLV.TLVType.TTL.getValue();
+ } else {
+ return LLDPTLV.TLVType.Unknown.getValue();
+ }
+ }
+
+ /**
+ * @param String
+ * - description of the type of TLV
+ * @return LLDPTLV - full TLV
+ */
+ public LLDPTLV getTLV(String type) {
+ return tlvList.get(getType(type));
+ }
+
+ /**
+ * @param String
+ * - description of the type of TLV
+ * @param LLDPTLV
+ * - tlv to set
+ * @return void
+ */
+ public void setTLV(String type, LLDPTLV tlv) {
+ tlvList.put(getType(type), tlv);
+ }
+
+ /**
+ * @return the chassisId TLV
+ */
+ public LLDPTLV getChassisId() {
+ return getTLV(CHASSISID);
+ }
+
+ /**
+ * @param LLDPTLV
+ * - the chassisId to set
+ */
+ public LLDP setChassisId(LLDPTLV chassisId) {
+ tlvList.put(getType(CHASSISID), chassisId);
+ return this;
+ }
+
+ /**
+ * @return the SystemName TLV
+ */
+ public LLDPTLV getSystemNameId() {
+ return getTLV(SYSTEMNAMEID);
+ }
+
+ /**
+ * @param LLDPTLV
+ * - the systemNameId to set
+ */
+ public LLDP setSystemNameId(LLDPTLV systemNameId) {
+ tlvList.put(getType(SYSTEMNAMEID), systemNameId);
+ return this;
+ }
+
+ /**
+ * @return LLDPTLV - the portId TLV
+ */
+ public LLDPTLV getPortId() {
+ return tlvList.get(getType(PORTID));
+ }
+
+ /**
+ * @param LLDPTLV
+ * - the portId to set
+ * @return LLDP
+ */
+ public LLDP setPortId(LLDPTLV portId) {
+ tlvList.put(getType(PORTID), portId);
+ return this;
+ }
+
+ /**
+ * @return LLDPTLV - the ttl TLV
+ */
+ public LLDPTLV getTtl() {
+ return tlvList.get(getType(TTL));
+ }
+
+ /**
+ * @param LLDPTLV
+ * - the ttl to set
+ * @return LLDP
+ */
+ public LLDP setTtl(LLDPTLV ttl) {
+ tlvList.put(getType(TTL), ttl);
+ return this;
+ }
+
+ /**
+ * @return the optionalTLVList
+ */
+ public List<LLDPTLV> getOptionalTLVList() {
+ List<LLDPTLV> list = new ArrayList<LLDPTLV>();
+ for (Map.Entry<Byte, LLDPTLV> entry : tlvList.entrySet()) {
+ byte type = entry.getKey();
+ if ((type == LLDPTLV.TLVType.ChassisID.getValue())
+ || (type == LLDPTLV.TLVType.PortID.getValue())
+ || (type == LLDPTLV.TLVType.TTL.getValue())) {
+ continue;
+ } else {
+ list.add(entry.getValue());
+ }
+ }
+ return list;
+ }
+
+ /**
+ * @param optionalTLVList
+ * the optionalTLVList to set
+ * @return LLDP
+ */
+ public LLDP setOptionalTLVList(List<LLDPTLV> optionalTLVList) {
+ for (LLDPTLV tlv : optionalTLVList) {
+ tlvList.put(tlv.getType(), tlv);
+ }
+ return this;
+ }
+
+ @Override
+ public Packet deserialize(byte[] data, int bitOffset, int size)
+ throws PacketException {
+ int lldpOffset = bitOffset; // LLDP start
+ int lldpSize = size; // LLDP size
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("LLDP: {} (offset {} bitsize {})", new Object[] {
+ HexEncode.bytesToHexString(data), lldpOffset, lldpSize });
+ }
+ /*
+ * Deserialize the TLVs until we reach the end of the packet
+ */
+ while (lldpSize > 0) {
+ LLDPTLV tlv = new LLDPTLV();
+ tlv.deserialize(data, lldpOffset, lldpSize);
+ if (tlv.getType() == 0 && tlv.getLength() == 0) {
+ break;
+ }
+ int tlvSize = tlv.getTLVSize(); // Size of current TLV in bits
+ lldpOffset += tlvSize;
+ lldpSize -= tlvSize;
+ this.tlvList.put(tlv.getType(), tlv);
+ }
+ return this;
+ }
+
+ @Override
+ public byte[] serialize() throws PacketException {
+ int startOffset = 0;
+ byte[] serializedBytes = new byte[getLLDPPacketLength()];
+
+ for (Map.Entry<Byte, LLDPTLV> entry : tlvList.entrySet()) {
+ LLDPTLV tlv = entry.getValue();
+ int numBits = tlv.getTLVSize();
+ try {
+ BitBufferHelper.setBytes(serializedBytes, tlv.serialize(),
+ startOffset, numBits);
+ } catch (BufferException e) {
+ throw new PacketException(e.getMessage());
+ }
+ startOffset += numBits;
+ }
+ // Now add the empty LLDPTLV at the end
+ try {
+ BitBufferHelper.setBytes(serializedBytes,
+ LLDP.emptyTLV.serialize(), startOffset,
+ LLDP.emptyTLV.getTLVSize());
+ } catch (BufferException e) {
+ throw new PacketException(e.getMessage());
+ }
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("LLDP: serialized: {}",
+ HexEncode.bytesToHexString(serializedBytes));
+ }
+ return serializedBytes;
+ }
+
+ /**
+ * Returns the size of LLDP packet in bytes
+ *
+ * @return int - LLDP Packet size in bytes
+ */
+ private int getLLDPPacketLength() {
+ int len = 0;
+ LLDPTLV tlv;
+
+ for (Map.Entry<Byte, LLDPTLV> entry : this.tlvList.entrySet()) {
+ tlv = entry.getValue();
+ len += tlv.getTLVSize();
+ }
+ len += LLDP.emptyTLV.getTLVSize();
+
+ return len / NetUtils.NumBitsInAByte;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.commons.lang3.tuple.MutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+
+/**
+ * Class that represents the LLDPTLV objects
+ */
+
+public class LLDPTLV extends Packet {
+ private static final String TYPE = "Type";
+ private static final String LENGTH = "Length";
+ private static final String VALUE = "Value";
+ private static final int LLDPTLVFields = 3;
+ public static final byte[] OFOUI = new byte[] { (byte) 0x00, (byte) 0x26,
+ (byte) 0xe1 }; // OpenFlow OUI
+ public static final byte[] customTlvSubType = new byte[] { 0 };
+ public static final int customTlvOffset = OFOUI.length
+ + customTlvSubType.length;
+ public static final byte chassisIDSubType[] = new byte[] { 4 }; // MAC address for the system
+ public static final byte portIDSubType[] = new byte[] { 7 }; // locally assigned
+
+ public enum TLVType {
+ Unknown((byte) 0), ChassisID((byte) 1), PortID((byte) 2), TTL((byte) 3), PortDesc(
+ (byte) 4), SystemName((byte) 5), SystemDesc((byte) 6), Custom(
+ (byte) 127);
+
+ private byte value;
+
+ private TLVType(byte value) {
+ this.value = value;
+ }
+
+ public byte getValue() {
+ return value;
+ }
+ }
+
+ private static Map<String, Pair<Integer, Integer>> fieldCoordinates = new LinkedHashMap<String, Pair<Integer, Integer>>() {
+ private static final long serialVersionUID = 1L;
+
+ {
+ put(TYPE, new MutablePair<Integer, Integer>(0, 7));
+ put(LENGTH, new MutablePair<Integer, Integer>(7, 9));
+ put(VALUE, new MutablePair<Integer, Integer>(16, 0));
+ }
+ };
+
+ protected Map<String, byte[]> fieldValues;
+
+ /**
+ * Default constructor that creates and sets the hash map values and sets
+ * the payload to null
+ */
+ public LLDPTLV() {
+ payload = null;
+ fieldValues = new HashMap<String, byte[]>(LLDPTLVFields);
+ hdrFieldCoordMap = fieldCoordinates;
+ hdrFieldsMap = fieldValues;
+ }
+
+ /**
+ * Constructor that writes the passed LLDPTLV values to the hdrFieldsMap
+ */
+ public LLDPTLV(LLDPTLV other) {
+ for (Map.Entry<String, byte[]> entry : other.hdrFieldsMap.entrySet()) {
+ this.hdrFieldsMap.put(entry.getKey(), entry.getValue());
+ }
+ }
+
+ /**
+ * @return int - the length of TLV
+ */
+ public int getLength() {
+ return (int) BitBufferHelper.toNumber(fieldValues.get(LENGTH),
+ fieldCoordinates.get(LENGTH).getRight().intValue());
+ }
+
+ /**
+ * @return byte - the type of TLV
+ */
+ public byte getType() {
+ return BitBufferHelper.getByte(fieldValues.get(TYPE));
+ }
+
+ /**
+ * @return byte[] - the value field of TLV
+ */
+ public byte[] getValue() {
+ return fieldValues.get(VALUE);
+ }
+
+ /**
+ * @param byte - the type to set
+ * @return LLDPTLV
+ */
+ public LLDPTLV setType(byte type) {
+ byte[] lldpTLVtype = { type };
+ fieldValues.put(TYPE, lldpTLVtype);
+ return this;
+ }
+
+ /**
+ * @param short - the length to set
+ * @return LLDPTLV
+ */
+ public LLDPTLV setLength(short length) {
+ fieldValues.put(LENGTH, BitBufferHelper.toByteArray(length));
+ return this;
+ }
+
+ /**
+ * @param byte[] - the value to set
+ * @return LLDPTLV
+ */
+ public LLDPTLV setValue(byte[] value) {
+ fieldValues.put(VALUE, value);
+ return this;
+ }
+
+ @Override
+ public void setHeaderField(String headerField, byte[] readValue) {
+ hdrFieldsMap.put(headerField, readValue);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = super.hashCode();
+ result = prime * result
+ + ((fieldValues == null) ? 0 : fieldValues.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!super.equals(obj)) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ LLDPTLV other = (LLDPTLV) obj;
+ if (fieldValues == null) {
+ if (other.fieldValues != null) {
+ return false;
+ }
+ } else if (!fieldValues.equals(other.fieldValues)) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public int getfieldnumBits(String fieldName) {
+ // The VALUE field has no fixed width: its size in bits is derived at
+ // runtime from the LENGTH field currently stored in fieldValues.
+ if (fieldName.equals(VALUE)) {
+ return (NetUtils.NumBitsInAByte * BitBufferHelper.getShort(
+ fieldValues.get(LENGTH), fieldCoordinates.get(LENGTH)
+ .getRight().intValue()));
+ }
+ // All other fields have static widths recorded in fieldCoordinates.
+ return fieldCoordinates.get(fieldName).getRight();
+ }
+
+ /**
+ * Returns the size in bits of the whole TLV
+ *
+ * @return int - size in bits of full TLV (type + length + value)
+ */
+ public int getTLVSize() {
+ return (LLDPTLV.fieldCoordinates.get(TYPE).getRight() + // static
+ LLDPTLV.fieldCoordinates.get(LENGTH).getRight() + // static
+ getfieldnumBits(VALUE)); // variable
+ }
+
+ /**
+ * Creates the SystemName TLV value
+ *
+ * @param nodeId
+ * node identifier string
+ * @return the SystemName TLV value in byte array
+ */
+ static public byte[] createSystemNameTLVValue(String nodeId) {
+ // Pass the charset explicitly for consistency with
+ // createPortIDTLVValue()/createCustomTLVValue(); behavior is identical
+ // to the bare getBytes() call this replaces.
+ byte[] nid = nodeId.getBytes(Charset.defaultCharset());
+ return nid;
+ }
+
+ /**
+ * Creates the ChassisID TLV value including the subtype and ChassisID
+ * string
+ *
+ * @param nodeId
+ * node identifier string (hex encoded)
+ * @return the ChassisID TLV value in byte array: subtype bytes followed
+ * by a 6-byte, right-aligned, zero-padded chassis id
+ */
+ static public byte[] createChassisIDTLVValue(String nodeId) {
+ byte[] nid = HexEncode.bytesFromHexString(nodeId);
+ byte[] cid = new byte[6];
+ int srcPos = 0, dstPos = 0;
+
+ if (nid.length > cid.length) {
+ // Longer ids: keep only the trailing (least significant) 6 bytes.
+ srcPos = nid.length - cid.length;
+ } else {
+ // Shorter ids: right-align into cid, leaving leading zero padding.
+ dstPos = cid.length - nid.length;
+ }
+ // Copy only the bytes actually available: (cid.length - dstPos) equals
+ // nid.length when the id is shorter than 6 bytes. The previous code
+ // copied cid.length bytes unconditionally, which overran both arrays
+ // and threw ArrayIndexOutOfBoundsException for short ids.
+ System.arraycopy(nid, srcPos, cid, dstPos, cid.length - dstPos);
+
+ byte[] cidValue = new byte[cid.length + chassisIDSubType.length];
+
+ System.arraycopy(chassisIDSubType, 0, cidValue, 0,
+ chassisIDSubType.length);
+ System.arraycopy(cid, 0, cidValue, chassisIDSubType.length, cid.length);
+
+ return cidValue;
+ }
+
+ /**
+ * Creates the PortID TLV value including the subtype and PortID string
+ *
+ * @param portId
+ * port identifier string
+ * @return the PortID TLV value in byte array: subtype bytes followed by
+ * the encoded port id
+ */
+ static public byte[] createPortIDTLVValue(String portId) {
+ byte[] pid = portId.getBytes(Charset.defaultCharset());
+ byte[] pidValue = new byte[pid.length + portIDSubType.length];
+
+ // Layout: [portIDSubType][portId bytes]
+ System.arraycopy(portIDSubType, 0, pidValue, 0, portIDSubType.length);
+ System.arraycopy(pid, 0, pidValue, portIDSubType.length, pid.length);
+
+ return pidValue;
+ }
+
+ /**
+ * Creates the custom TLV value including OUI, subtype and custom string
+ *
+ * @param customString
+ * the custom payload string
+ * @return the custom TLV value in byte array: [OUI][subtype][payload]
+ */
+ static public byte[] createCustomTLVValue(String customString) {
+ byte[] customArray = customString.getBytes(Charset.defaultCharset());
+ byte[] customValue = new byte[customTlvOffset + customArray.length];
+
+ // customTlvOffset covers the OUI plus the subtype prefix.
+ System.arraycopy(OFOUI, 0, customValue, 0, OFOUI.length);
+ System.arraycopy(customTlvSubType, 0, customValue, OFOUI.length,
+ customTlvSubType.length);
+ System.arraycopy(customArray, 0, customValue, customTlvOffset,
+ customArray.length);
+
+ return customValue;
+ }
+
+ /**
+ * Retrieves the string from TLV value and returns it in HexString format
+ *
+ * @param tlvValue
+ * the TLV value; expected to start with the chassis id subtype
+ * byte(s), which are skipped
+ * @param tlvLen
+ * the TLV length
+ * @return the HexString
+ */
+ static public String getHexStringValue(byte[] tlvValue, int tlvLen) {
+ byte[] cidBytes = new byte[tlvLen - chassisIDSubType.length];
+ System.arraycopy(tlvValue, chassisIDSubType.length, cidBytes, 0,
+ cidBytes.length);
+ return HexEncode.bytesToHexStringFormat(cidBytes);
+ }
+
+ /**
+ * Retrieves the string from TLV value
+ *
+ * @param tlvValue
+ * the TLV value; expected to start with the port id subtype
+ * @param tlvLen
+ * the TLV length
+ * @return the string
+ */
+ static public String getStringValue(byte[] tlvValue, int tlvLen) {
+ byte[] pidSubType = new byte[portIDSubType.length];
+ byte[] pidBytes = new byte[tlvLen - portIDSubType.length];
+ System.arraycopy(tlvValue, 0, pidSubType, 0,
+ pidSubType.length);
+ System.arraycopy(tlvValue, portIDSubType.length, pidBytes, 0,
+ pidBytes.length);
+ // Subtype 0x3 appears to denote a binary (MAC-address style) port id,
+ // rendered as a hex string; other subtypes are treated as plain text.
+ // NOTE(review): confirm against the LLDP subtype constants.
+ if (pidSubType[0] == (byte) 0x3) {
+ return HexEncode.bytesToHexStringFormat(pidBytes);
+ } else {
+ return (new String(pidBytes, Charset.defaultCharset()));
+ }
+ }
+
+ /**
+ * Retrieves the custom string from the Custom TLV value which includes OUI,
+ * subtype and custom string
+ *
+ * @param customTlvValue
+ * the custom TLV value
+ * @param customTlvLen
+ * the custom TLV length
+ * @return the custom string, or an empty string when the value does not
+ * carry the expected OUI
+ */
+ static public String getCustomString(byte[] customTlvValue, int customTlvLen) {
+ String customString = "";
+ byte[] vendor = new byte[3];
+ System.arraycopy(customTlvValue, 0, vendor, 0, vendor.length);
+ if (Arrays.equals(vendor, LLDPTLV.OFOUI)) {
+ int customArrayLength = customTlvLen - customTlvOffset;
+ byte[] customArray = new byte[customArrayLength];
+ System.arraycopy(customTlvValue, customTlvOffset, customArray, 0,
+ customArrayLength);
+ // Decode via a Charset object: UTF-8 is guaranteed to be present,
+ // so this cannot fail. The previous String(byte[], String) overload
+ // forced a checked UnsupportedEncodingException that was silently
+ // swallowed by an empty catch block.
+ customString = new String(customArray, Charset.forName("UTF-8"));
+ }
+
+ return customString;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.net.Inet4Address;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class containing the common utility functions needed for operating on
+ * networking data structures
+ */
+public abstract class NetUtils {
+ protected static final Logger logger = LoggerFactory.getLogger(NetUtils.class);
+ /**
+ * Constant holding the number of bits in a byte
+ */
+ public static final int NumBitsInAByte = 8;
+
+ /**
+ * Constant holding the number of bytes in MAC Address
+ */
+ public static final int MACAddrLengthInBytes = 6;
+
+ /**
+ * Constant holding the number of words in MAC Address
+ */
+ public static final int MACAddrLengthInWords = 3;
+
+ /**
+ * Constant holding the broadcast MAC address
+ */
+ private static final byte[] BroadcastMACAddr = {-1, -1, -1, -1, -1, -1};
+
+ /**
+ * Converts a 4 bytes array into an integer number
+ *
+ * @param ba
+ * the 4 bytes long byte array
+ * @return the integer number, or 0 when {@code ba} is null or not
+ * exactly 4 bytes long
+ */
+ public static int byteArray4ToInt(byte[] ba) {
+ if (ba == null || ba.length != 4) {
+ return 0;
+ }
+ return (0xff & ba[0]) << 24 | (0xff & ba[1]) << 16 | (0xff & ba[2]) << 8 | (0xff & ba[3]);
+ }
+
+ /**
+ * Converts a 6 bytes array into a long number MAC addresses.
+ *
+ * @param ba
+ * The 6 bytes long byte array.
+ * @return The long number.
+ * Zero is returned if {@code ba} is {@code null} or
+ * the length of it is not six.
+ */
+ public static long byteArray6ToLong(byte[] ba) {
+ if (ba == null || ba.length != MACAddrLengthInBytes) {
+ return 0L;
+ }
+ long num = 0L;
+ int i = 0;
+ do {
+ num <<= NumBitsInAByte;
+ // Mask with 0xff to avoid sign extension of negative bytes
+ num |= 0xff & ba[i];
+ i++;
+ } while (i < MACAddrLengthInBytes);
+ return num;
+ }
+
+ /**
+ * Converts a long number to a 6 bytes array for MAC addresses.
+ *
+ * @param addr
+ * The long number.
+ * @return The byte array.
+ */
+ public static byte[] longToByteArray6(long addr){
+ byte[] mac = new byte[MACAddrLengthInBytes];
+ int i = MACAddrLengthInBytes - 1;
+ // Fill from the least significant byte backwards
+ do {
+ mac[i] = (byte) addr;
+ addr >>>= NumBitsInAByte;
+ i--;
+ } while (i >= 0);
+ return mac;
+ }
+
+ /**
+ * Converts an integer number into a 4 bytes array
+ *
+ * @param i
+ * the integer number
+ * @return the byte array (big-endian)
+ */
+ public static byte[] intToByteArray4(int i) {
+ return new byte[] { (byte) ((i >> 24) & 0xff), (byte) ((i >> 16) & 0xff), (byte) ((i >> 8) & 0xff),
+ (byte) (i & 0xff) };
+ }
+
+ /**
+ * Converts an IP address passed as integer value into the respective
+ * InetAddress object
+ *
+ * @param address
+ * the IP address in integer form
+ * @return the IP address in InetAddress form, or null on failure
+ */
+ public static InetAddress getInetAddress(int address) {
+ InetAddress ip = null;
+ try {
+ ip = InetAddress.getByAddress(NetUtils.intToByteArray4(address));
+ } catch (UnknownHostException e) {
+ logger.error("", e);
+ }
+ return ip;
+ }
+
+ /**
+ * Return the InetAddress Network Mask given the length of the prefix bit
+ * mask. The prefix bit mask indicates the contiguous leading bits that are
+ * NOT masked out. Example: A prefix bit mask length of 8 will give an
+ * InetAddress Network Mask of 255.0.0.0
+ *
+ * @param prefixMaskLength
+ * integer representing the length of the prefix network mask
+ * @param isV6
+ * boolean representing the IP version of the returned address
+ * @return the InetAddress network mask, or null when the prefix length is
+ * out of range for the requested IP version
+ */
+ public static InetAddress getInetNetworkMask(int prefixMaskLength, boolean isV6) {
+ if (prefixMaskLength < 0 || (!isV6 && prefixMaskLength > 32) || (isV6 && prefixMaskLength > 128)) {
+ return null;
+ }
+ byte v4Address[] = { 0, 0, 0, 0 };
+ byte v6Address[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ byte address[] = (isV6) ? v6Address : v4Address;
+ int numBytes = prefixMaskLength / 8;
+ int numBits = prefixMaskLength % 8;
+ int i = 0;
+ // Set all fully-covered bytes to 0xff
+ for (; i < numBytes; i++) {
+ address[i] = (byte) 0xff;
+ }
+ // Set the remaining leading bits of the partially-covered byte
+ if (numBits > 0) {
+ int rem = 0;
+ for (int j = 0; j < numBits; j++) {
+ rem |= 1 << (7 - j);
+ }
+ address[i] = (byte) rem;
+ }
+
+ try {
+ return InetAddress.getByAddress(address);
+ } catch (UnknownHostException e) {
+ logger.error("", e);
+ }
+ return null;
+ }
+
+ /**
+ * Returns the prefix size in bits of the specified subnet mask. Example:
+ * For the subnet mask ff.ff.ff.e0 it returns 27 while for ff.00.00.00 it
+ * returns 8. If the passed subnetMask array is null, 0 is returned.
+ *
+ * @param subnetMask
+ * the subnet mask as byte array
+ * @return the prefix length as number of bits
+ */
+ public static int getSubnetMaskLength(byte[] subnetMask) {
+ int maskLength = 0;
+ if (subnetMask != null && (subnetMask.length == 4 || subnetMask.length == 16)) {
+ int index = 0;
+ // Count whole 0xff bytes first
+ while (index < subnetMask.length && subnetMask[index] == (byte) 0xFF) {
+ maskLength += NetUtils.NumBitsInAByte;
+ index++;
+ }
+ // Then count leading one-bits of the first non-0xff byte
+ if (index != subnetMask.length) {
+ int bits = NetUtils.NumBitsInAByte - 1;
+ while (bits >= 0 && (subnetMask[index] & 1 << bits) != 0) {
+ bits--;
+ maskLength++;
+ }
+ }
+ }
+ return maskLength;
+ }
+
+ /**
+ * Returns the prefix size in bits of the specified subnet mask. Example:
+ * For the subnet mask 255.255.255.128 it returns 25 while for 255.0.0.0 it
+ * returns 8. If the passed subnetMask object is null, 0 is returned
+ *
+ * @param subnetMask
+ * the subnet mask as InetAddress
+ * @return the prefix length as number of bits
+ */
+ public static int getSubnetMaskLength(InetAddress subnetMask) {
+ return subnetMask == null ? 0 : NetUtils.getSubnetMaskLength(subnetMask.getAddress());
+ }
+
+ /**
+ * Given an IP address and a prefix network mask length, it returns the
+ * equivalent subnet prefix IP address Example: for ip = "172.28.30.254" and
+ * maskLen = 25 it will return "172.28.30.128"
+ *
+ * @param ip
+ * the IP address in InetAddress form
+ * @param maskLen
+ * the length of the prefix network mask
+ * @return the subnet prefix IP address in InetAddress form
+ */
+ public static InetAddress getSubnetPrefix(InetAddress ip, int maskLen) {
+ int bytes = maskLen / 8;
+ int bits = maskLen % 8;
+ byte modifiedByte;
+ // getAddress() returns a fresh copy, so mutating sn is safe
+ byte[] sn = ip.getAddress();
+ if (bits > 0) {
+ // Clear the low (8 - bits) bits of the boundary byte
+ modifiedByte = (byte) (sn[bytes] >> (8 - bits));
+ sn[bytes] = (byte) (modifiedByte << (8 - bits));
+ bytes++;
+ }
+ // Zero out all remaining host bytes
+ for (; bytes < sn.length; bytes++) {
+ sn[bytes] = (byte) (0);
+ }
+ try {
+ return InetAddress.getByAddress(sn);
+ } catch (UnknownHostException e) {
+ return null;
+ }
+ }
+
+ /**
+ * Checks if the test address and mask conflicts with the filter address and
+ * mask
+ *
+ * For example:
+ * testAddress: 172.28.2.23
+ * testMask: 255.255.255.0
+ * filterAddress: 172.28.1.10
+ * filterMask: 255.255.255.0
+ * do conflict
+ *
+ * testAddress: 172.28.2.23
+ * testMask: 255.255.255.0
+ * filterAddress: 172.28.1.10
+ * filterMask: 255.255.0.0
+ * do not conflict
+ *
+ * Null parameters are permitted
+ *
+ * @param testAddress
+ * @param filterAddress
+ * @param testMask
+ * @param filterMask
+ * @return true if the address/mask pairs conflict, false otherwise
+ */
+ public static boolean inetAddressConflict(InetAddress testAddress, InetAddress filterAddress, InetAddress testMask,
+ InetAddress filterMask) {
+ // Sanity check
+ if ((testAddress == null) || (filterAddress == null)) {
+ return false;
+ }
+
+ // Presence check
+ if (isAny(testAddress) || isAny(filterAddress)) {
+ return false;
+ }
+
+ // A null mask means a full host mask (/32 or /128)
+ int testMaskLen = (testMask == null) ? ((testAddress instanceof Inet4Address) ? 32 : 128) : NetUtils
+ .getSubnetMaskLength(testMask);
+ int filterMaskLen = (filterMask == null) ? ((testAddress instanceof Inet4Address) ? 32 : 128) : NetUtils
+ .getSubnetMaskLength(filterMask);
+
+ // Mask length check. Test mask has to be more specific than filter one
+ if (testMaskLen < filterMaskLen) {
+ return true;
+ }
+
+ // Subnet Prefix on filter mask length must be the same
+ InetAddress prefix1 = getSubnetPrefix(testAddress, filterMaskLen);
+ InetAddress prefix2 = getSubnetPrefix(filterAddress, filterMaskLen);
+ return (!prefix1.equals(prefix2));
+ }
+
+ /**
+ * Returns true if the passed MAC address is all zero
+ *
+ * @param mac
+ * the byte array representing the MAC address; assumed to hold
+ * at least 6 bytes
+ * @return true if all MAC bytes are zero
+ */
+ public static boolean isZeroMAC(byte[] mac) {
+ for (short i = 0; i < 6; i++) {
+ if (mac[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Returns true if the MAC address is the broadcast MAC address and false
+ * otherwise.
+ *
+ * @param MACAddress
+ * @return true if the address is ff:ff:ff:ff:ff:ff
+ */
+ public static boolean isBroadcastMACAddr(byte[] MACAddress) {
+ if (MACAddress.length == MACAddrLengthInBytes) {
+ for (int i = 0; i < 6; i++) {
+ if (MACAddress[i] != BroadcastMACAddr[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ return false;
+ }
+ /**
+ * Returns true if the MAC address is a unicast MAC address and false
+ * otherwise.
+ *
+ * @param MACAddress
+ * @return true if the I/G bit (lowest bit of the first byte) is clear
+ */
+ public static boolean isUnicastMACAddr(byte[] MACAddress) {
+ if (MACAddress.length == MACAddrLengthInBytes) {
+ return (MACAddress[0] & 1) == 0;
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if the MAC address is a multicast MAC address and false
+ * otherwise. Note that this explicitly returns false for the broadcast MAC
+ * address.
+ *
+ * @param MACAddress
+ * @return true if the I/G bit is set and the address is not broadcast
+ */
+ public static boolean isMulticastMACAddr(byte[] MACAddress) {
+ if (MACAddress.length == MACAddrLengthInBytes && !isBroadcastMACAddr(MACAddress)) {
+ return (MACAddress[0] & 1) != 0;
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if the passed InetAddress contains all zero
+ *
+ * @param ip
+ * the IP address to test
+ * @return true if the address is all zero
+ */
+ public static boolean isAny(InetAddress ip) {
+ for (byte b : ip.getAddress()) {
+ if (b != 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Two fields conflict only when both are set and differ; 0 means unset.
+ public static boolean fieldsConflict(int field1, int field2) {
+ if ((field1 == 0) || (field2 == 0) || (field1 == field2)) {
+ return false;
+ }
+ return true;
+ }
+
+ // NOTE(review): getByName may perform a DNS lookup for non-literal input.
+ public static InetAddress parseInetAddress(String addressString) {
+ InetAddress address = null;
+ try {
+ address = InetAddress.getByName(addressString);
+ } catch (UnknownHostException e) {
+ logger.error("", e);
+ }
+ return address;
+ }
+
+ /**
+ * Checks if the passed IP v4 address in string form is valid The address
+ * may specify a mask at the end as "/MM"
+ *
+ * @param cidr
+ * the v4 address as A.B.C.D/MM
+ * @return true if the address (and optional mask) is a valid IPv4 CIDR
+ */
+ public static boolean isIPv4AddressValid(String cidr) {
+ if (cidr == null) {
+ return false;
+ }
+
+ String values[] = cidr.split("/");
+ Pattern ipv4Pattern = Pattern
+ .compile("(([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.){3}([01]?\\d\\d?|2[0-4]\\d|25[0-5])");
+ Matcher mm = ipv4Pattern.matcher(values[0]);
+ if (!mm.matches()) {
+ return false;
+ }
+ if (values.length >= 2) {
+ int prefix = Integer.valueOf(values[1]);
+ if ((prefix < 0) || (prefix > 32)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Checks if the passed IP v6 address in string form is valid The address
+ * may specify a mask at the end as "/MMM"
+ *
+ * @param cidr
+ * the v6 address as A::1/MMM
+ * @return true if the address (and optional mask) is a valid IPv6 CIDR
+ */
+ public static boolean isIPv6AddressValid(String cidr) {
+ if (cidr == null) {
+ return false;
+ }
+
+ String values[] = cidr.split("/");
+ try {
+ // when given an IP address, InetAddress.getByName validates the ip
+ // address
+ InetAddress addr = InetAddress.getByName(values[0]);
+ if (!(addr instanceof Inet6Address)) {
+ return false;
+ }
+ } catch (UnknownHostException ex) {
+ return false;
+ }
+
+ if (values.length >= 2) {
+ int prefix = Integer.valueOf(values[1]);
+ if ((prefix < 0) || (prefix > 128)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Checks if the passed IP address in string form is a valid v4 or v6
+ * address. The address may specify a mask at the end as "/MMM"
+ *
+ * @param cidr
+ * the v4 or v6 address as IP/MMM
+ * @return true if the string is a valid IPv4 or IPv6 CIDR
+ */
+ public static boolean isIPAddressValid(String cidr) {
+ return NetUtils.isIPv4AddressValid(cidr) || NetUtils.isIPv6AddressValid(cidr);
+ }
+
+ /*
+ * Following utilities are useful when you need to compare or bit shift java
+ * primitive type variable which are inherently signed
+ */
+ /**
+ * Returns the unsigned value of the passed byte variable
+ *
+ * @param b
+ * the byte value
+ * @return the int variable containing the unsigned byte value
+ */
+ public static int getUnsignedByte(byte b) {
+ return b & 0xFF;
+ }
+
+ /**
+ * Return the unsigned value of the passed short variable
+ *
+ * @param s
+ * the short value
+ * @return the int variable containing the unsigned short value
+ */
+ public static int getUnsignedShort(short s) {
+ return s & 0xFFFF;
+ }
+
+ /**
+ * Returns the highest v4 or v6 InetAddress
+ *
+ * @param v6
+ * true for IPv6, false for Ipv4
+ * @return The highest IPv4 or IPv6 address
+ */
+ public static InetAddress gethighestIP(boolean v6) {
+ try {
+ return (v6) ? InetAddress.getByName("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") : InetAddress
+ .getByName("255.255.255.255");
+ } catch (UnknownHostException e) {
+ return null;
+ }
+ }
+
+ /**
+ * Returns Broadcast MAC Address
+ *
+ * @return the byte array containing broadcast mac address
+ */
+ public static byte[] getBroadcastMACAddr() {
+ // Defensive copy so callers cannot mutate the shared constant
+ return Arrays.copyOf(BroadcastMACAddr, BroadcastMACAddr.length);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Abstract class which represents the generic network packet object It provides
+ * the basic methods which are common for all the packets, like serialize and
+ * deserialize
+ */
+
+public abstract class Packet {
+ protected static final Logger logger = LoggerFactory
+ .getLogger(Packet.class);
+ // Access level granted to this packet
+ protected boolean writeAccess;
+ // When deserialized from wire, packet could result corrupted
+ protected boolean corrupted;
+ // The packet that encapsulate this packet
+ protected Packet parent;
+ // The packet encapsulated by this packet
+ protected Packet payload;
+ // The unparsed raw payload carried by this packet
+ protected byte[] rawPayload;
+ // Bit coordinates of packet header fields
+ protected Map<String, Pair<Integer, Integer>> hdrFieldCoordMap;
+ // Header fields values: Map<FieldName,Value>
+ protected Map<String, byte[]> hdrFieldsMap;
+ // The class of the encapsulated packet object
+ protected Class<? extends Packet> payloadClass;
+
+ public Packet() {
+ writeAccess = false;
+ corrupted = false;
+ }
+
+ public Packet(boolean writeAccess) {
+ this.writeAccess = writeAccess;
+ corrupted = false;
+ }
+
+ public Packet getParent() {
+ return parent;
+ }
+
+ public Packet getPayload() {
+ return payload;
+ }
+
+ public void setParent(Packet parent) {
+ this.parent = parent;
+ }
+
+ public void setPayload(Packet payload) {
+ this.payload = payload;
+ }
+
+ public void setHeaderField(String headerField, byte[] readValue) {
+ hdrFieldsMap.put(headerField, readValue);
+ }
+
+ /**
+ * This method deserializes the data bits obtained from the wire into the
+ * respective header and payload which are of type Packet
+ *
+ * @param data data from wire to deserialize
+ * @param bitOffset bit position where packet header starts in data
+ * array
+ * @param size size of packet in bits
+ * @return this Packet, with header fields and payload populated
+ * @throws PacketException if a header field cannot be extracted
+ */
+ public Packet deserialize(byte[] data, int bitOffset, int size)
+ throws PacketException {
+
+ // Deserialize the header fields one by one
+ int startOffset = 0, numBits = 0;
+ for (Entry<String, Pair<Integer, Integer>> pairs : hdrFieldCoordMap
+ .entrySet()) {
+ String hdrField = pairs.getKey();
+ startOffset = bitOffset + this.getfieldOffset(hdrField);
+ numBits = this.getfieldnumBits(hdrField);
+
+ byte[] hdrFieldBytes = null;
+ try {
+ hdrFieldBytes = BitBufferHelper.getBits(data, startOffset,
+ numBits);
+ } catch (BufferException e) {
+ throw new PacketException(e.getMessage());
+ }
+
+ /*
+ * Store the raw read value, checks the payload type and set the
+ * payloadClass accordingly
+ */
+ this.setHeaderField(hdrField, hdrFieldBytes);
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("{}: {}: {} (offset {} bitsize {})",
+ new Object[] { this.getClass().getSimpleName(), hdrField,
+ HexEncode.bytesToHexString(hdrFieldBytes),
+ startOffset, numBits });
+ }
+ }
+
+ // Deserialize the payload now
+ int payloadStart = startOffset + numBits;
+ int payloadSize = data.length * NetUtils.NumBitsInAByte - payloadStart;
+
+ if (payloadClass != null) {
+ try {
+ payload = payloadClass.newInstance();
+ } catch (Exception e) {
+ throw new RuntimeException(
+ "Error parsing payload for Ethernet packet", e);
+ }
+ payload.deserialize(data, payloadStart, payloadSize);
+ payload.setParent(this);
+ } else {
+ /*
+ * The payload class was not set, it means no class for parsing
+ * this payload is present. Let's store the raw payload if any.
+ */
+ int start = payloadStart / NetUtils.NumBitsInAByte;
+ int stop = start + payloadSize / NetUtils.NumBitsInAByte;
+ rawPayload = Arrays.copyOfRange(data, start, stop);
+ }
+
+
+ // Take care of computation that can be done only after deserialization
+ postDeserializeCustomOperation(data, payloadStart - getHeaderSize());
+
+ return this;
+ }
+
+ /**
+ * This method serializes the header and payload from the respective
+ * packet class, into a single stream of bytes to be sent on the wire
+ *
+ * @return The byte array representing the serialized Packet
+ * @throws PacketException if a header field cannot be written
+ */
+ public byte[] serialize() throws PacketException {
+
+ // Acquire or compute the serialized payload
+ byte[] payloadBytes = null;
+ if (payload != null) {
+ payloadBytes = payload.serialize();
+ } else if (rawPayload != null) {
+ payloadBytes = rawPayload;
+ }
+ int payloadSize = (payloadBytes == null) ? 0 : payloadBytes.length;
+
+ // Allocate the buffer to contain the full (header + payload) packet
+ int headerSize = this.getHeaderSize() / NetUtils.NumBitsInAByte;
+ byte packetBytes[] = new byte[headerSize + payloadSize];
+ if (payloadBytes != null) {
+ System.arraycopy(payloadBytes, 0, packetBytes, headerSize, payloadSize);
+ }
+
+ // Serialize this packet header, field by field
+ for (Map.Entry<String, Pair<Integer, Integer>> pairs : hdrFieldCoordMap
+ .entrySet()) {
+ String field = pairs.getKey();
+ byte[] fieldBytes = hdrFieldsMap.get(field);
+ // Let's skip optional fields when not set
+ if (fieldBytes != null) {
+ try {
+ BitBufferHelper.setBytes(packetBytes, fieldBytes,
+ getfieldOffset(field), getfieldnumBits(field));
+ } catch (BufferException e) {
+ throw new PacketException(e.getMessage());
+ }
+ }
+ }
+
+ // Perform post serialize operations (like checksum computation)
+ postSerializeCustomOperation(packetBytes);
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("{}: {}", this.getClass().getSimpleName(),
+ HexEncode.bytesToHexString(packetBytes));
+ }
+
+ return packetBytes;
+ }
+
+ /**
+ * This method gets called at the end of the serialization process It is
+ * intended for the child packets to insert some custom data into the output
+ * byte stream which cannot be done or cannot be done efficiently during the
+ * normal Packet.serialize() path. An example is the checksum computation
+ * for IPv4
+ *
+ * @param myBytes serialized bytes
+ * @throws PacketException on failure of the custom operation
+ */
+ protected void postSerializeCustomOperation(byte[] myBytes)
+ throws PacketException {
+ // no op
+ }
+
+ /**
+ * This method re-computes the checksum of the bits received on the wire and
+ * validates it with the checksum in the bits received Since the computation
+ * of checksum varies based on the protocol, this method is overridden.
+ * Currently only IPv4 and ICMP do checksum computation and validation. TCP
+ * and UDP need to implement these if required.
+ *
+ * @param data The byte stream representing the Ethernet frame
+ * @param startBitOffset The bit offset from where the byte array
+ * corresponding to this Packet starts in the frame
+ * @throws PacketException on failure of the custom operation
+ */
+ protected void postDeserializeCustomOperation(byte[] data, int startBitOffset)
+ throws PacketException {
+ // no op
+ }
+
+ /**
+ * Gets the header length in bits
+ *
+ * @return int the header length in bits
+ */
+ public int getHeaderSize() {
+ int size = 0;
+ /*
+ * We need to iterate over the fields that were read in the frame
+ * (hdrFieldsMap) not all the possible ones described in
+ * hdrFieldCoordMap. For ex, 802.1Q may or may not be there
+ */
+ for (Map.Entry<String, byte[]> fieldEntry : hdrFieldsMap.entrySet()) {
+ if (fieldEntry.getValue() != null) {
+ String field = fieldEntry.getKey();
+ size += getfieldnumBits(field);
+ }
+ }
+ return size;
+ }
+
+ /**
+ * This method fetches the start bit offset for header field specified by
+ * 'fieldname'. The offset is present in the hdrFieldCoordMap of the
+ * respective packet class
+ *
+ * @param fieldName the header field name
+ * @return int - startOffset of the requested field
+ */
+ public int getfieldOffset(String fieldName) {
+ return hdrFieldCoordMap.get(fieldName).getLeft();
+ }
+
+ /**
+ * This method fetches the number of bits for header field specified by
+ * 'fieldname'. The numBits are present in the hdrFieldCoordMap of the
+ * respective packet class
+ *
+ * @param fieldName the header field name
+ * @return int - number of bits of the requested field
+ */
+ public int getfieldnumBits(String fieldName) {
+ return hdrFieldCoordMap.get(fieldName).getRight();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder ret = new StringBuilder();
+ ret.append(this.getClass().getSimpleName());
+ ret.append(": [");
+ for (String field : hdrFieldCoordMap.keySet()) {
+ byte[] value = hdrFieldsMap.get(field);
+ ret.append(field);
+ ret.append(": ");
+ ret.append(HexEncode.bytesToHexString(value));
+ ret.append(", ");
+ }
+ // Replace the trailing ", " left by the loop with the closing bracket.
+ // NOTE(review): with an empty hdrFieldCoordMap this replaces part of
+ // the opening "[" instead -- confirm the map is never empty here.
+ ret.replace(ret.length()-2, ret.length()-1, "]");
+ return ret.toString();
+ }
+
+ /**
+ * Returns the raw payload carried by this packet in case payload was not
+ * parsed. Caller can call this function in case the getPaylod() returns null.
+ *
+ * @return The raw payload if not parsable as an array of bytes, null otherwise
+ */
+ public byte[] getRawPayload() {
+ return rawPayload;
+ }
+
+ /**
+ * Set a raw payload in the packet class
+ *
+ * @param payload The raw payload as byte array
+ */
+ public void setRawPayload(byte[] payload) {
+ this.rawPayload = Arrays.copyOf(payload, payload.length);
+ }
+
+ /**
+ * Return whether the deserialized packet is to be considered corrupted.
+ * This is the case when the checksum computed after reconstructing the
+ * packet received from wire is not equal to the checksum read from the
+ * stream. For the Packet class which do not have a checksum field, this
+ * function will always return false.
+ *
+ *
+ * @return true if the deserialized packet's recomputed checksum is not
+ * equal to the packet carried checksum
+ */
+ public boolean isCorrupted() {
+ return corrupted;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = super.hashCode();
+ // NOTE(review): Map.hashCode over byte[] values hashes arrays by
+ // identity while equals() below compares contents with Arrays.equals,
+ // so equal packets may produce different hash codes.
+ result = prime * result
+ + ((this.hdrFieldsMap == null) ? 0 : hdrFieldsMap.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ // Guard against null before dereferencing obj: the previous version
+ // threw NullPointerException on equals(null), violating the
+ // Object.equals contract which requires false.
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ Packet other = (Packet) obj;
+ if (hdrFieldsMap == other.hdrFieldsMap) {
+ return true;
+ }
+ if (hdrFieldsMap == null || other.hdrFieldsMap == null) {
+ return false;
+ }
+ // Compare field values by content. NOTE(review): only this object's
+ // keys are checked, so the comparison is asymmetric when the two maps
+ // have different key sets; preserved as-is to avoid behavior change.
+ for (String field : hdrFieldsMap.keySet()) {
+ if (!Arrays.equals(hdrFieldsMap.get(field), other.hdrFieldsMap.get(field))) {
+ return false;
+ }
+ }
+ return true;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.liblldp;
+
+/**
+ * Describes an exception that is raised when the process of serializing or
+ * deserializing a network packet/stream fails. This generally happens when the
+ * packet/stream is malformed.
+ *
+ */
+public class PacketException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Creates a packet exception with the given detail message.
+ *
+ * @param message the detail message
+ */
+ public PacketException(String message) {
+ super(message);
+ }
+
+ /**
+ * Creates a packet exception with the given detail message and cause, so
+ * callers can preserve the originating exception instead of discarding
+ * its stack trace.
+ *
+ * @param message the detail message
+ * @param cause the underlying cause
+ */
+ public PacketException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
--- /dev/null
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.packet;
+
+import junit.framework.Assert;
+
+import org.junit.Test;
+import org.opendaylight.controller.liblldp.BitBufferHelper;
+
+public class BitBufferHelperTest {
+
+ @Test
+ public void testGetByte() {
+ // A single-byte buffer should round-trip through getByte() unchanged.
+ byte[] data = { 100 };
+ Assert.assertTrue(BitBufferHelper.getByte(data) == 100);
+ }
+
+ @Test
+ public void testGetBits() throws Exception {
+ byte[] data = { 10, 12, 14, 20, 55, 69, 82, 97, 109, 117, 127, -50 };
+ byte[] bits;
+
+ bits = BitBufferHelper.getBits(data, 88, 8); //BYTE extraOffsetBits = extranumBits = 0
+ Assert.assertTrue(bits[0] == -50);
+
+ bits = BitBufferHelper.getBits(data, 8, 16); //Short
+ Assert.assertTrue(bits[0] == 12);
+ Assert.assertTrue(bits[1] == 14);
+
+ bits = BitBufferHelper.getBits(data, 32, 32); //Int
+ Assert.assertTrue(bits[0] == 55);
+ Assert.assertTrue(bits[1] == 69);
+ Assert.assertTrue(bits[2] == 82);
+ Assert.assertTrue(bits[3] == 97);
+
+ bits = BitBufferHelper.getBits(data, 16, 48); //Long
+ Assert.assertTrue(bits[0] == 14);
+ Assert.assertTrue(bits[1] == 20);
+ Assert.assertTrue(bits[2] == 55);
+ Assert.assertTrue(bits[3] == 69);
+ Assert.assertTrue(bits[4] == 82);
+ Assert.assertTrue(bits[5] == 97);
+
+ bits = BitBufferHelper.getBits(data, 40, 7); //BYTE extraOffsetBits = extranumBits != 0
+ Assert.assertTrue(bits[0] == 34);
+
+ bits = BitBufferHelper.getBits(data, 8, 13); //Short
+ Assert.assertTrue(bits[0] == 1);
+ Assert.assertTrue(bits[1] == -127);
+
+ bits = BitBufferHelper.getBits(data, 32, 28); //Int
+ Assert.assertTrue(bits[0] == 3);
+ Assert.assertTrue(bits[1] == 116);
+ Assert.assertTrue(bits[2] == 85);
+ Assert.assertTrue(bits[3] == 38);
+
+ bits = BitBufferHelper.getBits(data, 16, 41); //Long
+ Assert.assertTrue(bits[0] == 0);
+ Assert.assertTrue(bits[1] == 28);
+ Assert.assertTrue(bits[2] == 40);
+ Assert.assertTrue(bits[3] == 110);
+ Assert.assertTrue(bits[4] == -118);
+ Assert.assertTrue(bits[5] == -92);
+
+ bits = BitBufferHelper.getBits(data, 3, 7); //BYTE extraOffsetBits != 0; extranumBits == 0
+ Assert.assertTrue(bits[0] == 40);
+
+ bits = BitBufferHelper.getBits(data, 13, 16); //Short
+ Assert.assertTrue(bits[0] == -127);
+ Assert.assertTrue(bits[1] == -62);
+
+ bits = BitBufferHelper.getBits(data, 5, 32); //Int
+ Assert.assertTrue(bits[0] == 65);
+ Assert.assertTrue(bits[1] == -127);
+ Assert.assertTrue(bits[2] == -62);
+ Assert.assertTrue(bits[3] == -122);
+
+ bits = BitBufferHelper.getBits(data, 23, 48); //Long
+ Assert.assertTrue(bits[0] == 10);
+ Assert.assertTrue(bits[1] == 27);
+ Assert.assertTrue(bits[2] == -94);
+ Assert.assertTrue(bits[3] == -87);
+ Assert.assertTrue(bits[4] == 48);
+ Assert.assertTrue(bits[5] == -74);
+
+ bits = BitBufferHelper.getBits(data, 66, 9); //BYTE extraOffsetBits != 0; extranumBits != 0
+ Assert.assertTrue(bits[0] == 1);
+ Assert.assertTrue(bits[1] == 107);
+
+ bits = BitBufferHelper.getBits(data, 13, 15); //Short
+ Assert.assertTrue(bits[0] == 64);
+ Assert.assertTrue(bits[1] == -31);
+
+ bits = BitBufferHelper.getBits(data, 5, 29); //Int
+ Assert.assertTrue(bits[0] == 8);
+ Assert.assertTrue(bits[1] == 48);
+ Assert.assertTrue(bits[2] == 56);
+ Assert.assertTrue(bits[3] == 80);
+
+ bits = BitBufferHelper.getBits(data, 31, 43); //Long
+ Assert.assertTrue(bits[0] == 0);
+ Assert.assertTrue(bits[1] == -35);
+ Assert.assertTrue(bits[2] == 21);
+ Assert.assertTrue(bits[3] == 73);
+ Assert.assertTrue(bits[4] == -123);
+ Assert.assertTrue(bits[5] == -75);
+
+ bits = BitBufferHelper.getBits(data, 4, 12); //Short
+ Assert.assertTrue(bits[0] == 10);
+ Assert.assertTrue(bits[1] == 12);
+
+ byte[] data1 = { 0, 8 };
+ bits = BitBufferHelper.getBits(data1, 7, 9); //Short
+ Assert.assertTrue(bits[0] == 0);
+ Assert.assertTrue(bits[1] == 8);
+
+ byte[] data2 = { 2, 8 };
+ bits = BitBufferHelper.getBits(data2, 0, 7); //Short
+ Assert.assertTrue(bits[0] == 1);
+
+ bits = BitBufferHelper.getBits(data2, 7, 9); //Short
+ Assert.assertTrue(bits[0] == 0);
+ Assert.assertTrue(bits[1] == 8);
+ }
+
+ // [01101100][01100000]
+ // [01100011]
+ @Test
+ public void testGetBytes() throws Exception {
+ byte data[] = { 108, 96, 125, -112, 5, 6, 108, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22 };
+ byte[] x;
+
+ Assert.assertTrue(BitBufferHelper.getBits(data, 0, 8)[0] == 108);
+ Assert.assertTrue(BitBufferHelper.getBits(data, 8, 8)[0] == 96);
+
+ x = BitBufferHelper.getBits(data, 0, 10);
+ Assert.assertTrue(x[0] == 1);
+ Assert.assertTrue(x[1] == -79);
+
+ x = BitBufferHelper.getBits(data, 3, 8);
+ Assert.assertTrue(x[0] == 99);
+ //Assert.assertTrue(x[1] == 97);
+
+ }
+
+ @Test
+ public void testMSBMask() {
+ int numBits = 1; //MSB
+ int mask = BitBufferHelper.getMSBMask(numBits);
+ Assert.assertTrue(mask == 128);
+
+ numBits = 8;
+ mask = BitBufferHelper.getMSBMask(numBits);
+ Assert.assertTrue(mask == 255);
+
+ numBits = 2;
+ mask = BitBufferHelper.getMSBMask(numBits);
+ Assert.assertTrue(mask == 192);
+ }
+
+ @Test
+ public void testLSBMask() {
+ int numBits = 1; //LSB
+ int mask = BitBufferHelper.getLSBMask(numBits);
+ Assert.assertTrue(mask == 1);
+
+ numBits = 3;
+ mask = BitBufferHelper.getLSBMask(numBits);
+ Assert.assertTrue(mask == 7);
+
+ numBits = 8;
+ mask = BitBufferHelper.getLSBMask(numBits);
+ Assert.assertTrue(mask == 255);
+ }
+
+ @Test
+ public void testToByteArray() {
+ short sh = Short.MAX_VALUE;
+ byte[] data_sh = new byte[Byte.SIZE / 8];
+ data_sh = BitBufferHelper.toByteArray(sh);
+ Assert.assertTrue(data_sh[0] == 127);
+ Assert.assertTrue(data_sh[1] == -1);
+
+ short sh2 = Short.MIN_VALUE;
+ byte[] data_sh2 = new byte[Byte.SIZE / 8];
+ data_sh2 = BitBufferHelper.toByteArray(sh2);
+ Assert.assertTrue(data_sh2[0] == -128);
+ Assert.assertTrue(data_sh2[1] == 0);
+
+ short sh3 = 16384;
+ byte[] data_sh3 = new byte[Byte.SIZE / 8];
+ data_sh3 = BitBufferHelper.toByteArray(sh3);
+ Assert.assertTrue(data_sh3[0] == 64);
+ Assert.assertTrue(data_sh3[1] == 0);
+
+ short sh4 = 146; //TCP headerlenflags - startoffset = 103
+ byte[] data_sh4 = new byte[Byte.SIZE / 8];
+ data_sh4 = BitBufferHelper.toByteArray(sh4);
+ Assert.assertTrue(data_sh4[0] == 0);
+ Assert.assertTrue(data_sh4[1] == -110);
+
+ short sh4_2 = 5000; //IPv4 Offset - startOffset = 51 (to 63)
+ byte[] data_sh4_2 = new byte[Byte.SIZE / 8];
+ data_sh4_2 = BitBufferHelper.toByteArray(sh4_2);
+ Assert.assertTrue(data_sh4_2[0] == 19);
+ Assert.assertTrue(data_sh4_2[1] == -120);
+
+ short sh4_3 = 5312; //numEndRestBits < numBitstoShiftBy
+ byte[] data_sh4_3 = new byte[Byte.SIZE / 8];
+ data_sh4_3 = BitBufferHelper.toByteArray(sh4_3);
+ Assert.assertTrue(data_sh4_3[0] == 20);
+ Assert.assertTrue(data_sh4_3[1] == -64);
+
+ int Int = Integer.MAX_VALUE;
+ byte[] data_Int = new byte[Integer.SIZE / 8];
+ data_Int = BitBufferHelper.toByteArray(Int);
+ Assert.assertTrue(data_Int[0] == 127);
+ Assert.assertTrue(data_Int[1] == -1);
+ Assert.assertTrue(data_Int[2] == -1);
+ Assert.assertTrue(data_Int[3] == -1);
+
+ int Int2 = Integer.MIN_VALUE;
+ byte[] data_Int2 = new byte[Integer.SIZE / 8];
+ data_Int2 = BitBufferHelper.toByteArray(Int2);
+ Assert.assertTrue(data_Int2[0] == -128);
+ Assert.assertTrue(data_Int2[1] == 0);
+ Assert.assertTrue(data_Int2[2] == 0);
+ Assert.assertTrue(data_Int2[3] == 0);
+
+ int Int3 = 1077952576;
+ byte[] data_Int3 = new byte[Integer.SIZE / 8];
+ data_Int3 = BitBufferHelper.toByteArray(Int3);
+ Assert.assertTrue(data_Int3[0] == 64);
+ Assert.assertTrue(data_Int3[1] == 64);
+ Assert.assertTrue(data_Int3[2] == 64);
+ Assert.assertTrue(data_Int3[3] == 64);
+
+ long Lng = Long.MAX_VALUE;
+ byte[] data_lng = new byte[Long.SIZE / 8];
+ data_lng = BitBufferHelper.toByteArray(Lng);
+ Assert.assertTrue(data_lng[0] == 127);
+ Assert.assertTrue(data_lng[1] == -1);
+ Assert.assertTrue(data_lng[2] == -1);
+ Assert.assertTrue(data_lng[3] == -1);
+ Assert.assertTrue(data_lng[4] == -1);
+ Assert.assertTrue(data_lng[5] == -1);
+ Assert.assertTrue(data_lng[6] == -1);
+ Assert.assertTrue(data_lng[7] == -1);
+
+ long Lng2 = Long.MIN_VALUE;
+ byte[] data_lng2 = new byte[Long.SIZE / 8];
+ data_lng2 = BitBufferHelper.toByteArray(Lng2);
+ Assert.assertTrue(data_lng2[0] == -128);
+ Assert.assertTrue(data_lng2[1] == 0);
+ Assert.assertTrue(data_lng2[2] == 0);
+ Assert.assertTrue(data_lng2[3] == 0);
+ Assert.assertTrue(data_lng2[4] == 0);
+ Assert.assertTrue(data_lng2[5] == 0);
+ Assert.assertTrue(data_lng2[6] == 0);
+ Assert.assertTrue(data_lng2[7] == 0);
+
+ byte B = Byte.MAX_VALUE;
+ byte[] data_B = new byte[Byte.SIZE / 8];
+ data_B = BitBufferHelper.toByteArray(B);
+ Assert.assertTrue(data_B[0] == 127);
+
+ byte B1 = Byte.MIN_VALUE;
+ byte[] data_B1 = new byte[Byte.SIZE / 8];
+ data_B1 = BitBufferHelper.toByteArray(B1);
+ Assert.assertTrue(data_B1[0] == -128);
+
+ byte B2 = 64;
+ byte[] data_B2 = new byte[Byte.SIZE / 8];
+ data_B2 = BitBufferHelper.toByteArray(B2);
+ Assert.assertTrue(data_B2[0] == 64);
+
+ byte B3 = 32;
+ byte[] data_B3 = new byte[Byte.SIZE / 8];
+ data_B3 = BitBufferHelper.toByteArray(B3);
+ Assert.assertTrue(data_B3[0] == 32);
+
+ }
+
+ @Test
+ public void testToByteArrayVariable() {
+ int len = 9;
+ byte[] data_sh;
+ data_sh = BitBufferHelper.toByteArray(511, len);
+ Assert.assertTrue(data_sh[0] == (byte) 255);
+ Assert.assertTrue(data_sh[1] == (byte) 128);
+
+ data_sh = BitBufferHelper.toByteArray((int) 511, len);
+ Assert.assertTrue(data_sh[0] == (byte) 255);
+ Assert.assertTrue(data_sh[1] == (byte) 128);
+
+ data_sh = BitBufferHelper.toByteArray((long) 511, len);
+ Assert.assertTrue(data_sh[0] == (byte) 255);
+ Assert.assertTrue(data_sh[1] == (byte) 128);
+ }
+
+ @Test
+ public void testToInt() {
+ byte data[] = { 1 };
+ Assert.assertTrue(BitBufferHelper.toNumber(data) == 1);
+
+ byte data2[] = { 1, 1 };
+ Assert.assertTrue(BitBufferHelper.toNumber(data2) == 257);
+
+ byte data3[] = { 1, 1, 1 };
+ Assert.assertTrue(BitBufferHelper.toNumber(data3) == 65793);
+ }
+
+ @Test
+ public void testToLongGetter() {
+ byte data[] = { 1, 1 };
+ Assert.assertTrue(BitBufferHelper.getLong(data) == 257L);
+ }
+
+ @Test
+ public void testSetByte() throws Exception {
+ byte input;
+ byte[] data = new byte[20];
+
+ input = 125;
+ BitBufferHelper.setByte(data, input, 0, Byte.SIZE);
+ Assert.assertTrue(data[0] == 125);
+
+ input = 109;
+ BitBufferHelper.setByte(data, input, 152, Byte.SIZE);
+ Assert.assertTrue(data[19] == 109);
+ }
+
+ @Test
+ public void testSetBytes() throws Exception {
+ byte[] input = { 0, 1 };
+ byte[] data = { 6, 0 };
+
+ BitBufferHelper.setBytes(data, input, 7, 9);
+ Assert.assertTrue(data[0] == 6);
+ Assert.assertTrue(data[1] == 1);
+ }
+
+ //@Test
+ //INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]*/
+ public void testInsertBits() throws Exception {
+ //CASE 1: startOffset%8 == 0 && numBits%8 == 0
+ byte inputdata[] = { 75, 110, 107, 80, 10, 12, 35, 100, 125, 65 };
+ int startOffset = 0;
+ int numBits = 8;
+
+ byte data1[] = new byte[2];
+ startOffset = 0;
+ numBits = 16;
+ BitBufferHelper.insertBits(data1, inputdata, startOffset, numBits);
+ Assert.assertTrue(data1[0] == 75);
+ Assert.assertTrue(data1[1] == 110);
+
+ byte data2[] = new byte[4];
+ startOffset = 0;
+ numBits = 32;
+ BitBufferHelper.insertBits(data2, inputdata, startOffset, numBits);
+ Assert.assertTrue(data2[0] == 75);
+ Assert.assertTrue(data2[1] == 110);
+ Assert.assertTrue(data2[2] == 107);
+ Assert.assertTrue(data2[3] == 80);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] // OUTPUT: [01001011] [01101000] = {75, 104}
+ byte data10[] = new byte[2];
+ startOffset = 0;
+ numBits = 13;
+ BitBufferHelper.insertBits(data10, inputdata, startOffset, numBits);
+ Assert.assertTrue(data10[0] == 75);
+ Assert.assertTrue(data10[1] == 104);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] // OUTPUT: [01001000] = {72}
+ byte data11[] = new byte[4];
+ startOffset = 8;
+ numBits = 6;
+ BitBufferHelper.insertBits(data11, inputdata, startOffset, numBits);
+ Assert.assertTrue(data11[1] == 72);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01101000] = {75, 110, 105}
+ byte data12[] = new byte[4];
+ startOffset = 0;
+ numBits = 23;
+ BitBufferHelper.insertBits(data12, inputdata, startOffset, numBits);
+ Assert.assertTrue(data12[0] == 75);
+ Assert.assertTrue(data12[1] == 110);
+ Assert.assertTrue(data12[2] == 106);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01100000] = {75, 110, 96}
+ byte data13[] = new byte[4];
+ startOffset = 8;
+ numBits = 20;
+ BitBufferHelper.insertBits(data13, inputdata, startOffset, numBits);
+ Assert.assertTrue(data13[1] == 75);
+ Assert.assertTrue(data13[2] == 110);
+ Assert.assertTrue(data13[3] == 96);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [01001011] [01101110] [01101011] [10100000]= {75, 110, 107, 80}
+ byte data14[] = new byte[4];
+ startOffset = 0;
+ numBits = 30;
+ BitBufferHelper.insertBits(data14, inputdata, startOffset, numBits);
+ Assert.assertTrue(data14[0] == 75);
+ Assert.assertTrue(data14[1] == 110);
+ Assert.assertTrue(data14[2] == 107);
+ Assert.assertTrue(data14[3] == 80);
+
+ //CASE 3: startOffset%8 != 0, numBits%8 = 0
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00001001] [11000000] = {72, 96}
+ byte data16[] = new byte[5];
+ startOffset = 3;
+ numBits = 8;
+ BitBufferHelper.insertBits(data16, inputdata, startOffset, numBits);
+ Assert.assertTrue(data16[0] == 9);
+ Assert.assertTrue(data16[1] == 96);
+ Assert.assertTrue(data16[2] == 0);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [00000100] [1011 0110] [1110 0000] = {4, -54, -96}
+
+ startOffset = 3;
+ numBits = 16;
+ byte data17[] = new byte[5];
+ BitBufferHelper.insertBits(data17, inputdata, startOffset, numBits);
+ Assert.assertTrue(data17[0] == 9);
+ Assert.assertTrue(data17[1] == 109);
+ Assert.assertTrue(data17[2] == -64);
+ Assert.assertTrue(data17[3] == 0);
+
+ // INPUT: {79, 110, 111}
+ // = [01001111] [01101110] [01101111]
+ //OUTPUT: [0000 1001] [1110 1101] [110 00000] = {9, -19, -64}
+ byte data18[] = new byte[5];
+ byte inputdata3[] = { 79, 110, 111 };
+ startOffset = 3;
+ numBits = 16;
+ BitBufferHelper.insertBits(data18, inputdata3, startOffset, numBits);
+ Assert.assertTrue(data18[0] == 9);
+ Assert.assertTrue(data18[1] == -19);
+ Assert.assertTrue(data18[2] == -64);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [0000 1001] [0110 1101] [1100 1101] [0110 1010] [0000 0001] = {9, 109, -51, 106, 0}
+
+ startOffset = 3;
+ numBits = 32;
+ byte data19[] = new byte[5];
+ BitBufferHelper.insertBits(data19, inputdata, startOffset, numBits);
+ Assert.assertTrue(data19[0] == 9);
+ Assert.assertTrue(data19[1] == 109);
+ Assert.assertTrue(data19[2] == -51);
+ Assert.assertTrue(data19[3] == 106);
+ Assert.assertTrue(data19[4] == 0);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: data[4, 5, 6] = [0 010 0101] [1 011 0111] [0 000 0000] = {37, -73, 0}
+ startOffset = 33;
+ numBits = 16;
+ byte data20[] = new byte[7];
+ BitBufferHelper.insertBits(data20, inputdata, startOffset, numBits);
+ Assert.assertTrue(data20[4] == 37);
+ Assert.assertTrue(data20[5] == -73);
+ Assert.assertTrue(data20[6] == 0);
+
+ //CASE 4: extranumBits != 0 AND extraOffsetBits != 0
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [0000 1001] [0100 0000] = {9, 96}
+ startOffset = 3;
+ numBits = 7;
+ byte data21[] = new byte[7];
+ BitBufferHelper.insertBits(data21, inputdata, startOffset, numBits);
+ Assert.assertTrue(data21[0] == 9);
+ Assert.assertTrue(data21[1] == 64);
+ Assert.assertTrue(data21[2] == 0);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: data = [00000 010] [01011 011] [01110 000] = {37, -73, 0}
+ startOffset = 5;
+ numBits = 17;
+ byte data22[] = new byte[7];
+ BitBufferHelper.insertBits(data22, inputdata, startOffset, numBits);
+ Assert.assertTrue(data22[0] == 2);
+ Assert.assertTrue(data22[1] == 91);
+ Assert.assertTrue(data22[2] == 112);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [0000 1001] [0110 1101] [110 01101] [01 00000] = {9, 109, -51, 64}
+ startOffset = 3;
+ numBits = 23;
+ byte data23[] = new byte[7];
+ BitBufferHelper.insertBits(data23, inputdata, startOffset, numBits);
+ Assert.assertTrue(data23[0] == 9);
+ Assert.assertTrue(data23[1] == 109);
+ Assert.assertTrue(data23[2] == -51);
+ Assert.assertTrue(data23[3] == 64);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [0000 1001] [0110 1101] = {9, 109}
+ startOffset = 3;
+ numBits = 13;
+ byte data24[] = new byte[7];
+ BitBufferHelper.insertBits(data24, inputdata, startOffset, numBits);
+ Assert.assertTrue(data24[0] == 9);
+ Assert.assertTrue(data24[1] == 109);
+ Assert.assertTrue(data24[2] == 0);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [0000 0100] [1011 0110] [1110 0110] = {4, -74, -26}
+ startOffset = 4;
+ numBits = 20;
+ byte data25[] = new byte[7];
+ BitBufferHelper.insertBits(data25, inputdata, startOffset, numBits);
+ Assert.assertTrue(data25[0] == 4);
+ Assert.assertTrue(data25[1] == -74);
+ Assert.assertTrue(data25[2] == -26);
+ Assert.assertTrue(data25[3] == -0);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [0000 0010] [0101 1011] = {0, 2, 91, 0}
+ startOffset = 13;
+ numBits = 11;
+ byte data26[] = new byte[7];
+ BitBufferHelper.insertBits(data26, inputdata, startOffset, numBits);
+ Assert.assertTrue(data26[0] == 0);
+ Assert.assertTrue(data26[1] == 2);
+ Assert.assertTrue(data26[2] == 91);
+ Assert.assertTrue(data26[3] == 0);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [000 01001] [011 01101] [110 0 0000] = {9, 109, -64, 0}
+ startOffset = 3;
+ numBits = 17;
+ byte data27[] = new byte[7];
+ BitBufferHelper.insertBits(data27, inputdata, startOffset, numBits);
+ Assert.assertTrue(data27[0] == 9);
+ Assert.assertTrue(data27[1] == 109);
+ Assert.assertTrue(data27[2] == -64);
+ Assert.assertTrue(data27[3] == 0);
+
+ // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+ // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001] //OUTPUT: [00000000] [00000100] [10110110] [11100000]= {0, 4, -54, -96}
+ // OUTPUT: [00 000000] [00 000000] [00 010010] [11 011011] [10 011010] [11 010100] [0000 0000] = {0, 0, 18, -37,-102,-44,0}
+ startOffset = 18;
+ numBits = 34;
+ byte data28[] = new byte[7];
+ BitBufferHelper.insertBits(data28, inputdata, startOffset, numBits);
+ Assert.assertTrue(data28[0] == 0);
+ Assert.assertTrue(data28[1] == 0);
+ Assert.assertTrue(data28[2] == 18);
+ Assert.assertTrue(data28[3] == -37);
+ Assert.assertTrue(data28[4] == -102);
+ Assert.assertTrue(data28[5] == -44);
+ Assert.assertTrue(data28[6] == 0);
+
+ }
+
+ @Test
+ public void testGetShort() throws Exception {
+ byte data[] = new byte[2];
+ data[0] = 7;
+ data[1] = 8;
+ int length = 9; // num bits
+ Assert.assertTrue(BitBufferHelper.getShort(data, length) == 264);
+
+ data[0] = 6;
+ data[1] = 8;
+ short result = BitBufferHelper.getShort(data, length);
+ Assert.assertTrue(result == 8);
+
+ data[0] = 8;
+ data[1] = 47;
+ result = BitBufferHelper.getShort(data, length);
+ Assert.assertTrue(result == 47);
+
+ //[0000 0001] [0001 0100] [0110 0100]
+ byte[] data1 = new byte[2];
+ data1[0] = 1;
+ data1[1] = 20; //data1[2] = 100;
+ length = 15;
+ result = BitBufferHelper.getShort(data1, length);
+ Assert.assertTrue(result == 276);
+
+ byte[] data2 = new byte[2];
+ data2[0] = 64;
+ data2[1] = 99; //data2[2] = 100;
+ length = 13;
+ result = BitBufferHelper.getShort(data2, length);
+ Assert.assertTrue(result == 99);
+
+ byte[] data3 = { 100, 50 };
+ result = BitBufferHelper.getShort(data3);
+ Assert.assertTrue(result == 25650);
+ }
+
+ @Test
+ public void testToIntVarLength() throws Exception {
+ byte data[] = { (byte) 255, (byte) 128 };
+ int length = 9; // num bits
+ Assert.assertTrue(BitBufferHelper.getInt(data, length) == 384);
+
+ byte data2[] = { 0, 8 };
+ Assert.assertTrue(BitBufferHelper.getInt(data2, 9) == 8);
+
+ byte data3[] = { 1, 1, 1 };
+ Assert.assertTrue(BitBufferHelper.getInt(data3) == 65793);
+
+ byte data4[] = { 1, 1, 1 };
+ Assert.assertTrue(BitBufferHelper.getInt(data4) == 65793);
+
+ byte data5[] = { 1, 1 };
+ Assert.assertTrue(BitBufferHelper.getInt(data5) == 257);
+
+ }
+
+ @Test
+ public void testShiftBitstoLSB() {
+ byte[] data = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+
+ byte[] data2 = { 8, 9, 10 };
+ byte[] shiftedBytes2 = BitBufferHelper.shiftBitsToLSB(data2, 11);
+
+ Assert.assertTrue(shiftedBytes2[0] == 0);
+ Assert.assertTrue(shiftedBytes2[1] == 64);
+ Assert.assertTrue(shiftedBytes2[2] == 72);
+
+ byte[] shiftedBytes = BitBufferHelper.shiftBitsToLSB(data, 49);
+
+ Assert.assertTrue(shiftedBytes[0] == 0);
+ Assert.assertTrue(shiftedBytes[1] == 2);
+ Assert.assertTrue(shiftedBytes[2] == 4);
+ Assert.assertTrue(shiftedBytes[3] == 6);
+ Assert.assertTrue(shiftedBytes[4] == 8);
+ Assert.assertTrue(shiftedBytes[5] == 10);
+ Assert.assertTrue(shiftedBytes[6] == 12);
+ Assert.assertTrue(shiftedBytes[7] == 14);
+ Assert.assertTrue(shiftedBytes[8] == 16);
+ Assert.assertTrue(shiftedBytes[9] == 18);
+
+ byte[] data1 = { 1, 2, 3 };
+ byte[] shiftedBytes1 = BitBufferHelper.shiftBitsToLSB(data1, 18);
+ Assert.assertTrue(shiftedBytes1[0] == 0);
+ Assert.assertTrue(shiftedBytes1[1] == 4);
+ Assert.assertTrue(shiftedBytes1[2] == 8);
+
+ }
+
+ @Test
+ public void testShiftBitstoLSBMSB() {
+ byte[] data = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
+
+ byte[] clone = BitBufferHelper.shiftBitsToMSB(BitBufferHelper
+ .shiftBitsToLSB(data, 72), 72);
+
+ Assert.assertTrue(clone[0] == 1);
+ Assert.assertTrue(clone[1] == 2);
+ Assert.assertTrue(clone[2] == 3);
+ Assert.assertTrue(clone[3] == 4);
+ Assert.assertTrue(clone[4] == 5);
+ Assert.assertTrue(clone[5] == 6);
+ Assert.assertTrue(clone[6] == 7);
+ Assert.assertTrue(clone[7] == 8);
+ Assert.assertTrue(clone[8] == 9);
+ Assert.assertTrue(clone[9] == 0);
+ }
+
+}
--- /dev/null
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/**
+ * @file EthernetAddressTest.java
+ *
+ * @brief Unit Tests for EthernetAddress class
+ *
+ * Unit Tests for EthernetAddress class
+ */
+package org.opendaylight.controller.sal.packet.address;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.liblldp.ConstructionException;
+import org.opendaylight.controller.liblldp.EthernetAddress;
+
+public class EthernetAddressTest {
+ @Test
+ public void testNonValidConstructor() {
+ @SuppressWarnings("unused")
+ EthernetAddress ea1;
+ // Null input array
+ try {
+ ea1 = new EthernetAddress((byte[]) null);
+
+ // Exception is expected if NOT raised test will fail
+ Assert.assertTrue(false);
+ } catch (ConstructionException e) {
+ }
+
+ // Array too short
+ try {
+ ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0 });
+
+ // Exception is expected if NOT raised test will fail
+ Assert.assertTrue(false);
+ } catch (ConstructionException e) {
+ }
+
+ // Array too long
+ try {
+ ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+ (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x0,
+ (byte) 0x0 });
+
+ // Exception is expected if NOT raised test will fail
+ Assert.assertTrue(false);
+ } catch (ConstructionException e) {
+ }
+ }
+
+ @Test
+ public void testEquality() {
+ EthernetAddress ea1;
+ EthernetAddress ea2;
+ try {
+ ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+ (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x1 });
+
+ ea2 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+ (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x1 });
+ Assert.assertTrue(ea1.equals(ea2));
+ } catch (ConstructionException e) {
+ // Exception is NOT expected if raised test will fail
+ Assert.assertTrue(false);
+ }
+
+ try {
+ ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+ (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x1 });
+
+ ea2 = ea1.clone();
+ Assert.assertTrue(ea1.equals(ea2));
+ } catch (ConstructionException e) {
+ // Exception is NOT expected if raised test will fail
+ Assert.assertTrue(false);
+ }
+
+ // Check for well knowns
+ try {
+ ea1 = EthernetAddress.BROADCASTMAC;
+ ea2 = new EthernetAddress(new byte[] { (byte) 0xff, (byte) 0xff,
+ (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff });
+ Assert.assertTrue(ea1.equals(ea2));
+ } catch (ConstructionException e) {
+ // Exception is NOT expected if raised test will fail
+ Assert.assertTrue(false);
+ }
+ }
+
+ @Test
+ public void testUnEquality() {
+ EthernetAddress ea1;
+ EthernetAddress ea2;
+ try {
+ ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+ (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x2 });
+
+ ea2 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+ (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x1 });
+ Assert.assertTrue(!ea1.equals(ea2));
+ } catch (ConstructionException e) {
+ // Exception is NOT expected if raised test will fail
+ Assert.assertTrue(false);
+ }
+ }
+}
<concepts.version>0.5.2-SNAPSHOT</concepts.version>
<concurrentlinkedhashmap.version>1.4</concurrentlinkedhashmap.version>
<config.version>0.2.5-SNAPSHOT</config.version>
+ <config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
+ <config.netty.configfile>00-netty.xml</config.netty.configfile>
+ <config.mdsal.configfile>01-mdsal.xml</config.mdsal.configfile>
+ <config.netconf.client.configfile>01-netconf.xml</config.netconf.client.configfile>
+ <config.toaster.configfile>03-toaster-sample.xml</config.toaster.configfile>
+ <config.restconf.configfile>10-rest-connector.xml</config.restconf.configfile>
+ <config.netconf.connector.configfile>99-netconf-connector.xml</config.netconf.connector.configfile>
<configuration.implementation.version>0.4.3-SNAPSHOT</configuration.implementation.version>
<configuration.version>0.4.3-SNAPSHOT</configuration.version>
<connectionmanager.version>0.1.2-SNAPSHOT</connectionmanager.version>
<!-- OpenEXI third party lib for netconf-->
<exi.nagasena.version>0000.0002.0038.0</exi.nagasena.version>
+ <felix.util.version>1.6.0</felix.util.version>
<filtervalve.version>1.4.2-SNAPSHOT</filtervalve.version>
<findbugs.maven.plugin.version>2.4.0</findbugs.maven.plugin.version>
<flowprogrammer.northbound.version>0.4.2-SNAPSHOT</flowprogrammer.northbound.version>
<artifactId>akka-osgi_${scala.version}</artifactId>
<version>${akka.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-slf4j_${scala.version}</artifactId>
+ <version>${akka.version}</version>
+ </dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<artifactId>config-persister-file-xml-adapter</artifactId>
<version>${config.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-feature-adapter</artifactId>
+ <version>${config.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-persister-impl</artifactId>
<artifactId>karaf.branding</artifactId>
<version>${karaf.branding.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>liblldp</artifactId>
+ <version>${sal.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>logback-config</artifactId>
<artifactId>sal-common-util</artifactId>
<version>${mdsal.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-inmemory-datastore</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-compatibility</artifactId>
<artifactId>toaster-config</artifactId>
<version>${mdsal.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>features-yangtools</artifactId>
+ <version>${yangtools.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>features-toaster</artifactId>
+ <version>${mdsal.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-config-netty</artifactId>
+ <version>${config.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-flow</artifactId>
+ <version>${mdsal.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>com.sun.jersey.jersey-servlet</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>config-features</artifactId>
+ <artifactId>features-config</artifactId>
<version>${config.version}</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>features-odl-protocol-framework</artifactId>
+ <artifactId>features-protocol-framework</artifactId>
<version>${protocol-framework.version}</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-features</artifactId>
+ <artifactId>features-netconf</artifactId>
<version>${netconf.version}</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-features</artifactId>
+ <artifactId>features-config-persister</artifactId>
<version>${config.version}</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-features</artifactId>
+ <artifactId>features-mdsal</artifactId>
<version>${mdsal.version}</version>
<classifier>features</classifier>
<type>xml</type>
*/
package org.opendaylight.controller.config.manager.impl.osgi;
+import static org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil.registerService;
+import static org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil.wrap;
+
+import java.lang.management.ManagementFactory;
+import java.util.Arrays;
+import java.util.List;
+import javax.management.InstanceAlreadyExistsException;
+import javax.management.MBeanServer;
import org.opendaylight.controller.config.manager.impl.ConfigRegistryImpl;
import org.opendaylight.controller.config.manager.impl.jmx.ConfigRegistryJMXRegistrator;
import org.opendaylight.controller.config.manager.impl.osgi.mapping.CodecRegistryProvider;
import org.opendaylight.controller.config.manager.impl.osgi.mapping.RefreshingSCPModuleInfoRegistry;
import org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil;
import org.opendaylight.controller.config.spi.ModuleFactory;
+import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.osgi.util.tracker.ServiceTracker;
-import javax.management.InstanceAlreadyExistsException;
-import javax.management.MBeanServer;
-import java.lang.management.ManagementFactory;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil.registerService;
-import static org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil.wrap;
-
public class ConfigManagerActivator implements BundleActivator {
private final MBeanServer configMBeanServer = ManagementFactory.getPlatformMBeanServer();
private AutoCloseable autoCloseable;
@Override
- public void start(BundleContext context) {
+ public void start(final BundleContext context) {
ModuleInfoBackedContext moduleInfoBackedContext = ModuleInfoBackedContext.create();// the inner strategy is backed by thread context cl?
bundleTracker.open();
// register config registry to OSGi
+ AutoCloseable clsReg = registerService(context, moduleInfoBackedContext, GeneratedClassLoadingStrategy.class);
AutoCloseable configRegReg = registerService(context, configRegistry, ConfigRegistryImpl.class);
// register config registry to jmx
serviceTracker.open();
List<AutoCloseable> list = Arrays.asList(
- codecRegistryProvider, configRegistry, wrap(bundleTracker), configRegReg, configRegistryJMXRegistrator, wrap(serviceTracker));
+ codecRegistryProvider, clsReg,configRegistry, wrap(bundleTracker), configRegReg, configRegistryJMXRegistrator, wrap(serviceTracker));
autoCloseable = OsgiRegistrationUtil.aggregate(list);
}
@Override
- public void stop(BundleContext context) throws Exception {
+ public void stop(final BundleContext context) throws Exception {
autoCloseable.close();
}
}
--- /dev/null
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.config.persist.api;
+
+import java.util.List;
+/**
+ * The config pusher service pushes configs into the config subsystem.
+ */
+public interface ConfigPusher {
+
+ /**
+ * Pushes configs into the config subsystem.
+ *
+ * @param configs configuration snapshots to push
+ * @throws InterruptedException if interrupted while pushing
+ */
+ public void pushConfigs(List<? extends ConfigSnapshotHolder> configs) throws InterruptedException;
+}
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <!-- OSGi bundle (see Bundle-Activator below) that tracks the ConfigPusher
+ service and Karaf feature events so config files carried by installed
+ features are pushed into the config subsystem. -->
+ <modelVersion>4.0.0</modelVersion>
+
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-subsystem</artifactId>
+ <version>0.2.5-SNAPSHOT</version>
+ <relativePath>..</relativePath>
+ </parent>
+
+ <artifactId>config-persister-feature-adapter</artifactId>
+ <packaging>bundle</packaging>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.osgi</groupId>
+ <artifactId>org.osgi.core</artifactId>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.karaf.features</groupId>
+ <artifactId>org.apache.karaf.features.core</artifactId>
+ <version>${karaf.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-impl</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>config-persister-directory-xml-adapter</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>org.apache.felix.utils</artifactId>
+ <version>1.6.0</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-SymbolicName>${project.artifactId}</Bundle-SymbolicName>
+ <Bundle-Version>${project.version}</Bundle-Version>
+ <Bundle-Activator>org.opendaylight.controller.configpusherfeature.ConfigPusherFeatureActivator</Bundle-Activator>
+ <Private-Package>
+ org.apache.karaf.features.internal.model,
+ org.apache.felix.utils.version,
+ org.opendaylight.controller.configpusherfeature.internal
+ </Private-Package>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature;
+
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.opendaylight.controller.configpusherfeature.internal.ConfigPusherCustomizer;
+import org.osgi.framework.BundleActivator;
+import org.osgi.framework.BundleContext;
+import org.osgi.util.tracker.ServiceTracker;
+
+/**
+ * Bundle activator that opens a ServiceTracker for the ConfigPusher service,
+ * delegating tracker callbacks to ConfigPusherCustomizer.
+ */
+public class ConfigPusherFeatureActivator implements BundleActivator {
+
+ BundleContext bc = null;
+ ConfigPusherCustomizer cpc = null;
+ ServiceTracker<ConfigPusher,ConfigPusher> cpst = null;
+
+ public void start(BundleContext context) throws Exception {
+ bc = context;
+ cpc = new ConfigPusherCustomizer();
+ cpst = new ServiceTracker<ConfigPusher, ConfigPusher>(bc, ConfigPusher.class.getName(), cpc);
+ cpst.open();
+ }
+
+ public void stop(BundleContext context) throws Exception {
+ // Close the tracker first so no further callbacks reach the customizer,
+ // then close the customizer itself; both are idempotently nulled out.
+ if(cpst != null) {
+ cpst.close();
+ cpst = null;
+ }
+ if(cpc != null) {
+ cpc.close();
+ cpc = null;
+ }
+ bc = null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+
+import javax.xml.bind.JAXBException;
+
+import org.apache.karaf.features.BundleInfo;
+import org.apache.karaf.features.Conditional;
+import org.apache.karaf.features.ConfigFileInfo;
+import org.apache.karaf.features.Dependency;
+import org.apache.karaf.features.Feature;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/*
+ * Wrap a Feature for the purposes of extracting the FeatureConfigSnapshotHolders from
+ * its underlying ConfigFileInfo's
+ *
+ * Delegates to the contained feature and provides additional methods.
+ */
+public class AbstractFeatureWrapper implements Feature {
+ private static final Logger logger = LoggerFactory.getLogger(AbstractFeatureWrapper.class);
+ // Wrapped delegate; remains null only when the no-arg constructor was used.
+ protected Feature feature = null;
+
+ protected AbstractFeatureWrapper() {
+ // prevent instantiation without Feature
+ }
+
+ /*
+ * @param f Feature to wrap
+ */
+ public AbstractFeatureWrapper(Feature f) {
+ Preconditions.checkNotNull(f,"FeatureWrapper requires non-null Feature in constructor")
+ this.feature = f;
+ }
+
+ /*
+ * Get FeatureConfigSnapshotHolders appropriate to feed to the config subsystem
+ * from the underlying Feature Config files.
+ * Config files that fail JAXB unmarshalling are assumed not to be config
+ * subsystem snapshots and are skipped (logged at debug).
+ */
+ public LinkedHashSet<FeatureConfigSnapshotHolder> getFeatureConfigSnapshotHolders() throws Exception {
+ LinkedHashSet <FeatureConfigSnapshotHolder> snapShotHolders = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+ for(ConfigFileInfo c: getConfigurationFiles()) {
+ try {
+ snapShotHolders.add(new FeatureConfigSnapshotHolder(c,this));
+ } catch (JAXBException e) {
+ logger.debug("{} is not a config subsystem config file",c.getFinalname());
+ }
+ }
+ return snapShotHolders;
+ }
+
+ // hashCode/equals are based solely on the wrapped feature, so two wrappers
+ // of the same feature compare equal.
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((feature == null) ? 0 : feature.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ AbstractFeatureWrapper other = (AbstractFeatureWrapper) obj;
+ if (feature == null) {
+ if (other.feature != null)
+ return false;
+ } else if (!feature.equals(other.feature))
+ return false;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ // Render as the wrapped feature's name only.
+ return feature.getName();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getId()
+ */
+ public String getId() {
+ return feature.getId();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getName()
+ */
+ public String getName() {
+ return feature.getName();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getDescription()
+ */
+ public String getDescription() {
+ return feature.getDescription();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getDetails()
+ */
+ public String getDetails() {
+ return feature.getDetails();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getVersion()
+ */
+ public String getVersion() {
+ return feature.getVersion();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#hasVersion()
+ */
+ public boolean hasVersion() {
+ return feature.hasVersion();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getResolver()
+ */
+ public String getResolver() {
+ return feature.getResolver();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getInstall()
+ */
+ public String getInstall() {
+ return feature.getInstall();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getDependencies()
+ */
+ public List<Dependency> getDependencies() {
+ return feature.getDependencies();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getBundles()
+ */
+ public List<BundleInfo> getBundles() {
+ return feature.getBundles();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getConfigurations()
+ */
+ public Map<String, Map<String, String>> getConfigurations() {
+ return feature.getConfigurations();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getConfigurationFiles()
+ */
+ public List<ConfigFileInfo> getConfigurationFiles() {
+ return feature.getConfigurationFiles();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getConditional()
+ */
+ public List<? extends Conditional> getConditional() {
+ return feature.getConditional();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getStartLevel()
+ */
+ public int getStartLevel() {
+ return feature.getStartLevel();
+ }
+
+ /**
+ * @return
+ * @see org.apache.karaf.features.Feature#getRegion()
+ */
+ public String getRegion() {
+ return feature.getRegion();
+ }
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.LinkedHashSet;
+import java.util.List;
+
+import javax.xml.bind.JAXBException;
+
+import org.apache.felix.utils.version.VersionRange;
+import org.apache.felix.utils.version.VersionTable;
+import org.apache.karaf.features.Dependency;
+import org.apache.karaf.features.Feature;
+import org.apache.karaf.features.FeaturesService;
+import org.osgi.framework.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/*
+ * Wrap a Feature for the purposes of extracting the FeatureConfigSnapshotHolders from
+ * its underlying ConfigFileInfo's and those of its children recursively
+ *
+ * Delegates to the contained feature and provides additional methods.
+ */
+public class ChildAwareFeatureWrapper extends AbstractFeatureWrapper implements Feature {
+ private static final Logger logger = LoggerFactory.getLogger(ChildAwareFeatureWrapper.class);
+ private FeaturesService featuresService= null;
+
+ protected ChildAwareFeatureWrapper(Feature f) {
+ // Don't use without a feature service; deliberately leaves feature unset.
+ }
+
+ /*
+ * @param f Feature to wrap
+ * @param s FeaturesService to look up dependencies
+ */
+ ChildAwareFeatureWrapper(Feature f, FeaturesService s) throws Exception {
+ super(s.getFeature(f.getName(), f.getVersion()));
+ Preconditions.checkNotNull(s, "FeatureWrapper requires non-null FeatureService in constructor");
+ this.featuresService = s;
+ }
+
+ protected FeaturesService getFeaturesService() {
+ return featuresService;
+ }
+
+ /*
+ * Get this feature's direct dependencies, each wrapped as a
+ * ChildAwareFeatureWrapper so they can be traversed recursively.
+ * Dependencies that cannot be resolved to an installed feature are skipped.
+ */
+ public LinkedHashSet <? extends ChildAwareFeatureWrapper> getChildFeatures() throws Exception {
+ List<Dependency> dependencies = feature.getDependencies();
+ LinkedHashSet <ChildAwareFeatureWrapper> childFeatures = new LinkedHashSet<ChildAwareFeatureWrapper>();
+ if(dependencies != null) {
+ for(Dependency dependency: dependencies) {
+ Feature fi = extractFeatureFromDependency(dependency);
+ if(fi != null){
+ ChildAwareFeatureWrapper wrappedFeature = new ChildAwareFeatureWrapper(fi,featuresService);
+ childFeatures.add(wrappedFeature);
+ }
+ }
+ }
+ return childFeatures;
+ }
+
+ /*
+ * Get FeatureConfigSnapshotHolders appropriate to feed to the config subsystem
+ * from the underlying Feature Config files and those of its children recursively.
+ * Children's snapshots are re-wrapped with this feature appended to their chain.
+ */
+ @Override
+ public LinkedHashSet<FeatureConfigSnapshotHolder> getFeatureConfigSnapshotHolders() throws Exception {
+ LinkedHashSet <FeatureConfigSnapshotHolder> snapShotHolders = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+ for(ChildAwareFeatureWrapper c: getChildFeatures()) {
+ for(FeatureConfigSnapshotHolder h: c.getFeatureConfigSnapshotHolders()) {
+ FeatureConfigSnapshotHolder f;
+ try {
+ f = new FeatureConfigSnapshotHolder(h,this);
+ snapShotHolders.add(f);
+ } catch (JAXBException e) {
+ logger.debug("{} is not a config subsystem config file",h.getFileInfo().getFinalname());
+ }
+ }
+ }
+ snapShotHolders.addAll(super.getFeatureConfigSnapshotHolders());
+ return snapShotHolders;
+ }
+
+ /*
+ * Resolve a Dependency to a concrete Feature: of all known features with
+ * the dependency's name whose version lies in the requested range, pick the
+ * highest version. Returns null if nothing matches.
+ */
+ protected Feature extractFeatureFromDependency(Dependency dependency) throws Exception {
+ Feature[] features = featuresService.listFeatures();
+ VersionRange range = org.apache.karaf.features.internal.model.Feature.DEFAULT_VERSION.equals(dependency.getVersion())
+ ? VersionRange.ANY_VERSION : new VersionRange(dependency.getVersion(), true, true);
+ Feature fi = null;
+ for(Feature f: features) {
+ if (f.getName().equals(dependency.getName())) {
+ Version v = VersionTable.getVersion(f.getVersion());
+ if (range.contains(v)) {
+ // Keep scanning instead of breaking on the first match: the
+ // previous 'break' here defeated the highest-version selection
+ // performed by the compareTo check below.
+ if (fi == null || VersionTable.getVersion(fi.getVersion()).compareTo(v) < 0) {
+ fi = f;
+ }
+ }
+ }
+ }
+ return fi;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.karaf.features.FeatureEvent;
+import org.apache.karaf.features.FeaturesListener;
+import org.apache.karaf.features.FeaturesService;
+import org.apache.karaf.features.RepositoryEvent;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * FeaturesListener that queues FeatureEvents for a background pushing thread
+ * (ConfigPushingRunnable) started in the constructor.
+ */
+public class ConfigFeaturesListener implements FeaturesListener, AutoCloseable {
+ private static final Logger logger = LoggerFactory.getLogger(ConfigFeaturesListener.class);
+ private static final int QUEUE_SIZE = 100;
+ private BlockingQueue<FeatureEvent> queue = new LinkedBlockingQueue<FeatureEvent>(QUEUE_SIZE);
+ Thread pushingThread = null;
+
+ public ConfigFeaturesListener(ConfigPusher p, FeaturesService f) {
+ pushingThread = new Thread(new ConfigPushingRunnable(p, f, queue), "ConfigFeatureListener - ConfigPusher");
+ pushingThread.start();
+ }
+
+ @Override
+ public void featureEvent(FeatureEvent event) {
+ // offer() returns false rather than blocking when the queue is full;
+ // previously that return value was ignored and events vanished silently.
+ if(!queue.offer(event)) {
+ logger.warn("Dropped feature event {}; queue is full", event);
+ }
+ }
+
+ @Override
+ public void repositoryEvent(RepositoryEvent event) {
+ // Parameterized logging defers message construction until debug is enabled.
+ logger.debug("Repository: {} {}", event.getType(), event.getRepository());
+ }
+
+ @Override
+ public void close() {
+ // Interrupt the pusher thread; ConfigPushingRunnable exits once drained.
+ if(pushingThread != null) {
+ pushingThread.interrupt();
+ pushingThread = null;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import org.apache.karaf.features.FeaturesService;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.util.tracker.ServiceTracker;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tracker customizer for the ConfigPusher service: once a ConfigPusher
+ * appears, opens a second tracker for the Karaf FeaturesService (via
+ * FeatureServiceCustomizer) so feature installs can drive config pushes.
+ */
+public class ConfigPusherCustomizer implements ServiceTrackerCustomizer<ConfigPusher, ConfigPusher>, AutoCloseable {
+ private static final Logger logger = LoggerFactory.getLogger(ConfigPusherCustomizer.class);
+ // NOTE(review): never assigned in this class (FeatureServiceCustomizer creates
+ // its own listener); confirm whether this field is still needed.
+ private ConfigFeaturesListener configFeaturesListener = null;
+ private FeatureServiceCustomizer featureServiceCustomizer = null;
+ private ServiceTracker<FeaturesService,FeaturesService> fsst = null;
+
+ @Override
+ public ConfigPusher addingService(ServiceReference<ConfigPusher> configPusherServiceReference) {
+ logger.trace("Got ConfigPusherCustomizer.addingService {}", configPusherServiceReference);
+ BundleContext bc = configPusherServiceReference.getBundle().getBundleContext();
+ ConfigPusher cpService = bc.getService(configPusherServiceReference);
+ featureServiceCustomizer = new FeatureServiceCustomizer(cpService);
+ fsst = new ServiceTracker<FeaturesService, FeaturesService>(bc, FeaturesService.class.getName(), featureServiceCustomizer);
+ fsst.open();
+ return cpService;
+ }
+
+ @Override
+ public void modifiedService(ServiceReference<ConfigPusher> configPusherServiceReference, ConfigPusher configPusher) {
+ // we don't care if the properties change
+ }
+
+ @Override
+ public void removedService(ServiceReference<ConfigPusher> configPusherServiceReference, ConfigPusher configPusher) {
+ this.close();
+ }
+
+ @Override
+ public void close() {
+ // Tear down in reverse order of creation; each branch is null-guarded so
+ // close() is safe to call more than once.
+ if(fsst != null) {
+ fsst.close();
+ fsst = null;
+ }
+ if(configFeaturesListener != null) {
+ configFeaturesListener.close();
+ configFeaturesListener = null;
+ }
+ if(featureServiceCustomizer != null) {
+ featureServiceCustomizer.close();
+ featureServiceCustomizer = null;
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.karaf.features.Feature;
+import org.apache.karaf.features.FeatureEvent;
+import org.apache.karaf.features.FeatureEvent.EventType;
+import org.apache.karaf.features.FeaturesService;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.LinkedHashMultimap;
+
+public class ConfigPushingRunnable implements Runnable {
+ private static final Logger logger = LoggerFactory.getLogger(ConfigPushingRunnable.class);
+ private static final int POLL_TIME = 1;
+ private BlockingQueue<FeatureEvent> queue;
+ private FeatureConfigPusher configPusher;
+ public ConfigPushingRunnable(ConfigPusher p, FeaturesService f,BlockingQueue<FeatureEvent> q) {
+ queue = q;
+ configPusher = new FeatureConfigPusher(p, f);
+ }
+
+ @Override
+ public void run() {
+ List<Feature> toInstall = new ArrayList<Feature>();
+ FeatureEvent event;
+ boolean interuppted = false;
+ while(true) {
+ try {
+ if(!interuppted) {
+ if(toInstall.isEmpty()) {
+ event = queue.take();
+ } else {
+ event = queue.poll(POLL_TIME, TimeUnit.MILLISECONDS);
+ }
+ if(event != null && event.getFeature() !=null) {
+ processFeatureEvent(event,toInstall);
+ }
+ } else if(toInstall.isEmpty()) {
+ logger.error("ConfigPushingRunnable - exiting");
+ return;
+ }
+ } catch (InterruptedException e) {
+ logger.error("ConfigPushingRunnable - interupted");
+ interuppted = true;
+ } catch (Exception e) {
+ logger.error("Exception while processing features {}", e);
+ }
+ }
+ }
+
+ protected void processFeatureEvent(FeatureEvent event, List<Feature> toInstall) throws InterruptedException, Exception {
+ if(event.getType() == EventType.FeatureInstalled) {
+ toInstall.add(event.getFeature());
+ LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> result = configPusher.pushConfigs(toInstall);
+ toInstall.removeAll(result.keySet());
+ } else if(event.getType() == EventType.FeatureUninstalled) {
+ toInstall.remove(event.getFeature());
+ }
+ }
+
+ protected void logPushResult(LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> results) {
+ for(Feature f:results.keySet()) {
+ logger.info("Pushed configs for feature {} {}",f,results.get(f));
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedHashSet;
+import java.util.List;
+
+import org.apache.karaf.features.Feature;
+import org.apache.karaf.features.FeaturesService;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.LinkedHashMultimap;
+
+/*
+ * Simple class to push configs to the config subsystem from Feature's configfiles
+ */
+public class FeatureConfigPusher {
+ // NOTE(review): logger is currently unused in this class.
+ private static final Logger logger = LoggerFactory.getLogger(FeatureConfigPusher.class);
+ private FeaturesService featuresService = null;
+ private ConfigPusher pusher = null;
+ /*
+ * A LinkedHashSet (to preserve order and ensure uniqueness) of the pushedConfigs
+ * This is used to prevent pushing duplicate configs if a Feature is in multiple dependency
+ * chains. Also, preserves the *original* Feature chain for which we pushed the config.
+ * (which is handy for logging).
+ */
+ LinkedHashSet<FeatureConfigSnapshotHolder> pushedConfigs = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+ /*
+ * LinkedHashMultimap to track which configs we pushed for each Feature installation
+ * For future use
+ */
+ LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> feature2configs = LinkedHashMultimap.create();
+
+ /*
+ * @param p - ConfigPusher to push ConfigSnapshotHolders
+ */
+ public FeatureConfigPusher(ConfigPusher p, FeaturesService f) {
+ pusher = p;
+ featuresService = f;
+ }
+ /*
+ * Push config files from Features to config subsystem
+ * @param features - list of Features to extract config files from recursively and push
+ * to the config subsystem
+ *
+ * @return A LinkedHashMultimap of Features to the FeatureConfigSnapshotHolder actually pushed
+ * If a Feature is not in the returned LinkedHashMultimap then we couldn't push its configs
+ * (Usually because it was not yet installed)
+ */
+ public LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> pushConfigs(List<Feature> features) throws Exception, InterruptedException {
+ LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> pushedFeatures = LinkedHashMultimap.create();
+ for(Feature feature: features) {
+ LinkedHashSet<FeatureConfigSnapshotHolder> configSnapShots = pushConfig(feature);
+ if(!configSnapShots.isEmpty()) {
+ pushedFeatures.putAll(feature,configSnapShots);
+ }
+ }
+ return pushedFeatures;
+ }
+
+ /*
+ * Push the configs of a single feature (and, via ChildAwareFeatureWrapper,
+ * its dependencies). Features not yet installed are skipped and return empty.
+ */
+ private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(Feature feature) throws Exception, InterruptedException {
+ LinkedHashSet<FeatureConfigSnapshotHolder> configs = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+ if(isInstalled(feature)) {
+ ChildAwareFeatureWrapper wrappedFeature = new ChildAwareFeatureWrapper(feature,featuresService);
+ configs = wrappedFeature.getFeatureConfigSnapshotHolders();
+ if(!configs.isEmpty()) {
+ configs = pushConfig(configs);
+ feature2configs.putAll(feature, configs);
+ }
+ }
+ return configs;
+ }
+
+ private boolean isInstalled(Feature feature) {
+ List<Feature> installedFeatures = Arrays.asList(featuresService.listInstalledFeatures());
+ return installedFeatures.contains(feature);
+ }
+
+ /*
+ * Push only configs not pushed before; return the subset of the requested
+ * configs that have (now or previously) been pushed.
+ */
+ private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(LinkedHashSet<FeatureConfigSnapshotHolder> configs) throws InterruptedException {
+ LinkedHashSet<FeatureConfigSnapshotHolder> configsToPush = new LinkedHashSet<FeatureConfigSnapshotHolder>(configs);
+ configsToPush.removeAll(pushedConfigs);
+ if(!configsToPush.isEmpty()) {
+ pusher.pushConfigs(new ArrayList<ConfigSnapshotHolder>(configsToPush));
+ pushedConfigs.addAll(configsToPush);
+ }
+ LinkedHashSet<FeatureConfigSnapshotHolder> configsPushed = new LinkedHashSet<FeatureConfigSnapshotHolder>(pushedConfigs);
+ configsPushed.retainAll(configs);
+ return configsPushed;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.io.File;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.SortedSet;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.karaf.features.ConfigFileInfo;
+import org.apache.karaf.features.Feature;
+import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.opendaylight.controller.config.persist.storage.file.xml.model.ConfigSnapshot;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+/*
+ * A ConfigSnapshotHolder that can track all the additional information
+ * relevant to the fact we are getting these from a Feature.
+ *
+ * Includes tracking the 'featureChain' - a reverse-ordered list of the dependency
+ * graph of features that caused us to push this FeatureConfigSnapshotHolder.
+ * So if A -> B -> C, then the feature chain would be C -> B -> A
+ */
+public class FeatureConfigSnapshotHolder implements ConfigSnapshotHolder {
+ private ConfigSnapshot unmarshalled = null;
+ private ConfigFileInfo fileInfo = null;
+ private List<Feature> featureChain = new ArrayList<Feature>();
+
+ /*
+ * @param holder - FeatureConfigSnapshotHolder that we
+ * @param feature - new
+ */
+ public FeatureConfigSnapshotHolder(final FeatureConfigSnapshotHolder holder, final Feature feature) throws JAXBException {
+ this(holder.fileInfo,holder.getFeature());
+ this.featureChain.add(feature);
+ }
+
+ /*
+ * Create a FeatureConfigSnapshotHolder for a given ConfigFileInfo and record the associated
+ * feature we are creating it from.
+ * @param fileInfo - ConfigFileInfo to read into the ConfigSnapshot
+ * @param feature - Feature the ConfigFileInfo was attached to
+ */
+ public FeatureConfigSnapshotHolder(final ConfigFileInfo fileInfo, final Feature feature) throws JAXBException {
+ Preconditions.checkNotNull(fileInfo);
+ Preconditions.checkNotNull(fileInfo.getFinalname());
+ Preconditions.checkNotNull(feature);
+ this.fileInfo = fileInfo;
+ this.featureChain.add(feature);
+ JAXBContext jaxbContext = JAXBContext.newInstance(ConfigSnapshot.class);
+ Unmarshaller um = jaxbContext.createUnmarshaller();
+ File file = new File(fileInfo.getFinalname());
+ unmarshalled = ((ConfigSnapshot) um.unmarshal(file));
+ }
+ /*
+ * (non-Javadoc)
+ * @see java.lang.Object#hashCode()
+ *
+ * We really care most about the underlying ConfigShapshot, so compute hashcode on that
+ */
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((unmarshalled != null && unmarshalled.getConfigSnapshot() == null) ? 0 : unmarshalled.getConfigSnapshot().hashCode());
+ return result;
+ }
+ /*
+ * (non-Javadoc)
+ * @see java.lang.Object#equals(java.lang.Object)
+ * *
+ * We really care most about the underlying ConfigShapshot, so compute equality on that
+ */
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ FeatureConfigSnapshotHolder fcsh = (FeatureConfigSnapshotHolder)obj;
+ if(this.unmarshalled.getConfigSnapshot().equals(fcsh.unmarshalled.getConfigSnapshot())) {
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder b = new StringBuilder();
+ Path p = Paths.get(fileInfo.getFinalname());
+ b.append(p.getFileName())
+ .append("(")
+ .append(getCauseFeature())
+ .append(",")
+ .append(getFeature())
+ .append(")");
+ return b.toString();
+
+ }
+
+ @Override
+ public String getConfigSnapshot() {
+ return unmarshalled.getConfigSnapshot();
+ }
+
+ @Override
+ public SortedSet<String> getCapabilities() {
+ return unmarshalled.getCapabilities();
+ }
+
+ public ConfigFileInfo getFileInfo() {
+ return fileInfo;
+ }
+
+ /*
+ * @returns The original feature to which the ConfigFileInfo was attached
+ * Example:
+ * A -> B -> C, ConfigFileInfo Foo is attached to C.
+ * feature:install A
+ * thus C is the 'Feature' Foo was attached.
+ */
+ public Feature getFeature() {
+ return featureChain.get(0);
+ }
+
+    /*
+     * @return The dependency chain of the features that caused the ConfigFileInfo to be pushed, in reverse order.
+     * Example:
+     * A -> B -> C, ConfigFileInfo Foo is attached to C.
+     * The returned list is
+     * [C,B,A]
+     */
+    public ImmutableList<Feature> getFeatureChain() {
+        return ImmutableList.copyOf(Lists.reverse(featureChain));
+    }
+
+    /*
+     * @return The feature whose installation was the root cause
+     * of the pushing of this ConfigFileInfo.
+     * Example:
+     * A -> B -> C, ConfigFileInfo Foo is attached to C.
+     * feature:install A
+     * thus A is the 'Cause' of the installation of Foo.
+     */
+    public Feature getCauseFeature() {
+        return Iterables.getLast(featureChain);
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import org.apache.karaf.features.FeaturesListener;
+import org.apache.karaf.features.FeaturesService;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
+
+/*
+ * ServiceTrackerCustomizer that, when a Karaf FeaturesService appears, creates a
+ * ConfigFeaturesListener around the supplied ConfigPusher and registers it as a
+ * FeaturesListener OSGi service. The registration is torn down when the tracked
+ * service goes away or when close() is called.
+ *
+ * NOTE(review): registration/configFeaturesListener are written without
+ * synchronization; presumably the surrounding ServiceTracker serializes these
+ * callbacks — confirm before relying on thread-safety.
+ */
+public class FeatureServiceCustomizer implements ServiceTrackerCustomizer<FeaturesService, FeaturesService>, AutoCloseable {
+    // Pusher handed to the ConfigFeaturesListener created in addingService().
+    private ConfigPusher configPusher = null;
+    // Listener instance created once a FeaturesService is tracked.
+    private ConfigFeaturesListener configFeaturesListener = null;
+    // Registration of configFeaturesListener; null when not registered.
+    private ServiceRegistration<?> registration;
+
+    FeatureServiceCustomizer(ConfigPusher c) {
+        configPusher = c;
+    }
+
+
+    // Called by the ServiceTracker when a FeaturesService is discovered:
+    // wrap configPusher in a ConfigFeaturesListener and publish it as a
+    // FeaturesListener service on the discovered bundle's context.
+    @Override
+    public FeaturesService addingService(ServiceReference<FeaturesService> reference) {
+        BundleContext bc = reference.getBundle().getBundleContext();
+        FeaturesService featureService = bc.getService(reference);
+        configFeaturesListener = new ConfigFeaturesListener(configPusher,featureService);
+        registration = bc.registerService(FeaturesListener.class.getCanonicalName(), configFeaturesListener, null);
+        return featureService;
+    }
+
+    @Override
+    public void modifiedService(ServiceReference<FeaturesService> reference,
+        FeaturesService service) {
+        // we don't care if the properties change
+
+    }
+
+    // Tracked FeaturesService went away: unregister our listener.
+    @Override
+    public void removedService(ServiceReference<FeaturesService> reference,
+        FeaturesService service) {
+        close();
+    }
+
+    // Idempotent teardown: unregisters the FeaturesListener registration if
+    // one exists, then nulls it so repeated calls are harmless.
+    @Override
+    public void close() {
+        if(registration != null) {
+            registration.unregister();
+            registration = null;
+        }
+    }
+
+}
<module>config-util</module>
<module>config-persister-api</module>
<module>config-persister-file-xml-adapter</module>
+ <module>config-persister-feature-adapter</module>
<module>yang-jmx-generator</module>
<module>yang-jmx-generator-plugin</module>
<module>yang-test</module>
<type>kar</type>
<scope>runtime</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-netty-features</artifactId>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
-
<!-- AD-SAL Related Features -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
<!-- MD-SAL Related Features -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>mdsal-features</artifactId>
+ <artifactId>features-mdsal</artifactId>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-flow</artifactId>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
+++ /dev/null
-<snapshot>
- <required-capabilities>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:netty?module=netty&revision=2013-11-19</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:netty:eventexecutor?module=netty-event-executor&revision=2013-11-12</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup?module=threadgroup&revision=2013-11-07</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:netty:timer?module=netty-timer&revision=2013-11-19</capability>
- </required-capabilities>
- <configuration>
-
- <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
- <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
- <module>
- <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup">netty:netty-threadgroup-fixed</type>
- <name>global-boss-group</name>
- </module>
- <module>
- <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup">netty:netty-threadgroup-fixed</type>
- <name>global-worker-group</name>
- </module>
- <module>
- <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty:timer">netty:netty-hashed-wheel-timer</type>
- <name>global-timer</name>
- </module>
- <module>
- <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty:eventexecutor">netty:netty-global-event-executor</type>
- <name>singleton</name>
- </module>
- </modules>
-
- <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
- <service>
- <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty">netty:netty-threadgroup</type>
- <instance>
- <name>global-boss-group</name>
- <provider>/modules/module[type='netty-threadgroup-fixed'][name='global-boss-group']</provider>
- </instance>
- <instance>
- <name>global-worker-group</name>
- <provider>/modules/module[type='netty-threadgroup-fixed'][name='global-worker-group']</provider>
- </instance>
- </service>
- <service>
- <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty">netty:netty-event-executor</type>
- <instance>
- <name>global-event-executor</name>
- <provider>/modules/module[type='netty-global-event-executor'][name='singleton']</provider>
- </instance>
- </service>
- <service>
- <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty">netty:netty-timer</type>
- <instance>
- <name>global-timer</name>
- <provider>/modules/module[type='netty-hashed-wheel-timer'][name='global-timer']</provider>
- </instance>
- </service>
- </services>
- </data>
-
- </configuration>
-</snapshot>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
- Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<snapshot>
- <configuration>
- <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
- <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:schema-service-singleton</type>
- <name>yang-schema-service</name>
- </module>
- <!-- To enable use of new in-memory datastore and new implementations
- of data brokers, comment out all parts of this
- xml which are marked with DATA-BROKER and uncomment all parts
- of this xml which are marked with NEW-DATA-BROKER
- -->
- <!-- DATA-BROKER start-->
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:hash-map-data-store</type>
- <name>hash-map-data-store</name>
- </module>
- <!-- DATA BROKER end -->
- <!-- NEW-DATA-BROKER start -->
- <!--
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-inmemory-data-broker</type>
- <name>async-data-broker</name>
- <schema-service>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
- <name>yang-schema-service</name>
- </schema-service>
- </module>
- -->
- <!-- NEW-DATA-BROKER end -->
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-broker-impl</type>
- <name>dom-broker</name>
- <!-- DATA-BROKER start -->
- <data-store xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-data-store</type>
- <!-- to switch to the clustered data store, comment out the hash-map-data-store <name> and uncomment the cluster-data-store one -->
- <name>hash-map-data-store</name>
- <!-- <name>cluster-data-store</name> -->
- </data-store>
- <!-- DATA-BROKER end -->
- <!-- NEW-DATA-BROKER start -->
- <!--
- <async-data-broker>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
- <name>async-data-broker</name>
- </async-data-broker>
- -->
- <!-- NEW-DATA-BROKER end -->
- </module>
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-broker-impl</type>
- <name>binding-broker-impl</name>
- <notification-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-notification-service</type>
- <name>binding-notification-broker</name>
- </notification-service>
- <data-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
- <name>binding-data-broker</name>
- </data-broker>
- </module>
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:runtime-generated-mapping</type>
- <name>runtime-mapping-singleton</name>
- </module>
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-notification-broker</type>
- <name>binding-notification-broker</name>
- </module>
- <!-- DATA-BROKER start -->
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-data-broker</type>
- <name>binding-data-broker</name>
- <dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
- <name>dom-broker</name>
- </dom-broker>
- <mapping-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding:binding-dom-mapping-service</type>
- <name>runtime-mapping-singleton</name>
- </mapping-service>
- </module>
- <!-- DATA-BROKER end -->
- <!-- NEW-DATA-BROKER start -->
- <!--
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-data-compatible-broker</type>
- <name>binding-data-broker</name>
- <dom-async-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
- <name>dom-broker</name>
- </dom-async-broker>
- <binding-mapping-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding:binding-dom-mapping-service</type>
- <name>runtime-mapping-singleton</name>
- </binding-mapping-service>
- </module>
- -->
- <!-- NEW-DATA-BROKER end -->
- </modules>
- <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
- <service>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
- <instance>
- <name>yang-schema-service</name>
- <provider>/modules/module[type='schema-service-singleton'][name='yang-schema-service']</provider>
- </instance>
- </service>
- <service>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-notification-service</type>
- <instance>
- <name>binding-notification-broker</name>
- <provider>/modules/module[type='binding-notification-broker'][name='binding-notification-broker']</provider>
- </instance>
- </service>
- <!-- DATA-BROKER start -->
- <service>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-data-store</type>
- <instance>
- <name>hash-map-data-store</name>
- <provider>/modules/module[type='hash-map-data-store'][name='hash-map-data-store']</provider>
- </instance>
- </service>
- <!-- DATA-BROKER end -->
- <!-- NEW-DATA-BROKER start -->
- <!--
- <service>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
- <instance>
- <name>async-data-broker</name>
- <provider>/modules/module[type='dom-inmemory-data-broker'][name='async-data-broker']</provider>
- </instance>
- </service>
- -->
- <!-- NEW-DATA-BROKER end -->
- <service>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-broker-osgi-registry</type>
- <instance>
- <name>binding-osgi-broker</name>
- <provider>/modules/module[type='binding-broker-impl'][name='binding-broker-impl']</provider>
- </instance>
- </service>
- <service>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
- <instance>
- <name>binding-rpc-broker</name>
- <provider>/modules/module[type='binding-broker-impl'][name='binding-broker-impl']</provider>
- </instance>
- </service>
- <service>
- <type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding-impl:binding-dom-mapping-service</type>
- <instance>
- <name>runtime-mapping-singleton</name>
- <provider>/modules/module[type='runtime-generated-mapping'][name='runtime-mapping-singleton']</provider>
- </instance>
- </service>
- <service>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
- <instance>
- <name>dom-broker</name>
- <provider>/modules/module[type='dom-broker-impl'][name='dom-broker']</provider>
- </instance>
- </service>
-
- <service>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
- <instance>
- <name>binding-data-broker</name>
- <!-- DATA-BROKER start -->
- <provider>/modules/module[type='binding-data-broker'][name='binding-data-broker']</provider>
- <!-- DATA-BROKER end -->
- <!-- NEW-DATA-BROKER start -->
- <!--
- <provider>/modules/module[type='binding-data-compatible-broker'][name='binding-data-broker']</provider>
- -->
- <!-- NEW-DATA-BROKER end -->
- </instance>
- </service>
-
- </services>
- </data>
- </configuration>
- <required-capabilities>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:netty:eventexecutor?module=netty-event-executor&revision=2013-11-12</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:threadpool?module=threadpool&revision=2013-04-09</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&revision=2013-10-28</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom?module=opendaylight-md-sal-dom&revision=2013-10-28</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl?module=opendaylight-sal-binding-broker-impl&revision=2013-10-28</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl?module=opendaylight-sal-dom-broker-impl&revision=2013-10-28</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:common?module=opendaylight-md-sal-common&revision=2013-10-28</capability>
- </required-capabilities>
-</snapshot>
+++ /dev/null
-<snapshot>
- <configuration>
- <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
- <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:toaster-provider:impl">
- prefix:toaster-provider-impl
- </type>
- <name>toaster-provider-impl</name>
-
- <rpc-registry>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
- <name>binding-rpc-broker</name>
- </rpc-registry>
-
- <notification-service>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
- binding:binding-notification-service
- </type>
- <name>binding-notification-broker</name>
- </notification-service>
- </module>
-
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:toaster-consumer:impl">
- prefix:toaster-consumer-impl
- </type>
- <name>toaster-consumer-impl</name>
-
- <rpc-registry>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
- <name>binding-rpc-broker</name>
- </rpc-registry>
-
- <notification-service>
- <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
- binding:binding-notification-service
- </type>
- <name>binding-notification-broker</name>
- </notification-service>
- </module>
- </modules>
-
- <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
- <service>
- <type xmlns:toaster="urn:opendaylight:params:xml:ns:yang:controller:config:toaster-provider">toaster:toaster-provider</type>
- <instance>
- <name>toaster-provider</name>
- <provider>/modules/module[type='toaster-provider-impl'][name='toaster-provider-impl']</provider>
- </instance>
- </service>
- <service>
- <type xmlns:toaster="urn:opendaylight:params:xml:ns:yang:controller:config:toaster-consumer">toaster:toaster-consumer</type>
- <instance>
- <name>toaster-consumer</name>
- <provider>/modules/module[type='toaster-consumer-impl'][name='toaster-consumer-impl']</provider>
- </instance>
- </service>
- </services>
- </data>
-
- </configuration>
-
- <required-capabilities>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&revision=2013-10-28</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:config:toaster-consumer?module=toaster-consumer&revision=2014-01-31</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:config:toaster-consumer:impl?module=toaster-consumer-impl&revision=2014-01-31</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:config:toaster-provider?module=toaster-provider&revision=2014-01-31</capability>
- <capability>urn:opendaylight:params:xml:ns:yang:controller:config:toaster-provider:impl?module=toaster-provider-impl&revision=2014-01-31</capability>
- </required-capabilities>
-
-</snapshot>
-
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
- <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>
+ <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} %X{akkaSource} - %msg%n</pattern>
</encoder>
</appender>
<appender name="opendaylight.log" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Web modules -->
<logger name="org.opendaylight.controller.web" level="INFO"/>
+ <!-- Clustering -->
+ <logger name="org.opendaylight.controller.cluster" level="INFO"/>
+ <logger name="org.opendaylight.controller.cluster.datastore.node" level="INFO"/>
+
<!--
Unsynchronized controller startup causes models to crop up in random
order, which results in temporary inability to fully resolve a model,
netconf.ssh.pk.path = ./configuration/RSA.pk
-netconf.config.persister.active=1,2
-# read startup configuration
-netconf.config.persister.1.storageAdapterClass=org.opendaylight.controller.config.persist.storage.directory.xml.XmlDirectoryStorageAdapter
-netconf.config.persister.1.properties.directoryStorage=configuration/initial/
-
-# include only xml files, files with other extensions will be skipped, multiple extensions are permitted e.g. netconf.config.persister.1.properties.includeExtensions=xml,cfg,config
-netconf.config.persister.1.properties.includeExtensions=xml
-netconf.config.persister.1.readonly=true
-
-netconf.config.persister.2.storageAdapterClass=org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter
-netconf.config.persister.2.properties.fileStorage=configuration/current/controller.currentconfig.xml
-netconf.config.persister.2.properties.numberOfBackups=1
+netconf.config.persister.active=1
+
+netconf.config.persister.1.storageAdapterClass=org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter
+netconf.config.persister.1.properties.fileStorage=etc/opendaylight/current/controller.currentconfig.xml
+netconf.config.persister.1.properties.numberOfBackups=1
# logback configuration
logback.configurationFile=configuration/logback.xml
org.w3c.dom.xpath, \
org.xml.sax, \
org.xml.sax.ext, \
- org.xml.sax.helpers
+ org.xml.sax.helpers, \
+ javax.annotation.processing
# Standard package set. Note that:
# - javax.transaction* is exported with a mandatory attribute
org.w3c.dom.xpath, \
org.xml.sax, \
org.xml.sax.ext, \
- org.xml.sax.helpers
+ org.xml.sax.helpers, \
+ javax.annotation.processing
jre-1.8= \
javax.accessibility, \
org.w3c.dom.xpath, \
org.xml.sax, \
org.xml.sax.ext, \
- org.xml.sax.helpers
+ org.xml.sax.helpers, \
+ javax.annotation.processing
<groupId>org.opendaylight.controller.md</groupId>
<artifactId>topology-lldp-discovery</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>liblldp</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller.md</groupId>
<artifactId>topology-manager</artifactId>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
- <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>
+ <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} %X{akkaSource} - %msg%n</pattern>
</encoder>
</appender>
<appender name="opendaylight.log" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Web modules -->
<logger name="org.opendaylight.controller.web" level="INFO"/>
+ <!-- Clustering -->
+ <logger name="org.opendaylight.controller.cluster" level="INFO"/>
+ <logger name="org.opendaylight.controller.cluster.datastore.node" level="INFO"/>
+
<!--
Unsynchronized controller startup causes models to crop up in random
order, which results in temporary inability to fully resolve a model,
</schema-service>
</module>
- <!-- DISTRIBUTED_DATA_STORE -->
- <!-- Enable the following modules if you want to use the Distributed Data Store instead of the InMemoryDataStore -->
- <!--
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider">prefix:distributed-operational-datastore-provider</type>
- <name>distributed-operational-store-module</name>
- <operational-schema-service>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
- <name>yang-schema-service</name>
- </operational-schema-service>
- </module>
-
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider">prefix:distributed-config-datastore-provider</type>
- <name>distributed-config-store-module</name>
- <configschema-service>
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
- <name>yang-schema-service</name>
- </config-schema-service>
- </module>
- -->
-
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:inmemory-datastore-provider">prefix:inmemory-operational-datastore-provider</type>
<name>operational-store-service</name>
<config-data-store>
<type xmlns:config-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store">config-dom-store-spi:config-dom-datastore</type>
<name>config-store-service</name>
- <!-- DISTRIBUTED_DATA_STORE -->
- <!--
- <name>distributed-config-store-service</name>
- -->
</config-data-store>
<operational-data-store>
<type xmlns:operational-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store">operational-dom-store-spi:operational-dom-datastore</type>
<name>operational-store-service</name>
- <!-- DISTRIBUTED_DATA_STORE -->
- <!--
- <name>distributed-operational-store-service</name>
- -->
-
</operational-data-store>
</module>
<module>
</binding-mapping-service>
</binding-forwarded-data-broker>
</module>
- <!-- Cluster RPC -->
- <!-- Enable the following module if you want to use remote rpc connector
- <module>
- <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">prefix:remote-rpc-connector</type>
- <name>remote-rpc-connector</name>
- <dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">
- <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
- <name>dom-broker</name>
- </dom-broker>
- </module>
- -->
</modules>
<services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
<service>
</instance>
</service>
- <!-- DISTRIBUTED_DATA_STORE -->
- <!-- Enable the following if you want to use the Distributed Data Store instead of the InMemory Data Store -->
- <!-- Note that you MUST delete the InMemoryDataStore related services which provide config-dom-datastore and operational-dom-datastore -->
- <!--
- <service>
- <type xmlns:config-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store">config-dom-store-spi:config-dom-datastore</type>
- <instance>
- <name>distributed-config-store-service</name>
- <provider>/modules/module[type='distributed-config-datastore-provider'][name='distributed-config-store-module']</provider>
- </instance>
- </service>
- <service>
- <type xmlns:operational-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store">operational-dom-store-spi:operational-dom-datastore</type>
- <instance>
- <name>distributed-operational-store-service</name>
- <provider>/modules/module[type='distributed-operational-datastore-provider'][name='distributed-operational-store-module']</provider>
- </instance>
- </service>
- -->
-
- <!-- DISTRIBUTED_DATA_STORE -->
- <!-- Delete the following two services (config-store-service and operational-store-service) if you want to use the distributed data store instead -->
<service>
<type xmlns:config-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store">config-dom-store-spi:config-dom-datastore</type>
<instance>
return currentBehavior.state();
}
+    // Accessor for subclasses: last entry appended to the replicated log
+    // (whatever replicatedLog.last() returns when the log is empty —
+    // presumably null; confirm against the ReplicatedLog implementation).
+    protected ReplicatedLogEntry getLastLogEntry() {
+        return replicatedLog.last();
+    }
+
+    // Current RAFT term as recorded in this actor's term information.
+    protected Long getCurrentTerm(){
+        return context.getTermInformation().getCurrentTerm();
+    }
+
+    // Highest log index known to be committed, from the actor context.
+    protected Long getCommitIndex(){
+        return context.getCommitIndex();
+    }
+
+    // Highest log index applied to the state machine, from the actor context.
+    protected Long getLastApplied(){
+        return context.getLastApplied();
+    }
+
/**
* setPeerAddress sets the address of a known peer at a later time.
* <p>
}
@Override public void update(long currentTerm, String votedFor) {
- LOG.info("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
+ LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
this.currentTerm = currentTerm;
this.votedFor = votedFor;
protected RaftState requestVote(ActorRef sender,
RequestVote requestVote) {
+
+ context.getLogger().debug(requestVote.toString());
+
boolean grantVote = false;
// Reply false if term < currentTerm (§5.1)
*
* @param index a log index that is known to be committed
*/
- protected void applyLogToStateMachine(long index) {
+ protected void applyLogToStateMachine(final long index) {
// Now maybe we apply to the state machine
for (long i = context.getLastApplied() + 1;
i < index + 1; i++) {
}
// Send a local message to the local RaftActor (it's derived class to be
// specific to apply the log to it's index)
+ context.getLogger().debug("Setting last applied to {}", index);
context.setLastApplied(index);
}
@Override protected RaftState handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- context.getLogger().info("Candidate: Received {}", appendEntries.toString());
+ context.getLogger().debug(appendEntries.toString());
return state();
}
if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
context.getLogger()
- .info("Follower: Received {}", appendEntries.toString());
+ .debug(appendEntries.toString());
}
// TODO : Refactor this method into a bunch of smaller methods
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
@Override protected RaftState handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- context.getLogger().info("Leader: Received {}", appendEntries.toString());
+ context.getLogger().debug(appendEntries.toString());
return state();
}
if(! appendEntriesReply.isSuccess()) {
context.getLogger()
- .info("Leader: Received {}", appendEntriesReply.toString());
+ .debug(appendEntriesReply.toString());
}
// Update the FollowerLogInformation
context.getLogger().debug("Replicate message " + logIndex);
+ // Create a tracker entry we will use this later to notify the
+ // client actor
+ trackerList.add(
+ new ClientRequestTrackerImpl(replicate.getClientActor(),
+ replicate.getIdentifier(),
+ logIndex)
+ );
+
if (followers.size() == 0) {
- context.setCommitIndex(
- replicate.getReplicatedLogEntry().getIndex());
-
- context.getActor()
- .tell(new ApplyState(replicate.getClientActor(),
- replicate.getIdentifier(),
- replicate.getReplicatedLogEntry()),
- context.getActor()
- );
+ context.setCommitIndex(logIndex);
+ applyLogToStateMachine(logIndex);
} else {
-
- // Create a tracker entry we will use this later to notify the
- // client actor
- trackerList.add(
- new ClientRequestTrackerImpl(replicate.getClientActor(),
- replicate.getIdentifier(),
- logIndex)
- );
-
sendAppendEntries();
}
}
List<ReplicatedLogEntry> entries = Collections.emptyList();
if (context.getReplicatedLog().isPresent(nextIndex)) {
- // TODO: Instead of sending all entries from nextIndex
- // only send a fixed number of entries to each follower
- // This is to avoid the situation where there are a lot of
- // entries to install for a fresh follower or to a follower
- // that has fallen too far behind with the log but yet is not
- // eligible to receive a snapshot
+ // FIXME : Sending one entry at a time
entries =
context.getReplicatedLog().getFrom(nextIndex, 1);
}
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
MockRaftActorContext actorContext =
new MockRaftActorContext("test", getSystem(), raftActor);
+ actorContext.getReplicatedLog().removeFrom(0);
+
+ actorContext.getReplicatedLog().append(new ReplicatedLogImplEntry(0, 1,
+ new MockRaftActorContext.MockPayload("foo")));
+
+ ReplicatedLogImplEntry entry =
+ new ReplicatedLogImplEntry(1, 1,
+ new MockRaftActorContext.MockPayload("foo"));
+
+ actorContext.getReplicatedLog().append(entry);
+
Leader leader = new Leader(actorContext);
RaftState raftState = leader
- .handleMessage(senderActor, new Replicate(null, "state-id",
- new MockRaftActorContext.MockReplicatedLogEntry(1,
- 100,
- new MockRaftActorContext.MockPayload("foo"))
- ));
+ .handleMessage(senderActor, new Replicate(null, "state-id",entry));
// State should not change
assertEquals(RaftState.Leader, raftState);
- assertEquals(100, actorContext.getCommitIndex());
+ assertEquals(1, actorContext.getCommitIndex());
final String out =
new ExpectMsg<String>(duration("1 seconds"),
*/
package org.opendaylight.controller.md.sal.binding.impl;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
public class AbstractReadWriteTransaction extends AbstractWriteTransaction<DOMDataReadWriteTransaction> {
org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier currentPath = org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.create(
currentArguments);
- final Optional<NormalizedNode<?, ?>> d;
+ final Boolean exists;
try {
- d = getDelegate().read(store, currentPath).get();
- } catch (InterruptedException | ExecutionException e) {
+ exists = getDelegate().exists(store, currentPath).checkedGet();
+ } catch (ReadFailedException e) {
LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
throw new IllegalStateException("Failed to read pre-existing data", e);
}
- if (!d.isPresent() && iterator.hasNext()) {
+ if (!exists && iterator.hasNext()) {
getDelegate().merge(store, currentPath, currentOp.createDefault(currentArg));
}
}
--- /dev/null
+package org.opendaylight.controller.md.sal.binding.impl.test;
+
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBackwardsCompatibleDataBroker;
+import org.opendaylight.controller.md.sal.binding.test.AbstractSchemaAwareTest;
+import org.opendaylight.controller.md.sal.binding.test.DataBrokerTestCustomizer;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+import java.util.concurrent.ExecutionException;
+
+import static junit.framework.TestCase.assertNotNull;
+
+public class ForwardedBackwardsCompatibleDataBrokerTest extends
+ AbstractSchemaAwareTest {
+
+ private DataBrokerTestCustomizer testCustomizer;
+ private ForwardedBackwardsCompatibleDataBroker dataBroker;
+ private DOMDataBroker domBroker;
+
+ private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
+ private static final TopLevelListKey TOP_LIST_KEY = new TopLevelListKey("foo");
+ private static final InstanceIdentifier<TopLevelList> NODE_PATH = TOP_PATH.child(TopLevelList.class, TOP_LIST_KEY);
+ private static final TopLevelList NODE = new TopLevelListBuilder().setKey(TOP_LIST_KEY).build();
+
+ protected DataBrokerTestCustomizer createDataBrokerTestCustomizer() {
+ return new DataBrokerTestCustomizer();
+ }
+
+ @Override
+ protected void setupWithSchema(final SchemaContext context) {
+ testCustomizer = createDataBrokerTestCustomizer();
+
+ domBroker = testCustomizer.createDOMDataBroker();
+ dataBroker = testCustomizer.createBackwardsCompatibleDataBroker();
+ testCustomizer.updateSchema(context);
+ }
+
+
+ /**
+     * The purpose of this test is to exercise the backwards-compatible broker.
+     * <p>
+     * It executes the code path that ensures that the parents of a given
+     * node are created automatically.
+ *
+ * @see org.opendaylight.controller.md.sal.binding.impl.AbstractReadWriteTransaction#ensureParentsByMerge(org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType, org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier, org.opendaylight.yangtools.yang.binding.InstanceIdentifier)
+ */
+ @Test
+ public void test() throws InterruptedException, ExecutionException {
+ DataModificationTransaction writeTx =
+ dataBroker.beginTransaction();
+
+ writeTx.putOperationalData(NODE_PATH, NODE);
+
+ writeTx.commit();
+
+ // TOP_PATH should exist as it is the parent of NODE_PATH
+ DataObject object = dataBroker.readOperationalData(TOP_PATH);
+
+ assertNotNull(object);
+
+ }
+
+
+}
import javassist.ClassPool;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBackwardsCompatibleDataBroker;
import org.opendaylight.controller.md.sal.binding.impl.ForwardedBindingDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
}
public DOMStore createConfigurationDatastore() {
- InMemoryDOMDataStore store = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("CFG",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
schemaService.registerSchemaContextListener(store);
return store;
}
public DOMStore createOperationalDatastore() {
- InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
schemaService.registerSchemaContextListener(store);
return store;
}
return new ForwardedBindingDataBroker(getDOMDataBroker(), getMappingService(), getSchemaService());
}
+ public ForwardedBackwardsCompatibleDataBroker createBackwardsCompatibleDataBroker() {
+ return new ForwardedBackwardsCompatibleDataBroker(getDOMDataBroker(), getMappingService(), getSchemaService(), MoreExecutors.sameThreadExecutor());
+ }
+
+
private SchemaService getSchemaService() {
return schemaService;
}
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.MutableClassToInstanceMap;
import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
@Beta
public class BindingTestContext implements AutoCloseable {
public void startNewDomDataBroker() {
checkState(executor != null, "Executor needs to be set");
- InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", executor);
- InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", executor);
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", executor,
+ MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", executor,
+ MoreExecutors.sameThreadExecutor());
newDatastores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
.put(LogicalDatastoreType.OPERATIONAL, operStore)
.put(LogicalDatastoreType.CONFIGURATION, configStore)
public class PathUtils {
public static String getParentPath(String currentElementPath){
- String parentPath = "";
+ StringBuilder parentPath = new StringBuilder();
if(currentElementPath != null){
String[] parentPaths = currentElementPath.split("/");
if(parentPaths.length > 2){
for(int i=0;i<parentPaths.length-1;i++){
if(parentPaths[i].length() > 0){
- parentPath += "/" + parentPaths[i];
+ parentPath.append("/");
+ parentPath.append(parentPaths[i]);
}
}
}
}
- return parentPath;
+ return parentPath.toString();
}
}
// @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.MergeDataReply)
}
+ public interface DataExistsOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ boolean hasInstanceIdentifierPathArguments();
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierPathArguments();
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierPathArgumentsOrBuilder();
+ }
+ /**
+ * Protobuf type {@code org.opendaylight.controller.mdsal.DataExists}
+ */
+ public static final class DataExists extends
+ com.google.protobuf.GeneratedMessage
+ implements DataExistsOrBuilder {
+ // Use DataExists.newBuilder() to construct.
+ private DataExists(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private DataExists(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final DataExists defaultInstance;
+ public static DataExists getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DataExists getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private DataExists(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = instanceIdentifierPathArguments_.toBuilder();
+ }
+ instanceIdentifierPathArguments_ = input.readMessage(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(instanceIdentifierPathArguments_);
+ instanceIdentifierPathArguments_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<DataExists> PARSER =
+ new com.google.protobuf.AbstractParser<DataExists>() {
+ public DataExists parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new DataExists(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<DataExists> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;
+ public static final int INSTANCEIDENTIFIERPATHARGUMENTS_FIELD_NUMBER = 1;
+ private org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier instanceIdentifierPathArguments_;
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public boolean hasInstanceIdentifierPathArguments() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierPathArguments() {
+ return instanceIdentifierPathArguments_;
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierPathArgumentsOrBuilder() {
+ return instanceIdentifierPathArguments_;
+ }
+
+ private void initFields() {
+ instanceIdentifierPathArguments_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasInstanceIdentifierPathArguments()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getInstanceIdentifierPathArguments().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, instanceIdentifierPathArguments_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, instanceIdentifierPathArguments_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.opendaylight.controller.mdsal.DataExists}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.Builder.class);
+ }
+
+ // Construct using org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getInstanceIdentifierPathArgumentsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (instanceIdentifierPathArgumentsBuilder_ == null) {
+ instanceIdentifierPathArguments_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+ } else {
+ instanceIdentifierPathArgumentsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor;
+ }
+
+ public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists getDefaultInstanceForType() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.getDefaultInstance();
+ }
+
+ public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists build() {
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists buildPartial() {
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists result = new org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (instanceIdentifierPathArgumentsBuilder_ == null) {
+ result.instanceIdentifierPathArguments_ = instanceIdentifierPathArguments_;
+ } else {
+ result.instanceIdentifierPathArguments_ = instanceIdentifierPathArgumentsBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists) {
+ return mergeFrom((org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists other) {
+ if (other == org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.getDefaultInstance()) return this;
+ if (other.hasInstanceIdentifierPathArguments()) {
+ mergeInstanceIdentifierPathArguments(other.getInstanceIdentifierPathArguments());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasInstanceIdentifierPathArguments()) {
+
+ return false;
+ }
+ if (!getInstanceIdentifierPathArguments().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;
+ private org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier instanceIdentifierPathArguments_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder> instanceIdentifierPathArgumentsBuilder_;
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public boolean hasInstanceIdentifierPathArguments() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierPathArguments() {
+ if (instanceIdentifierPathArgumentsBuilder_ == null) {
+ return instanceIdentifierPathArguments_;
+ } else {
+ return instanceIdentifierPathArgumentsBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public Builder setInstanceIdentifierPathArguments(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier value) {
+ if (instanceIdentifierPathArgumentsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ instanceIdentifierPathArguments_ = value;
+ onChanged();
+ } else {
+ instanceIdentifierPathArgumentsBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public Builder setInstanceIdentifierPathArguments(
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder builderForValue) {
+ if (instanceIdentifierPathArgumentsBuilder_ == null) {
+ instanceIdentifierPathArguments_ = builderForValue.build();
+ onChanged();
+ } else {
+ instanceIdentifierPathArgumentsBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public Builder mergeInstanceIdentifierPathArguments(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier value) {
+ if (instanceIdentifierPathArgumentsBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ instanceIdentifierPathArguments_ != org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance()) {
+ instanceIdentifierPathArguments_ =
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.newBuilder(instanceIdentifierPathArguments_).mergeFrom(value).buildPartial();
+ } else {
+ instanceIdentifierPathArguments_ = value;
+ }
+ onChanged();
+ } else {
+ instanceIdentifierPathArgumentsBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public Builder clearInstanceIdentifierPathArguments() {
+ if (instanceIdentifierPathArgumentsBuilder_ == null) {
+ instanceIdentifierPathArguments_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+ onChanged();
+ } else {
+ instanceIdentifierPathArgumentsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder getInstanceIdentifierPathArgumentsBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getInstanceIdentifierPathArgumentsFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierPathArgumentsOrBuilder() {
+ if (instanceIdentifierPathArgumentsBuilder_ != null) {
+ return instanceIdentifierPathArgumentsBuilder_.getMessageOrBuilder();
+ } else {
+ return instanceIdentifierPathArguments_;
+ }
+ }
+ /**
+ * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder>
+ getInstanceIdentifierPathArgumentsFieldBuilder() {
+ if (instanceIdentifierPathArgumentsBuilder_ == null) {
+ instanceIdentifierPathArgumentsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder>(
+ instanceIdentifierPathArguments_,
+ getParentForChildren(),
+ isClean());
+ instanceIdentifierPathArguments_ = null;
+ }
+ return instanceIdentifierPathArgumentsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.DataExists)
+ }
+
+ static {
+ defaultInstance = new DataExists(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.DataExists)
+ }
+
+ public interface DataExistsReplyOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bool exists = 1;
+ /**
+ * <code>required bool exists = 1;</code>
+ */
+ boolean hasExists();
+ /**
+ * <code>required bool exists = 1;</code>
+ */
+ boolean getExists();
+ }
+ /**
+ * Protobuf type {@code org.opendaylight.controller.mdsal.DataExistsReply}
+ */
+ public static final class DataExistsReply extends
+ com.google.protobuf.GeneratedMessage
+ implements DataExistsReplyOrBuilder {
+ // Use DataExistsReply.newBuilder() to construct.
+ private DataExistsReply(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private DataExistsReply(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final DataExistsReply defaultInstance;
+ public static DataExistsReply getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DataExistsReply getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private DataExistsReply(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ exists_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<DataExistsReply> PARSER =
+ new com.google.protobuf.AbstractParser<DataExistsReply>() {
+ public DataExistsReply parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new DataExistsReply(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<DataExistsReply> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bool exists = 1;
+ public static final int EXISTS_FIELD_NUMBER = 1;
+ private boolean exists_;
+ /**
+ * <code>required bool exists = 1;</code>
+ */
+ public boolean hasExists() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool exists = 1;</code>
+ */
+ public boolean getExists() {
+ return exists_;
+ }
+
+ private void initFields() {
+ exists_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasExists()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, exists_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, exists_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.opendaylight.controller.mdsal.DataExistsReply}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReplyOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.Builder.class);
+ }
+
+ // Construct using org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ exists_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor;
+ }
+
+ public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply getDefaultInstanceForType() {
+ return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.getDefaultInstance();
+ }
+
+ public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply build() {
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply buildPartial() {
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply result = new org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.exists_ = exists_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply) {
+ return mergeFrom((org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply other) {
+ if (other == org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.getDefaultInstance()) return this;
+ if (other.hasExists()) {
+ setExists(other.getExists());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasExists()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required bool exists = 1;
+ private boolean exists_ ;
+ /**
+ * <code>required bool exists = 1;</code>
+ */
+ public boolean hasExists() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool exists = 1;</code>
+ */
+ public boolean getExists() {
+ return exists_;
+ }
+ /**
+ * <code>required bool exists = 1;</code>
+ */
+ public Builder setExists(boolean value) {
+ bitField0_ |= 0x00000001;
+ exists_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool exists = 1;</code>
+ */
+ public Builder clearExists() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ exists_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.DataExistsReply)
+ }
+
+ static {
+ defaultInstance = new DataExistsReply(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.DataExistsReply)
+ }
+
private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_opendaylight_controller_mdsal_CloseTransaction_descriptor;
private static
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_opendaylight_controller_mdsal_MergeDataReply_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_opendaylight_controller_mdsal_DataExists_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_opendaylight_controller_mdsal_DataExistsReply_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
"thArguments\030\001 \002(\01325.org.opendaylight.con" +
"troller.mdsal.InstanceIdentifier\022?\n\016norm" +
"alizedNode\030\002 \002(\0132\'.org.opendaylight.cont" +
- "roller.mdsal.Node\"\020\n\016MergeDataReplyBV\n:o" +
- "rg.opendaylight.controller.protobuff.mes" +
- "sages.transactionB\030ShardTransactionMessa" +
- "ges"
+ "roller.mdsal.Node\"\020\n\016MergeDataReply\"l\n\nD" +
+ "ataExists\022^\n\037instanceIdentifierPathArgum" +
+ "ents\030\001 \002(\01325.org.opendaylight.controller" +
+ ".mdsal.InstanceIdentifier\"!\n\017DataExistsR" +
+ "eply\022\016\n\006exists\030\001 \002(\010BV\n:org.opendaylight",
+ ".controller.protobuff.messages.transacti" +
+ "onB\030ShardTransactionMessages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_MergeDataReply_descriptor,
new java.lang.String[] { });
+ internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor =
+ getDescriptor().getMessageTypes().get(14);
+ internal_static_org_opendaylight_controller_mdsal_DataExists_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor,
+ new java.lang.String[] { "InstanceIdentifierPathArguments", });
+ internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor =
+ getDescriptor().getMessageTypes().get(15);
+ internal_static_org_opendaylight_controller_mdsal_DataExistsReply_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor,
+ new java.lang.String[] { "Exists", });
return null;
}
};
message MergeDataReply{
}
+
+message DataExists {
+ required InstanceIdentifier instanceIdentifierPathArguments = 1;
+}
+
+message DataExistsReply {
+ required bool exists = 1;
+}
NormalizedNodeToNodeCodec codec =
new NormalizedNodeToNodeCodec(schemaContext);
+ long start = System.currentTimeMillis();
Container container =
codec.encode(instanceIdentifierFromString(id), output);
+ long end = System.currentTimeMillis();
+
+ System.out.println("Timetaken to encode :"+(end-start));
assertNotNull(container);
assertEquals(id, container.getParentPath() + "/"
// first get the node representation of normalized node
final Node node = container.getNormalizedNode();
+ start = System.currentTimeMillis();
NormalizedNode<?, ?> normalizedNode =
codec.decode(instanceIdentifierFromString(id), node);
+ end = System.currentTimeMillis();
+
+ System.out.println("Timetaken to decode :"+(end-start));
assertEquals(normalizedNode.getValue().toString(), output.getValue()
.toString());
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider">prefix:distributed-operational-datastore-provider</type>
<name>distributed-operational-store-module</name>
- <schema-service>
+ <operational-schema-service>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
<name>yang-schema-service</name>
- </schema-service>
+ </operational-schema-service>
</module>
<module>
<type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider">prefix:distributed-config-datastore-provider</type>
<name>distributed-config-store-module</name>
- <schema-service>
+ <config-schema-service>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
<name>yang-schema-service</name>
- </schema-service>
+ </config-schema-service>
</module>
<module>
netty.tcp {
hostname = "<CHANGE_ME>"
port = 2550
- maximum-frame-size = 2097152
- send-buffer-size = 52428800
- receive-buffer-size = 52428800
+ maximum-frame-size = 419430400
+ send-buffer-size = 52428800
+ receive-buffer-size = 52428800
}
}
<artifactId>akka-testkit_${scala.version}</artifactId>
</dependency>
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-slf4j_${scala.version}</artifactId>
+ </dependency>
+
<!-- SAL Dependencies -->
<dependency>
}
protected abstract void handleReceive(Object message) throws Exception;
+
+ protected void ignoreMessage(Object message){
+ LOG.debug("Unhandled message {} ", message);
+ }
+
+ protected void unknownMessage(Object message) throws Exception{
+ unhandled(message);
+ }
}
import akka.actor.ActorSystem;
import akka.cluster.Cluster;
import akka.cluster.ClusterEvent;
+import com.google.common.base.Preconditions;
public class ClusterWrapperImpl implements ClusterWrapper {
private final Cluster cluster;
private final String currentMemberName;
public ClusterWrapperImpl(ActorSystem actorSystem){
+ Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
+
cluster = Cluster.get(actorSystem);
+
+ Preconditions.checkState(cluster.getSelfRoles().size() > 0,
+ "No akka roles were specified\n" +
+ "One way to specify the member name is to pass a property on the command line like so\n" +
+ " -Dakka.cluster.roles.0=member-3\n" +
+ "member-3 here would be the name of the member"
+ );
+
currentMemberName = (String) cluster.getSelfRoles().toArray()[0];
}
public void subscribeToMemberEvents(ActorRef actorRef){
+ Preconditions.checkNotNull(actorRef, "actorRef should not be null");
+
cluster.subscribe(actorRef, ClusterEvent.initialStateAsEvents(),
ClusterEvent.MemberEvent.class,
ClusterEvent.UnreachableMember.class);
modification = null;
}
public CompositeModificationPayload(Object modification){
- this.modification = (PersistentMessages.CompositeModification) modification;
+ this.modification = (PersistentMessages.CompositeModification) Preconditions.checkNotNull(modification, "modification should not be null");
}
@Override public Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> encode() {
package org.opendaylight.controller.cluster.datastore;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigObject;
private static final Logger
LOG = LoggerFactory.getLogger(DistributedDataStore.class);
+ // Look up maps to speed things up
+
+ // key = memberName, value = list of shardNames
+ private Map<String, List<String>> memberShardNames = new HashMap<>();
+
+ // key = shardName, value = list of replicaNames (replicaNames are the same as memberNames)
+ private Map<String, List<String>> shardReplicaNames = new HashMap<>();
+
public ConfigurationImpl(String moduleShardsConfigPath,
String modulesConfigPath){
+ Preconditions.checkNotNull(moduleShardsConfigPath, "moduleShardsConfigPath should not be null");
+ Preconditions.checkNotNull(modulesConfigPath, "modulesConfigPath should not be null");
+
+
File moduleShardsFile = new File("./configuration/initial/" + moduleShardsConfigPath);
File modulesFile = new File("./configuration/initial/" + modulesConfigPath);
}
@Override public List<String> getMemberShardNames(String memberName){
+
+ Preconditions.checkNotNull(memberName, "memberName should not be null");
+
+ if(memberShardNames.containsKey(memberName)){
+ return memberShardNames.get(memberName);
+ }
+
List<String> shards = new ArrayList();
for(ModuleShard ms : moduleShards){
for(Shard s : ms.getShards()){
}
}
}
+
+ memberShardNames.put(memberName, shards);
+
return shards;
}
@Override public Optional<String> getModuleNameFromNameSpace(String nameSpace) {
+
+ Preconditions.checkNotNull(nameSpace, "nameSpace should not be null");
+
for(Module m : modules){
if(m.getNameSpace().equals(nameSpace)){
return Optional.of(m.getName());
}
@Override public List<String> getShardNamesFromModuleName(String moduleName) {
+
+ Preconditions.checkNotNull(moduleName, "moduleName should not be null");
+
for(ModuleShard m : moduleShards){
if(m.getModuleName().equals(moduleName)){
List<String> l = new ArrayList<>();
}
@Override public List<String> getMembersFromShardName(String shardName) {
- List<String> shards = new ArrayList();
+
+ Preconditions.checkNotNull(shardName, "shardName should not be null");
+
+ if(shardReplicaNames.containsKey(shardName)){
+ return shardReplicaNames.get(shardName);
+ }
+
for(ModuleShard ms : moduleShards){
for(Shard s : ms.getShards()) {
if(s.getName().equals(shardName)){
- return s.getReplicas();
+ List<String> replicas = s.getReplicas();
+ shardReplicaNames.put(shardName, replicas);
+ return replicas;
}
}
}
+ shardReplicaNames.put(shardName, Collections.EMPTY_LIST);
return Collections.EMPTY_LIST;
}
import akka.actor.Props;
import akka.japi.Creator;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
import org.opendaylight.controller.cluster.datastore.messages.DataChangedReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
public DataChangeListener(SchemaContext schemaContext,
AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener, YangInstanceIdentifier pathId) {
- this.listener = listener;
- this.schemaContext = schemaContext;
- this.pathId = pathId;
+
+ this.schemaContext = Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
+ this.listener = Preconditions.checkNotNull(listener, "listener should not be null");
+ this.pathId = Preconditions.checkNotNull(pathId, "pathId should not be null");
}
@Override public void handleReceive(Object message) throws Exception {
notificationsEnabled = message.isEnabled();
}
- public void dataChanged(Object message) {
+ private void dataChanged(Object message) {
// Do nothing if notifications are not enabled
if(!notificationsEnabled){
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorSelection;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
private final SchemaContext schemaContext;
public DataChangeListenerProxy(SchemaContext schemaContext,ActorSelection dataChangeListenerActor) {
- this.dataChangeListenerActor = dataChangeListenerActor;
+ this.dataChangeListenerActor = Preconditions.checkNotNull(dataChangeListenerActor, "dataChangeListenerActor should not be null");
this.schemaContext = schemaContext;
}
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
+import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.util.PropertyUtils;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.concurrent.Executors;
-
/**
*
*/
private static final Logger
LOG = LoggerFactory.getLogger(DistributedDataStore.class);
- private static final int DEFAULT_EXECUTOR_POOL_SIZE = 10;
+ private static final String EXECUTOR_MAX_POOL_SIZE_PROP =
+ "mdsal.dist-datastore-executor-pool.size";
+ private static final int DEFAULT_EXECUTOR_MAX_POOL_SIZE = 10;
+
+ private static final String EXECUTOR_MAX_QUEUE_SIZE_PROP =
+ "mdsal.dist-datastore-executor-queue.size";
+ private static final int DEFAULT_EXECUTOR_MAX_QUEUE_SIZE = 5000;
- private final String type;
private final ActorContext actorContext;
private SchemaContext schemaContext;
-
-
/**
* Executor used to run FutureTask's
*
* This is typically used when we need to make a request to an actor and
* wait for it's response and the consumer needs to be provided a Future.
- *
- * FIXME : Make the thread pool size configurable.
*/
private final ListeningExecutorService executor =
- MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(DEFAULT_EXECUTOR_POOL_SIZE));
+ MoreExecutors.listeningDecorator(
+ SpecialExecutors.newBlockingBoundedFastThreadPool(
+ PropertyUtils.getIntSystemProperty(
+ EXECUTOR_MAX_POOL_SIZE_PROP,
+ DEFAULT_EXECUTOR_MAX_POOL_SIZE),
+ PropertyUtils.getIntSystemProperty(
+ EXECUTOR_MAX_QUEUE_SIZE_PROP,
+ DEFAULT_EXECUTOR_MAX_QUEUE_SIZE), "DistDataStore"));
public DistributedDataStore(ActorSystem actorSystem, String type, ClusterWrapper cluster, Configuration configuration) {
- this(new ActorContext(actorSystem, actorSystem
+ Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
+ Preconditions.checkNotNull(type, "type should not be null");
+ Preconditions.checkNotNull(cluster, "cluster should not be null");
+ Preconditions.checkNotNull(configuration, "configuration should not be null");
+
+
+ String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
+
+ LOG.info("Creating ShardManager : {}", shardManagerId);
+
+ this.actorContext = new ActorContext(actorSystem, actorSystem
.actorOf(ShardManager.props(type, cluster, configuration),
- "shardmanager-" + type), cluster, configuration), type);
+ shardManagerId ), cluster, configuration);
}
- public DistributedDataStore(ActorContext actorContext, String type) {
- this.type = type;
- this.actorContext = actorContext;
+ public DistributedDataStore(ActorContext actorContext) {
+ this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
}
YangInstanceIdentifier path, L listener,
AsyncDataBroker.DataChangeScope scope) {
+ Preconditions.checkNotNull(path, "path should not be null");
+ Preconditions.checkNotNull(listener, "listener should not be null");
+
+
+ LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
+
ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
DataChangeListener.props(schemaContext,listener,path ));
import akka.japi.Creator;
import akka.serialization.Serialization;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.raft.ConfigParams;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import scala.concurrent.duration.FiniteDuration;
import java.util.ArrayList;
+import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
public static final String DEFAULT_NAME = "default";
- private final ListeningExecutorService storeExecutor =
- MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
-
+ // The state of this Shard
private final InMemoryDOMDataStore store;
private final Map<Object, DOMStoreThreePhaseCommitCohort>
Logging.getLogger(getContext().system(), this);
// By default persistent will be true and can be turned off using the system
- // property persistent
+ // property shard.persistent
private final boolean persistent;
- private final String name;
+ /// The name of this shard
+ private final ShardIdentifier name;
private volatile SchemaContext schemaContext;
private final List<ActorSelection> dataChangeListeners = new ArrayList<>();
- private Shard(String name, Map<String, String> peerAddresses) {
- super(name, peerAddresses, Optional.of(configParams));
+ private Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses) {
+ super(name.toString(), mapPeerAddresses(peerAddresses), Optional.of(configParams));
this.name = name;
this.persistent = !"false".equals(setting);
- LOG.info("Creating shard : {} persistent : {}", name, persistent);
+ LOG.info("Shard created : {} persistent : {}", name, persistent);
- store = new InMemoryDOMDataStore(name, storeExecutor);
+ store = InMemoryDOMDataStoreFactory.create(name.toString(), null);
- shardMBean = ShardMBeanFactory.getShardStatsMBean(name);
+ shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString());
}
- public static Props props(final String name, final Map<String, String> peerAddresses) {
+ private static Map<String, String> mapPeerAddresses(Map<ShardIdentifier, String> peerAddresses){
+ Map<String , String> map = new HashMap<>();
+
+ for(Map.Entry<ShardIdentifier, String> entry : peerAddresses.entrySet()){
+ map.put(entry.getKey().toString(), entry.getValue());
+ }
+
+ return map;
+ }
+
+
+
+
+ public static Props props(final ShardIdentifier name,
+ final Map<ShardIdentifier, String> peerAddresses) {
+ Preconditions.checkNotNull(name, "name should not be null");
+ Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
+
return Props.create(new Creator<Shard>() {
@Override
}
- @Override public void onReceiveCommand(Object message){
- LOG.debug("Received message {} from {}", message.getClass().toString(), getSender());
+ @Override public void onReceiveCommand(Object message) {
+ LOG.debug("Received message {} from {}", message.getClass().toString(),
+ getSender());
- if (message.getClass().equals(CreateTransactionChain.SERIALIZABLE_CLASS)) {
- if(isLeader()) {
+ if (message.getClass()
+ .equals(CreateTransactionChain.SERIALIZABLE_CLASS)) {
+ if (isLeader()) {
createTransactionChain();
- } else if(getLeader() != null){
+ } else if (getLeader() != null) {
getLeader().forward(message, getContext());
}
} else if (message instanceof RegisterChangeListener) {
updateSchemaContext((UpdateSchemaContext) message);
} else if (message instanceof ForwardedCommitTransaction) {
handleForwardedCommit((ForwardedCommitTransaction) message);
- } else if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
- if(isLeader()) {
+ } else if (message.getClass()
+ .equals(CreateTransaction.SERIALIZABLE_CLASS)) {
+ if (isLeader()) {
createTransaction(CreateTransaction.fromSerializable(message));
- } else if(getLeader() != null){
+ } else if (getLeader() != null) {
getLeader().forward(message, getContext());
}
- } else if (message instanceof PeerAddressResolved){
+ } else if (message instanceof PeerAddressResolved) {
PeerAddressResolved resolved = (PeerAddressResolved) message;
- setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
+ setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
} else {
- super.onReceiveCommand(message);
+ super.onReceiveCommand(message);
}
}
- private ActorRef createTypedTransactionActor(CreateTransaction createTransaction,String transactionId){
- if(createTransaction.getTransactionType()== TransactionProxy.TransactionType.READ_ONLY.ordinal()){
- return getContext().actorOf(
- ShardTransaction.props( store.newReadOnlyTransaction(), getSelf(), schemaContext), transactionId);
+ private ActorRef createTypedTransactionActor(
+ CreateTransaction createTransaction, ShardTransactionIdentifier transactionId) {
+ if (createTransaction.getTransactionType()
+ == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
+
+ shardMBean.incrementReadOnlyTransactionCount();
+
+ return getContext().actorOf(
+ ShardTransaction
+ .props(store.newReadOnlyTransaction(), getSelf(),
+ schemaContext), transactionId.toString());
+
+ } else if (createTransaction.getTransactionType()
+ == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
- }else if (createTransaction.getTransactionType()== TransactionProxy.TransactionType.READ_WRITE.ordinal()){
- return getContext().actorOf(
- ShardTransaction.props( store.newReadWriteTransaction(), getSelf(), schemaContext), transactionId);
+ shardMBean.incrementReadWriteTransactionCount();
+ return getContext().actorOf(
+ ShardTransaction
+ .props(store.newReadWriteTransaction(), getSelf(),
+ schemaContext), transactionId.toString());
- }else if (createTransaction.getTransactionType()== TransactionProxy.TransactionType.WRITE_ONLY.ordinal()){
- return getContext().actorOf(
- ShardTransaction.props( store.newWriteOnlyTransaction(), getSelf(), schemaContext), transactionId);
- }else{
- throw new IllegalArgumentException ("CreateTransaction message has unidentified transaction type="+createTransaction.getTransactionType()) ;
- }
- }
+
+ } else if (createTransaction.getTransactionType()
+ == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
+
+ shardMBean.incrementWriteOnlyTransactionCount();
+
+ return getContext().actorOf(
+ ShardTransaction
+ .props(store.newWriteOnlyTransaction(), getSelf(),
+ schemaContext), transactionId.toString());
+ } else {
+ // FIXME: This does not seem right
+ throw new IllegalArgumentException(
+ "CreateTransaction message has unidentified transaction type="
+ + createTransaction.getTransactionType());
+ }
+ }
private void createTransaction(CreateTransaction createTransaction) {
- String transactionId = "shard-" + createTransaction.getTransactionId();
- LOG.info("Creating transaction : {} " , transactionId);
- ActorRef transactionActor = createTypedTransactionActor(createTransaction,transactionId);
+ ShardTransactionIdentifier transactionId = ShardTransactionIdentifier.builder().remoteTransactionId(createTransaction.getTransactionId()).build();
+ LOG.debug("Creating transaction : {} ", transactionId);
+ ActorRef transactionActor =
+ createTypedTransactionActor(createTransaction, transactionId);
getSender()
- .tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor), createTransaction.getTransactionId()).toSerializable(),
+ .tell(new CreateTransactionReply(
+ Serialization.serializedActorPath(transactionActor),
+ createTransaction.getTransactionId()).toSerializable(),
getSelf());
}
private void commit(final ActorRef sender, Object serialized) {
- Modification modification = MutableCompositeModification.fromSerializable(serialized, schemaContext);
+ Modification modification = MutableCompositeModification
+ .fromSerializable(serialized, schemaContext);
DOMStoreThreePhaseCommitCohort cohort =
modificationToCohort.remove(serialized);
if (cohort == null) {
- LOG.error(
- "Could not find cohort for modification : {}", modification);
- LOG.info("Writing modification using a new transaction");
+ LOG.debug(
+ "Could not find cohort for modification : {}. Writing modification using a new transaction",
+ modification);
DOMStoreReadWriteTransaction transaction =
store.newReadWriteTransaction();
modification.apply(transaction);
future.get();
future = commitCohort.commit();
future.get();
- } catch (InterruptedException e) {
- LOG.error("Failed to commit", e);
- } catch (ExecutionException e) {
+ } catch (InterruptedException | ExecutionException e) {
+ shardMBean.incrementFailedTransactionsCount();
LOG.error("Failed to commit", e);
+ return;
}
+ //we want to just apply the recovery commit and return
+ shardMBean.incrementCommittedTransactionCount();
+ return;
}
final ListenableFuture<Void> future = cohort.commit();
- shardMBean.incrementCommittedTransactionCount();
final ActorRef self = getSelf();
future.addListener(new Runnable() {
@Override
public void run() {
try {
future.get();
-
- if(sender != null) {
sender
.tell(new CommitTransactionReply().toSerializable(),
self);
- } else {
- LOG.error("sender is null ???");
- }
+ shardMBean.incrementCommittedTransactionCount();
+ shardMBean.setLastCommittedTransactionTime(new Date());
} catch (InterruptedException | ExecutionException e) {
- // FIXME : Handle this properly
- LOG.error(e, "An exception happened when committing");
+ shardMBean.incrementFailedTransactionsCount();
+ sender.tell(new akka.actor.Status.Failure(e),self);
}
}
}, getContext().dispatcher());
}
private void handleForwardedCommit(ForwardedCommitTransaction message) {
- Object serializedModification = message.getModification().toSerializable();
+ Object serializedModification =
+ message.getModification().toSerializable();
modificationToCohort
- .put(serializedModification , message.getCohort());
+ .put(serializedModification, message.getCohort());
- if(persistent) {
- this.persistData(getSender(), "identifier", new CompositeModificationPayload(serializedModification));
+ if (persistent) {
+ this.persistData(getSender(), "identifier",
+ new CompositeModificationPayload(serializedModification));
} else {
this.commit(getSender(), serializedModification);
}
private void registerChangeListener(
RegisterChangeListener registerChangeListener) {
- LOG.debug("registerDataChangeListener for " + registerChangeListener.getPath());
+ LOG.debug("registerDataChangeListener for {}", registerChangeListener
+ .getPath());
ActorSelection dataChangeListenerPath = getContext()
// Notify the listener if notifications should be enabled or not
// If this shard is the leader then it will enable notifications else
// it will not
- dataChangeListenerPath.tell(new EnableNotification(isLeader()), getSelf());
+ dataChangeListenerPath
+ .tell(new EnableNotification(isLeader()), getSelf());
// Now store a reference to the data change listener so it can be notified
// at a later point if notifications should be enabled or disabled
dataChangeListeners.add(dataChangeListenerPath);
AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>
- listener = new DataChangeListenerProxy(schemaContext,dataChangeListenerPath);
+ listener =
+ new DataChangeListenerProxy(schemaContext, dataChangeListenerPath);
org.opendaylight.yangtools.concepts.ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
registration =
getContext().actorOf(
DataChangeListenerRegistration.props(registration));
- LOG.debug("registerDataChangeListener sending reply, listenerRegistrationPath = " + listenerRegistration.path().toString());
+ LOG.debug(
+ "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
+ , listenerRegistration.path().toString());
getSender()
.tell(new RegisterChangeListenerReply(listenerRegistration.path()),
ShardTransactionChain.props(chain, schemaContext));
getSender()
.tell(new CreateTransactionChainReply(transactionChain.path())
- .toSerializable(),
+ .toSerializable(),
getSelf());
}
@Override protected void applyState(ActorRef clientActor, String identifier,
Object data) {
- if(data instanceof CompositeModificationPayload){
+ if (data instanceof CompositeModificationPayload) {
Object modification =
((CompositeModificationPayload) data).getModification();
- if(modification != null){
+ if (modification != null) {
commit(clientActor, modification);
} else {
- LOG.error("modification is null - this is very unexpected");
+ LOG.error(
+ "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
+ identifier, clientActor.path().toString());
}
LOG.error("Unknown state received {}", data);
}
+ // Update stats
+ ReplicatedLogEntry lastLogEntry = getLastLogEntry();
+
+ if(lastLogEntry != null){
+ shardMBean.setLastLogIndex(lastLogEntry.getIndex());
+ shardMBean.setLastLogTerm(lastLogEntry.getTerm());
+ }
+
+ shardMBean.setCommitIndex(getCommitIndex());
+ shardMBean.setLastApplied(getLastApplied());
+
}
@Override protected Object createSnapshot() {
}
@Override protected void onStateChanged() {
- for(ActorSelection dataChangeListener : dataChangeListeners){
- dataChangeListener.tell(new EnableNotification(isLeader()), getSelf());
+ for (ActorSelection dataChangeListener : dataChangeListeners) {
+ dataChangeListener
+ .tell(new EnableNotification(isLeader()), getSelf());
}
- if(getLeaderId() != null){
+ if (getLeaderId() != null) {
shardMBean.setLeader(getLeaderId());
}
shardMBean.setRaftState(getRaftState().name());
+ shardMBean.setCurrentTerm(getCurrentTerm());
}
@Override public String persistenceId() {
- return this.name;
+ return this.name.toString();
}
import akka.japi.Creator;
import akka.japi.Function;
import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfoMBean;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import scala.concurrent.duration.Duration;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
private final Configuration configuration;
+ private ShardManagerInfoMBean mBean;
+
/**
* @param type defines the kind of data that goes into shards created by this shard manager. Examples of type would be
* configuration or operational
public static Props props(final String type,
final ClusterWrapper cluster,
final Configuration configuration) {
+
+ Preconditions.checkNotNull(type, "type should not be null");
+ Preconditions.checkNotNull(cluster, "cluster should not be null");
+ Preconditions.checkNotNull(configuration, "configuration should not be null");
+
return Props.create(new Creator<ShardManager>() {
@Override
} else if(message instanceof ClusterEvent.UnreachableMember) {
ignoreMessage(message);
} else{
- throw new Exception ("Not recognized message received, message="+message);
+ unknownMessage(message);
}
}
return;
}
- getSender().tell(new LocalShardNotFound(message.getShardName()), getSelf());
- }
-
- private void ignoreMessage(Object message){
- LOG.debug("Unhandled message : " + message);
+ getSender().tell(new LocalShardNotFound(message.getShardName()),
+ getSelf());
}
private void memberRemoved(ClusterEvent.MemberRemoved message) {
for(ShardInformation info : localShards.values()){
String shardName = info.getShardName();
- info.updatePeerAddress(getShardActorName(memberName, shardName),
+ info.updatePeerAddress(getShardIdentifier(memberName, shardName),
getShardActorPath(shardName, memberName));
}
}
private void findPrimary(FindPrimary message) {
String shardName = message.getShardName();
- List<String> members =
- configuration.getMembersFromShardName(shardName);
-
// First see if the there is a local replica for the shard
ShardInformation info = localShards.get(shardName);
if(info != null) {
}
}
+ List<String> members =
+ configuration.getMembersFromShardName(shardName);
+
if(cluster.getCurrentMemberName() != null) {
members.remove(cluster.getCurrentMemberName());
}
private String getShardActorPath(String shardName, String memberName) {
Address address = memberNameToAddress.get(memberName);
if(address != null) {
- return address.toString() + "/user/shardmanager-" + this.type + "/"
- + getShardActorName(
- memberName, shardName);
+ StringBuilder builder = new StringBuilder();
+ builder.append(address.toString())
+ .append("/user/")
+ .append(ShardManagerIdentifier.builder().type(type).build().toString())
+ .append("/")
+ .append(getShardIdentifier(memberName, shardName));
+ return builder.toString();
}
return null;
}
* @param shardName
* @return
*/
- private String getShardActorName(String memberName, String shardName){
- return memberName + "-shard-" + shardName + "-" + this.type;
+ private ShardIdentifier getShardIdentifier(String memberName, String shardName){
+ return ShardIdentifier.builder().memberName(memberName).shardName(shardName).type(type).build();
}
/**
List<String> memberShardNames =
this.configuration.getMemberShardNames(memberName);
+ List<String> localShardActorNames = new ArrayList<>();
for(String shardName : memberShardNames){
- String shardActorName = getShardActorName(memberName, shardName);
- Map<String, String> peerAddresses = getPeerAddresses(shardName);
+ ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
+ Map<ShardIdentifier, String> peerAddresses = getPeerAddresses(shardName);
ActorRef actor = getContext()
- .actorOf(Shard.props(shardActorName, peerAddresses),
- shardActorName);
+ .actorOf(Shard.props(shardId, peerAddresses),
+ shardId.toString());
+ localShardActorNames.add(shardId.toString());
localShards.put(shardName, new ShardInformation(shardName, actor, peerAddresses));
}
+ mBean = ShardManagerInfo
+ .createShardManagerMBean("shard-manager-" + this.type, localShardActorNames);
+
}
/**
* @param shardName
* @return
*/
- private Map<String, String> getPeerAddresses(String shardName){
+ private Map<ShardIdentifier, String> getPeerAddresses(String shardName){
- Map<String, String> peerAddresses = new HashMap<>();
+ Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
List<String> members =
this.configuration.getMembersFromShardName(shardName);
for(String memberName : members){
if(!currentMemberName.equals(memberName)){
- String shardActorName = getShardActorName(memberName, shardName);
+ ShardIdentifier shardId = getShardIdentifier(memberName,
+ shardName);
String path =
getShardActorPath(shardName, currentMemberName);
- peerAddresses.put(shardActorName, path);
+ peerAddresses.put(shardId, path);
}
}
return peerAddresses;
}
-
@Override
public SupervisorStrategy supervisorStrategy() {
return new OneForOneStrategy(10, Duration.create("1 minute"),
private final String shardName;
private final ActorRef actor;
private final ActorPath actorPath;
- private final Map<String, String> peerAddresses;
+ private final Map<ShardIdentifier, String> peerAddresses;
private ShardInformation(String shardName, ActorRef actor,
- Map<String, String> peerAddresses) {
+ Map<ShardIdentifier, String> peerAddresses) {
this.shardName = shardName;
this.actor = actor;
this.actorPath = actor.path();
return actorPath;
}
- public Map<String, String> getPeerAddresses() {
- return peerAddresses;
- }
-
- public void updatePeerAddress(String peerId, String peerAddress){
- LOG.info("updatePeerAddress for peer {} with address {}", peerId, peerAddress);
+ public void updatePeerAddress(ShardIdentifier peerId, String peerAddress){
+ LOG.info("updatePeerAddress for peer {} with address {}", peerId,
+ peerAddress);
if(peerAddresses.containsKey(peerId)){
peerAddresses.put(peerId, peerAddress);
- LOG.info("Sending PeerAddressResolved for peer {} with address {} to {}", peerId, peerAddress, actor.path());
+ LOG.debug(
+ "Sending PeerAddressResolved for peer {} with address {} to {}",
+ peerId, peerAddress, actor.path());
actor
.tell(new PeerAddressResolved(peerId, peerAddress),
}
}
}
+
+
+
import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
@Override
public void handleReceive(Object message) throws Exception {
if (ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readData(transaction,ReadData.fromSerializable(message));
+ readData(transaction, ReadData.fromSerializable(message));
+ } else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ dataExists(transaction, DataExists.fromSerializable(message));
} else {
super.handleReceive(message);
}
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
+ //default scope test method to check if we get correct exception
+ void forUnitTestOnlyExplicitTransactionClose(){
+ transaction.close();
+ }
+
}
import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
deleteData(transaction,DeleteData.fromSerizalizable(message));
} else if (ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
readyTransaction(transaction,new ReadyTransaction());
+ } else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ dataExists(transaction, DataExists.fromSerializable(message));
}else {
super.handleReceive(message);
}
getSender().tell(new CloseTransactionReply().toSerializable(), getSelf());
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
+
+ /**
+ * The following method is used in unit testing only
+ * hence the default scope.
+ * This is done to test out failure cases.
+ */
+ public void forUnitTestOnlyExplicitTransactionClose() {
+ transaction.close();
+ }
}
import akka.event.LoggingAdapter;
import akka.japi.Creator;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.CheckedFuture;
+import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import java.util.concurrent.ExecutionException;
-
/**
* The ShardTransaction Actor represents a remote transaction
* <p>
protected ShardTransaction(DOMStoreTransactionChain transactionChain,
ActorRef shardActor, SchemaContext schemaContext) {
this.transactionChain = transactionChain;
- //this.transaction = transaction;
this.shardActor = shardActor;
this.schemaContext = schemaContext;
}
getSender().tell(new GetCompositeModificationReply(
new ImmutableCompositeModification(modification)), getSelf());
}else{
- throw new Exception ("ShardTransaction:handleRecieve received an unknown message"+message);
+ throw new UnknownMessageException(message);
}
}
final ActorRef sender = getSender();
final ActorRef self = getSelf();
final YangInstanceIdentifier path = message.getPath();
- final ListenableFuture<Optional<NormalizedNode<?, ?>>> future =
- transaction.read(path);
+ final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
+ transaction.read(path);
- future.addListener(new Runnable() {
+ future.addListener(new Runnable() {
@Override
public void run() {
try {
- Optional<NormalizedNode<?, ?>> optional = future.get();
+ Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
if (optional.isPresent()) {
sender.tell(new ReadDataReply(schemaContext,optional.get()).toSerializable(), self);
} else {
sender.tell(new ReadDataReply(schemaContext,null).toSerializable(), self);
}
- } catch (InterruptedException | ExecutionException e) {
- log.error(e,
- "An exception happened when reading data from path : "
- + path.toString());
+ } catch (Exception e) {
+ sender.tell(new akka.actor.Status.Failure(e),self);
}
}
}, getContext().dispatcher());
}
+ protected void dataExists(DOMStoreReadTransaction transaction, DataExists message) {
+ final YangInstanceIdentifier path = message.getPath();
+
+ try {
+ Boolean exists = transaction.exists(path).checkedGet();
+ getSender().tell(new DataExistsReply(exists).toSerializable(), getSelf());
+ } catch (ReadFailedException e) {
+ getSender().tell(new akka.actor.Status.Failure(e),getSelf());
+ }
+
+ }
protected void writeData(DOMStoreWriteTransaction transaction, WriteData message) {
modification.addModification(
new WriteModification(message.getPath(), message.getData(),schemaContext));
LOG.debug("writeData at path : " + message.getPath().toString());
- transaction.write(message.getPath(), message.getData());
- getSender().tell(new WriteDataReply().toSerializable(), getSelf());
+
+ try {
+ transaction.write(message.getPath(), message.getData());
+ getSender().tell(new WriteDataReply().toSerializable(), getSelf());
+ }catch(Exception e){
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
}
protected void mergeData(DOMStoreWriteTransaction transaction, MergeData message) {
modification.addModification(
new MergeModification(message.getPath(), message.getData(), schemaContext));
LOG.debug("mergeData at path : " + message.getPath().toString());
- transaction.merge(message.getPath(), message.getData());
- getSender().tell(new MergeDataReply().toSerializable(), getSelf());
+ try {
+ transaction.merge(message.getPath(), message.getData());
+ getSender().tell(new MergeDataReply().toSerializable(), getSelf());
+ }catch(Exception e){
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
}
protected void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) {
+ LOG.debug("deleteData at path : " + message.getPath().toString());
modification.addModification(new DeleteModification(message.getPath()));
- transaction.delete(message.getPath());
- getSender().tell(new DeleteDataReply().toSerializable(), getSelf());
+ try {
+ transaction.delete(message.getPath());
+ getSender().tell(new DeleteDataReply().toSerializable(), getSelf());
+ }catch(Exception e){
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
}
protected void readyTransaction(DOMStoreWriteTransaction transaction, ReadyTransaction message) {
chain.close();
getSender().tell(new CloseTransactionChainReply().toSerializable(), getSelf());
}else{
- throw new Exception("Not recognized message recieved="+message);
+ unknownMessage(message);
}
}
+ private ActorRef getShardActor(){
+ return getContext().parent();
+ }
+
private ActorRef createTypedTransactionActor(CreateTransaction createTransaction,String transactionId){
if(createTransaction.getTransactionType()== TransactionProxy.TransactionType.READ_ONLY.ordinal()){
return getContext().actorOf(
- ShardTransaction.props( chain.newReadOnlyTransaction(), getSelf(), schemaContext), transactionId);
+ ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(), schemaContext), transactionId);
}else if (createTransaction.getTransactionType()== TransactionProxy.TransactionType.READ_WRITE.ordinal()){
return getContext().actorOf(
- ShardTransaction.props( chain.newReadWriteTransaction(), getSelf(), schemaContext), transactionId);
+ ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(), schemaContext), transactionId);
}else if (createTransaction.getTransactionType()== TransactionProxy.TransactionType.WRITE_ONLY.ordinal()){
return getContext().actorOf(
- ShardTransaction.props( chain.newWriteOnlyTransaction(), getSelf(), schemaContext), transactionId);
+ ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(), schemaContext), transactionId);
}else{
throw new IllegalArgumentException ("CreateTransaction message has unidentified transaction type="+createTransaction.getTransactionType()) ;
}
getSender().tell(new CloseTransactionReply().toSerializable(), getSelf());
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
+
+ /**
+ * The following method is used in unit testing only
+ * hence the default scope.
+ * This is done to test out failure cases.
+ */
+ public void forUnitTestOnlyExplicitTransactionClose() {
+ transaction.close();
+ }
}
} else if (message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
abort(new AbortTransaction());
} else {
- throw new Exception ("Not recognized message received,message="+message);
+ unknownMessage(message);
}
}
Boolean canCommit = future.get();
sender.tell(new CanCommitTransactionReply(canCommit).toSerializable(), self);
} catch (InterruptedException | ExecutionException e) {
- log.error(e, "An exception happened when aborting");
+ log.error(e, "An exception happened when checking canCommit");
}
}
}, getContext().dispatcher());
}
@Override public ListenableFuture<Boolean> canCommit() {
+ LOG.debug("txn {} canCommit", transactionId);
Callable<Boolean> call = new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
for(ActorPath actorPath : cohortPaths){
+
+ Object message = new CanCommitTransaction().toSerializable();
+ LOG.debug("txn {} Sending {} to {}", transactionId, message, actorPath);
+
ActorSelection cohort = actorContext.actorSelection(actorPath);
try {
Object response =
actorContext.executeRemoteOperation(cohort,
- new CanCommitTransaction().toSerializable(),
+ message,
ActorContext.ASK_DURATION);
if (response.getClass().equals(CanCommitTransactionReply.SERIALIZABLE_CLASS)) {
}
}
} catch(RuntimeException e){
+ // FIXME : Need to properly handle this
LOG.error("Unexpected Exception", e);
return false;
}
}
@Override public ListenableFuture<Void> preCommit() {
+ LOG.debug("txn {} preCommit", transactionId);
return voidOperation(new PreCommitTransaction().toSerializable(), PreCommitTransactionReply.SERIALIZABLE_CLASS);
}
@Override public ListenableFuture<Void> abort() {
+ LOG.debug("txn {} abort", transactionId);
return voidOperation(new AbortTransaction().toSerializable(), AbortTransactionReply.SERIALIZABLE_CLASS);
}
@Override public ListenableFuture<Void> commit() {
+ LOG.debug("txn {} commit", transactionId);
return voidOperation(new CommitTransaction().toSerializable(), CommitTransactionReply.SERIALIZABLE_CLASS);
}
for(ActorPath actorPath : cohortPaths){
ActorSelection cohort = actorContext.actorSelection(actorPath);
+ LOG.debug("txn {} Sending {} to {}", transactionId, message, actorPath);
+
try {
Object response =
actorContext.executeRemoteOperation(cohort,
import akka.actor.ActorSelection;
import akka.actor.Props;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListeningExecutorService;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
private final TransactionType transactionType;
private final ActorContext actorContext;
private final Map<String, TransactionContext> remoteTransactionPaths = new HashMap<>();
- private final String identifier;
+ private final TransactionIdentifier identifier;
private final ListeningExecutorService executor;
private final SchemaContext schemaContext;
ListeningExecutorService executor,
SchemaContext schemaContext
) {
+ this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
+ this.transactionType = Preconditions.checkNotNull(transactionType, "transactionType should not be null");
+ this.executor = Preconditions.checkNotNull(executor, "executor should not be null");
+ this.schemaContext = Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
+
+ String memberName = actorContext.getCurrentMemberName();
+ if(memberName == null){
+ memberName = "UNKNOWN-MEMBER";
+ }
+ this.identifier = TransactionIdentifier.builder().memberName(memberName).counter(counter.getAndIncrement()).build();
- this.identifier = actorContext.getCurrentMemberName() + "-txn-" + counter.getAndIncrement();
- this.transactionType = transactionType;
- this.actorContext = actorContext;
- this.executor = executor;
- this.schemaContext = schemaContext;
-
+ LOG.debug("Created txn {}", identifier);
}
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(
final YangInstanceIdentifier path) {
+ LOG.debug("txn {} read {}", identifier, path);
+
createTransactionIfMissing(actorContext, path);
return transactionContext(path).readData(path);
}
+ @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+ YangInstanceIdentifier path) {
+ LOG.debug("txn {} exists {}", identifier, path);
+
+ createTransactionIfMissing(actorContext, path);
+
+ return transactionContext(path).dataExists(path);
+ }
+
@Override
public void write(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ LOG.debug("txn {} write {}", identifier, path);
+
createTransactionIfMissing(actorContext, path);
transactionContext(path).writeData(path, data);
@Override
public void merge(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ LOG.debug("txn {} merge {}", identifier, path);
+
createTransactionIfMissing(actorContext, path);
transactionContext(path).mergeData(path, data);
@Override
public void delete(YangInstanceIdentifier path) {
+ LOG.debug("txn {} delete {}", identifier, path);
+
createTransactionIfMissing(actorContext, path);
transactionContext(path).deleteData(path);
public DOMStoreThreePhaseCommitCohort ready() {
List<ActorPath> cohortPaths = new ArrayList<>();
+ LOG.debug("txn {} Trying to get {} transactions ready for commit", identifier, remoteTransactionPaths.size());
+
for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
+
+ LOG.debug("txn {} Readying transaction for shard {}", identifier, transactionContext.getShardName());
+
Object result = transactionContext.readyTransaction();
if(result.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)){
}
}
- return new ThreePhaseCommitCohortProxy(actorContext, cohortPaths, identifier, executor);
+ return new ThreePhaseCommitCohortProxy(actorContext, cohortPaths, identifier.toString(), executor);
}
@Override
try {
Object response = actorContext.executeShardOperation(shardName,
- new CreateTransaction(identifier,this.transactionType.ordinal() ).toSerializable(),
+ new CreateTransaction(identifier.toString(),this.transactionType.ordinal() ).toSerializable(),
ActorContext.ASK_DURATION);
if (response.getClass()
.equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
String transactionPath = reply.getTransactionPath();
- LOG.info("Received transaction path = {}" , transactionPath );
+ LOG.debug("txn {} Received transaction path = {}", identifier, transactionPath);
ActorSelection transactionActor =
actorContext.actorSelection(transactionPath);
remoteTransactionPaths.put(shardName, transactionContext);
}
} catch(TimeoutException | PrimaryNotFoundException e){
- LOG.error("Creating NoOpTransaction because of : {}", e.getMessage());
+ LOG.error("txn {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
remoteTransactionPaths.put(shardName,
new NoOpTransactionContext(shardName));
}
final YangInstanceIdentifier path);
void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
+
+ CheckedFuture<Boolean, ReadFailedException> dataExists(YangInstanceIdentifier path);
}
- private class TransactionContextImpl implements TransactionContext{
+ private class TransactionContextImpl implements TransactionContext {
private final String shardName;
private final String actorPath;
- private final ActorSelection actor;
+ private final ActorSelection actor;
private TransactionContextImpl(String shardName, String actorPath,
return actor;
}
- @Override public String getResolvedCohortPath(String cohortPath){
+ @Override public String getResolvedCohortPath(String cohortPath) {
return actorContext.resolvePath(actorPath, cohortPath);
}
getActor().tell(new DeleteData(path).toSerializable(), null);
}
- @Override public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data){
- getActor().tell(new MergeData(path, data, schemaContext).toSerializable(), null);
+ @Override public void mergeData(YangInstanceIdentifier path,
+ NormalizedNode<?, ?> data) {
+ getActor()
+ .tell(new MergeData(path, data, schemaContext).toSerializable(),
+ null);
}
- @Override public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
- final YangInstanceIdentifier path) {
-
- Callable<Optional<NormalizedNode<?,?>>> call = new Callable<Optional<NormalizedNode<?,?>>>() {
+ @Override
+ public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
+ final YangInstanceIdentifier path) {
- @Override public Optional<NormalizedNode<?,?>> call() throws Exception {
- Object response = actorContext
- .executeRemoteOperation(getActor(), new ReadData(path).toSerializable(),
- ActorContext.ASK_DURATION);
- if(response.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)){
- ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext,path, response);
- if(reply.getNormalizedNode() == null){
- return Optional.absent();
+ Callable<Optional<NormalizedNode<?, ?>>> call =
+ new Callable<Optional<NormalizedNode<?, ?>>>() {
+
+ @Override public Optional<NormalizedNode<?, ?>> call()
+ throws Exception {
+ Object response = actorContext
+ .executeRemoteOperation(getActor(),
+ new ReadData(path).toSerializable(),
+ ActorContext.ASK_DURATION);
+ if (response.getClass()
+ .equals(ReadDataReply.SERIALIZABLE_CLASS)) {
+ ReadDataReply reply = ReadDataReply
+ .fromSerializable(schemaContext, path,
+ response);
+ if (reply.getNormalizedNode() == null) {
+ return Optional.absent();
+ }
+ return Optional.<NormalizedNode<?, ?>>of(
+ reply.getNormalizedNode());
}
- return Optional.<NormalizedNode<?,?>>of(reply.getNormalizedNode());
- }
- return Optional.absent();
- }
- };
+ throw new ReadFailedException("Read Failed " + path);
+ }
+ };
- return MappingCheckedFuture.create(executor.submit(call), ReadFailedException.MAPPER);
+ return MappingCheckedFuture
+ .create(executor.submit(call), ReadFailedException.MAPPER);
}
- @Override public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- getActor().tell(new WriteData(path, data, schemaContext).toSerializable(), null);
+ @Override public void writeData(YangInstanceIdentifier path,
+ NormalizedNode<?, ?> data) {
+ getActor()
+ .tell(new WriteData(path, data, schemaContext).toSerializable(),
+ null);
}
+ @Override public CheckedFuture<Boolean, ReadFailedException> dataExists(
+ final YangInstanceIdentifier path) {
+
+ Callable<Boolean> call = new Callable<Boolean>() {
+
+ @Override public Boolean call() throws Exception {
+ Object o = actorContext.executeRemoteOperation(getActor(),
+ new DataExists(path).toSerializable(),
+ ActorContext.ASK_DURATION
+ );
+
+
+ if (DataExistsReply.SERIALIZABLE_CLASS
+ .equals(o.getClass())) {
+ return DataExistsReply.fromSerializable(o).exists();
+ }
+
+ throw new ReadFailedException("Exists Failed " + path);
+ }
+ };
+ return MappingCheckedFuture
+ .create(executor.submit(call), ReadFailedException.MAPPER);
+ }
}
private class NoOpTransactionContext implements TransactionContext {
}
@Override public void closeTransaction() {
- LOG.error("closeTransaction called");
+ LOG.warn("txn {} closeTransaction called", identifier);
}
@Override public Object readyTransaction() {
- LOG.error("readyTransaction called");
+ LOG.warn("txn {} readyTransaction called", identifier);
cohort = actorContext.getActorSystem().actorOf(Props.create(NoOpCohort.class));
return new ReadyTransactionReply(cohort.path()).toSerializable();
}
@Override public void deleteData(YangInstanceIdentifier path) {
- LOG.error("deleteData called path = {}", path);
+ // "txn" (not "txt") keeps the log prefix consistent with every other no-op log line
+ LOG.warn("txn {} deleteData called path = {}", identifier, path);
}
@Override public void mergeData(YangInstanceIdentifier path,
NormalizedNode<?, ?> data) {
- LOG.error("mergeData called path = {}", path);
+ LOG.warn("txn {} mergeData called path = {}", identifier, path);
}
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
YangInstanceIdentifier path) {
- LOG.error("readData called path = {}", path);
+ LOG.warn("txn {} readData called path = {}", identifier, path);
return Futures.immediateCheckedFuture(
Optional.<NormalizedNode<?, ?>>absent());
}
@Override public void writeData(YangInstanceIdentifier path,
NormalizedNode<?, ?> data) {
- LOG.error("writeData called path = {}", path);
+ LOG.warn("txn {} writeData called path = {}", identifier, path);
+ }
+
+ @Override public CheckedFuture<Boolean, ReadFailedException> dataExists(
+ YangInstanceIdentifier path) {
+ LOG.warn("txn {} dataExists called path = {}", identifier, path);
+
+ // Returning false instead of an exception to keep this aligned with
+ // read
+ return Futures.immediateCheckedFuture(false);
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.exceptions;
+
+/**
+ * Thrown when an actor receives a message it does not know how to handle.
+ * The offending message object is retained so {@link #getMessage()} can report it.
+ */
+public class UnknownMessageException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ // The unrecognized message; an arbitrary Object, so it may not itself be serializable
+ private final Object message;
+
+ public UnknownMessageException(Object message) {
+ this.message = message;
+ }
+
+ @Override public String getMessage() {
+ // single separator — the previous ("... " + " - ") concatenation produced a double space
+ return "Unknown message received - " + message;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Immutable identifier for a shard, rendered by {@link #toString()} as
+ * "&lt;memberName&gt;-shard-&lt;shardName&gt;-&lt;type&gt;". All three
+ * components are mandatory and participate in equals/hashCode.
+ */
+public class ShardIdentifier {
+ private final String shardName;
+ private final String memberName;
+ private final String type;
+
+
+ public ShardIdentifier(String shardName, String memberName, String type) {
+
+ // fail fast on missing components
+ Preconditions.checkNotNull(shardName, "shardName should not be null");
+ Preconditions.checkNotNull(memberName, "memberName should not be null");
+ Preconditions.checkNotNull(type, "type should not be null");
+
+ this.shardName = shardName;
+ this.memberName = memberName;
+ this.type = type;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ ShardIdentifier that = (ShardIdentifier) o;
+
+ if (!memberName.equals(that.memberName)) {
+ return false;
+ }
+ if (!shardName.equals(that.shardName)) {
+ return false;
+ }
+ if (!type.equals(that.type)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = shardName.hashCode();
+ result = 31 * result + memberName.hashCode();
+ result = 31 * result + type.hashCode();
+ return result;
+ }
+
+ @Override public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append(memberName).append("-shard-").append(shardName).append("-").append(type);
+ return builder.toString();
+ }
+
+ public static Builder builder(){
+ return new Builder();
+ }
+
+ /** Fluent builder; build() applies the constructor's null checks. */
+ public static class Builder {
+ private String shardName;
+ private String memberName;
+ private String type;
+
+ public ShardIdentifier build(){
+ return new ShardIdentifier(shardName, memberName, type);
+ }
+
+ public Builder shardName(String shardName){
+ this.shardName = shardName;
+ return this;
+ }
+
+ public Builder memberName(String memberName){
+ this.memberName = memberName;
+ return this;
+ }
+
+ public Builder type(String type){
+ this.type = type;
+ return this;
+ }
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Immutable identifier for a shard manager, rendered by {@link #toString()} as
+ * "shardmanager-&lt;type&gt;". The type is mandatory.
+ */
+public class ShardManagerIdentifier {
+ private final String type;
+
+ public ShardManagerIdentifier(String type) {
+ // fail fast, consistent with the other identifier classes in this package
+ this.type = Preconditions.checkNotNull(type, "type should not be null");
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ ShardManagerIdentifier that = (ShardManagerIdentifier) o;
+
+ if (!type.equals(that.type)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return type.hashCode();
+ }
+
+ @Override public String toString() {
+ StringBuilder builder = new StringBuilder();
+ builder.append("shardmanager-").append(type);
+ return builder.toString();
+ }
+
+ public static Builder builder(){
+ return new Builder();
+ }
+
+ /** Fluent builder; build() applies the constructor's null check. */
+ public static class Builder {
+ private String type;
+
+ public Builder type(String type){
+ this.type = type;
+ return this;
+ }
+
+ public ShardManagerIdentifier build(){
+ return new ShardManagerIdentifier(this.type);
+ }
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Immutable identifier for the shard-side transaction, rendered by
+ * {@link #toString()} as "shard-&lt;remoteTransactionId&gt;". The remote
+ * transaction id is mandatory and is the sole component of equality.
+ */
+public class ShardTransactionIdentifier {
+ private final String remoteTransactionId;
+
+ public ShardTransactionIdentifier(String remoteTransactionId) {
+ this.remoteTransactionId = Preconditions.checkNotNull(remoteTransactionId, "remoteTransactionId should not be null");
+ }
+
+ public static Builder builder(){
+ return new Builder();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ ShardTransactionIdentifier that = (ShardTransactionIdentifier) o;
+
+ if (!remoteTransactionId.equals(that.remoteTransactionId)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return remoteTransactionId.hashCode();
+ }
+
+ @Override public String toString() {
+ final StringBuilder sb =
+ new StringBuilder();
+ sb.append("shard-").append(remoteTransactionId);
+ return sb.toString();
+ }
+
+ /** Fluent builder; build() applies the constructor's null check. */
+ public static class Builder {
+ private String remoteTransactionId;
+
+ public Builder remoteTransactionId(String remoteTransactionId){
+ this.remoteTransactionId = remoteTransactionId;
+ return this;
+ }
+
+ public ShardTransactionIdentifier build(){
+ return new ShardTransactionIdentifier(remoteTransactionId);
+ }
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Immutable identifier for a front-end transaction, rendered by
+ * {@link #toString()} as "&lt;memberName&gt;-txn-&lt;counter&gt;".
+ * memberName is mandatory; the counter is supplied by the caller
+ * (TransactionProxy passes a per-member incrementing counter).
+ */
+public class TransactionIdentifier {
+ private final String memberName;
+ private final long counter;
+
+
+ public TransactionIdentifier(String memberName, long counter) {
+ this.memberName = Preconditions.checkNotNull(memberName, "memberName should not be null");
+ this.counter = counter;
+ }
+
+ public static Builder builder(){
+ return new Builder();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ TransactionIdentifier that = (TransactionIdentifier) o;
+
+ if (counter != that.counter) {
+ return false;
+ }
+ if (!memberName.equals(that.memberName)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = memberName.hashCode();
+ // fold the 64-bit counter into 32 bits (same form as Long.hashCode)
+ result = 31 * result + (int) (counter ^ (counter >>> 32));
+ return result;
+ }
+
+ @Override public String toString() {
+ final StringBuilder sb =
+ new StringBuilder();
+ sb.append(memberName).append("-txn-").append(counter);
+ return sb.toString();
+ }
+
+ /** Fluent builder; build() applies the constructor's null check on memberName. */
+ public static class Builder {
+ private String memberName;
+ private long counter;
+
+ public TransactionIdentifier build(){
+ return new TransactionIdentifier(memberName, counter);
+ }
+
+ public Builder memberName(String memberName){
+ this.memberName = memberName;
+ return this;
+ }
+
+ public Builder counter(long counter){
+ this.counter = counter;
+ return this;
+ }
+ }
+}
public static String BASE_JMX_PREFIX = "org.opendaylight.controller:";
public static String JMX_TYPE_DISTRIBUTED_DATASTORE = "DistributedDatastore";
public static String JMX_CATEGORY_SHARD = "Shard";
+ public static String JMX_CATEGORY_SHARD_MANAGER = "ShardManager";
private static final Logger LOG = LoggerFactory
.getLogger(AbstractBaseMBean.class);
* Date: 7/16/14
*/
public class ShardMBeanFactory {
- private static Map<String,ShardStats> shardMBeans= new HashMap<String,ShardStats>();
+ // NOTE(review): plain static HashMap read and mutated without synchronization;
+ // not thread-safe if getShardStatsMBean can be reached from multiple threads — confirm.
+ private static Map<String, ShardStats> shardMBeans =
+ new HashMap<String, ShardStats>();
- public static ShardStats getShardStatsMBean(String shardName){
- if(shardMBeans.containsKey(shardName)){
+ // Returns the cached ShardStats MBean for the shard, creating and registering one
+ // on first request. The instance is cached only when registerMBean() succeeds, so a
+ // failed registration leads to a fresh attempt on the next call.
+ public static ShardStats getShardStatsMBean(String shardName) {
+ if (shardMBeans.containsKey(shardName)) {
return shardMBeans.get(shardName);
- }else {
- ShardStats shardStatsMBeanImpl = new ShardStats(shardName);
+ } else {
+ ShardStats shardStatsMBeanImpl = new ShardStats(shardName);
- if(shardStatsMBeanImpl.registerMBean()) {
- shardMBeans.put(shardName, shardStatsMBeanImpl);
- }
- return shardStatsMBeanImpl;
- }
- }
+ if (shardStatsMBeanImpl.registerMBean()) {
+ shardMBeans.put(shardName, shardStatsMBeanImpl);
+ }
+ return shardStatsMBeanImpl;
+ }
+ }
}
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.AbstractBaseMBean;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
/**
* @author: syedbahm
*/
public class ShardStats extends AbstractBaseMBean implements ShardStatsMBean {
- private Long committedTransactionsCount;
- private Long journalMessagesCount;
- final private String shardName;
- private String leader;
- private String raftState;
- ShardStats(String shardName){
- this.shardName = shardName;
- committedTransactionsCount =0L;
- journalMessagesCount = 0L;
- };
+ private final String shardName;
+
+ private Long committedTransactionsCount = 0L;
+
+ private Long readOnlyTransactionCount = 0L;
+
+ private Long writeOnlyTransactionCount = 0L;
+
+ private Long readWriteTransactionCount = 0L;
+
+ private String leader;
+
+ private String raftState;
+
+ private Long lastLogTerm = -1L;
+
+ private Long lastLogIndex = -1L;
+
+ private Long currentTerm = -1L;
+
+ private Long commitIndex = -1L;
+
+ private Long lastApplied = -1L;
+
+ private Date lastCommittedTransactionTime = new Date(0L);
+
+ private Long failedTransactionsCount = 0L;
+
+ private SimpleDateFormat sdf =
+ new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+
+ ShardStats(String shardName) {
+ this.shardName = shardName;
+ }
+
+
+ @Override
+ public String getShardName() {
+ return shardName;
+ }
+
+ @Override
+ public Long getCommittedTransactionsCount() {
+ return committedTransactionsCount;
+ }
+
+ @Override public String getLeader() {
+ return leader;
+ }
+
+ @Override public String getRaftState() {
+ return raftState;
+ }
+
+ @Override public Long getReadOnlyTransactionCount() {
+ return readOnlyTransactionCount;
+ }
+
+ @Override public Long getWriteOnlyTransactionCount() {
+ return writeOnlyTransactionCount;
+ }
+
+ @Override public Long getReadWriteTransactionCount() {
+ return readWriteTransactionCount;
+ }
+
+ @Override public Long getLastLogIndex() {
+ return lastLogIndex;
+ }
+
+ @Override public Long getLastLogTerm() {
+ return lastLogTerm;
+ }
+
+ @Override public Long getCurrentTerm() {
+ return currentTerm;
+ }
+
+ @Override public Long getCommitIndex() {
+ return commitIndex;
+ }
+
+ @Override public Long getLastApplied() {
+ return lastApplied;
+ }
+
+ @Override
+ public String getLastCommittedTransactionTime() {
+ return sdf.format(lastCommittedTransactionTime);
+ }
- @Override
- public String getShardName() {
- return shardName;
- }
+ @Override public Long getFailedTransactionsCount() {
+ return failedTransactionsCount;
+ }
- @Override
- public Long getCommittedTransactionsCount() {
- return committedTransactionsCount;
- }
+ public Long incrementCommittedTransactionCount() {
+ return committedTransactionsCount++;
+ }
- @Override
- public Long getJournalMessagesCount() {
- //FIXME: this will be populated once after integration with Raft stuff
- return journalMessagesCount;
- }
+ public Long incrementReadOnlyTransactionCount() {
+ return readOnlyTransactionCount++;
+ }
- @Override public String getLeader() {
- return leader;
- }
+ public Long incrementWriteOnlyTransactionCount() {
+ return writeOnlyTransactionCount++;
+ }
- @Override public String getRaftState() {
- return raftState;
- }
+ public Long incrementReadWriteTransactionCount() {
+ return readWriteTransactionCount++;
+ }
- public Long incrementCommittedTransactionCount() {
- return committedTransactionsCount++;
- }
+ public void setLeader(String leader) {
+ this.leader = leader;
+ }
+ public void setRaftState(String raftState) {
+ this.raftState = raftState;
+ }
- public void updateCommittedTransactionsCount(long currentCount){
- committedTransactionsCount = currentCount;
+ public void setLastLogTerm(Long lastLogTerm) {
+ this.lastLogTerm = lastLogTerm;
+ }
- }
+ public void setLastLogIndex(Long lastLogIndex) {
+ this.lastLogIndex = lastLogIndex;
+ }
- public void updateJournalMessagesCount(long currentCount){
- journalMessagesCount = currentCount;
+ public void setCurrentTerm(Long currentTerm) {
+ this.currentTerm = currentTerm;
+ }
- }
+ public void setCommitIndex(Long commitIndex) {
+ this.commitIndex = commitIndex;
+ }
- public void setLeader(String leader){
- this.leader = leader;
- }
+ public void setLastApplied(Long lastApplied) {
+ this.lastApplied = lastApplied;
+ }
- public void setRaftState(String raftState){
- this.raftState = raftState;
- }
+ public void setLastCommittedTransactionTime(
+ Date lastCommittedTransactionTime) {
+ this.lastCommittedTransactionTime = lastCommittedTransactionTime;
+ }
- @Override
- protected String getMBeanName() {
- return shardName;
- }
+ @Override
+ protected String getMBeanName() {
+ return shardName;
+ }
- @Override
- protected String getMBeanType() {
- return JMX_TYPE_DISTRIBUTED_DATASTORE;
- }
+ @Override
+ protected String getMBeanType() {
+ return JMX_TYPE_DISTRIBUTED_DATASTORE;
+ }
- @Override
- protected String getMBeanCategory() {
- return JMX_CATEGORY_SHARD;
- }
+ @Override
+ protected String getMBeanCategory() {
+ return JMX_CATEGORY_SHARD;
+ }
+ public void incrementFailedTransactionsCount() {
+ this.failedTransactionsCount++;
+ }
}
* @author: syedbahm
*/
public interface ShardStatsMBean {
+ // NOTE(review): getJournalMessagesCount() is removed below — a breaking change for
+ // any JMX client reading that attribute; confirm nothing depends on it.
- String getShardName();
- Long getCommittedTransactionsCount();
- Long getJournalMessagesCount();
- String getLeader();
- String getRaftState();
+ String getShardName();
+
+ Long getCommittedTransactionsCount();
+
+ String getLeader();
+
+ String getRaftState();
+
+ // per-type transaction counters
+ Long getReadOnlyTransactionCount();
+
+ Long getWriteOnlyTransactionCount();
+
+ Long getReadWriteTransactionCount();
+
+ // raft replication state
+ Long getLastLogIndex();
+
+ Long getLastLogTerm();
+
+ Long getCurrentTerm();
+
+ Long getCommitIndex();
+
+ Long getLastApplied();
+
+ String getLastCommittedTransactionTime();
+
+ Long getFailedTransactionsCount();
+
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager;
+
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.AbstractBaseMBean;
+
+import java.util.List;
+
+/**
+ * MBean implementation of ShardManagerInfoMBean: registered under the
+ * "ShardManager" JMX category and reporting the local shard names.
+ */
+public class ShardManagerInfo extends AbstractBaseMBean implements
+ ShardManagerInfoMBean {
+
+ private final String name;
+ private final List<String> localShards;
+
+ public ShardManagerInfo(String name, List<String> localShards) {
+ this.name = name;
+ this.localShards = localShards;
+ }
+
+
+ @Override protected String getMBeanName() {
+ return name;
+ }
+
+ @Override protected String getMBeanType() {
+ return JMX_TYPE_DISTRIBUTED_DATASTORE;
+ }
+
+ @Override protected String getMBeanCategory() {
+ return JMX_CATEGORY_SHARD_MANAGER;
+ }
+
+ // Factory helper: constructs and registers the MBean in one step.
+ public static ShardManagerInfo createShardManagerMBean(String name, List<String> localShards){
+ ShardManagerInfo shardManagerInfo = new ShardManagerInfo(name,
+ localShards);
+
+ // NOTE(review): the boolean result of registerMBean() is ignored here (unlike
+ // ShardMBeanFactory, which checks it) — confirm registration failures are acceptable.
+ shardManagerInfo.registerMBean();
+
+ return shardManagerInfo;
+ }
+
+ @Override public List<String> getLocalShards() {
+ // NOTE(review): returns the internal list reference; callers could mutate it.
+ return localShards;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager;
+
+import java.util.List;
+
+/**
+ * JMX view of the shard manager: exposes the list of local shard names.
+ */
+public interface ShardManagerInfoMBean {
+ List<String> getLocalShards();
+}
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class AbortTransaction implements SerializableMessage {
- public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransaction.class;
+ public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransaction.class;
@Override
public Object toSerializable() {
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class AbortTransactionReply implements SerializableMessage {
- public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransactionReply.class;
+ public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransactionReply.class;
@Override
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class CanCommitTransaction implements SerializableMessage {
- public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CanCommitTransaction.class;
+ public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CanCommitTransaction.class;
@Override
public Object toSerializable() {
import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
public class CloseDataChangeListenerRegistration implements SerializableMessage {
- public static Class SERIALIZABLE_CLASS = ListenerRegistrationMessages.CloseDataChangeListenerRegistration.class;
+ public static final Class SERIALIZABLE_CLASS = ListenerRegistrationMessages.CloseDataChangeListenerRegistration.class;
@Override
public Object toSerializable() {
return ListenerRegistrationMessages.CloseDataChangeListenerRegistration.newBuilder().build();
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class CommitTransaction implements SerializableMessage {
- public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransaction.class;
+ public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransaction.class;
@Override
public Object toSerializable() {
public class CommitTransactionReply implements SerializableMessage {
- public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransactionReply.class;
+ public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransactionReply.class;
@Override
public Object toSerializable() {
public class CreateTransaction implements SerializableMessage {
- public static Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
+ public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
private final String transactionId;
private final int transactionType;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages;
public class CreateTransactionChain implements SerializableMessage{
- public static Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CreateTransactionChain.class;
+ public static final Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CreateTransactionChain.class;
@Override
public Object toSerializable() {
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Message asking a shard transaction actor whether any data exists at the
+ * given {@link YangInstanceIdentifier} path. Converts to and from the
+ * protobuf {@code ShardTransactionMessages.DataExists} wire form so it can be
+ * sent between cluster members. The expected response is
+ * {@link DataExistsReply}.
+ */
+public class DataExists implements SerializableMessage{
+
+ // Wire-format class used to recognize this message on the receiving side.
+ // NOTE(review): raw Class type — presumably Class<?> was intended; confirm
+ // against the other SerializableMessage constants before changing.
+ public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.DataExists.class;
+
+ // Path whose existence is being queried; immutable after construction.
+ private final YangInstanceIdentifier path;
+
+ public DataExists(YangInstanceIdentifier path) {
+ this.path = path;
+ }
+
+ public YangInstanceIdentifier getPath() {
+ return path;
+ }
+
+ // Builds the protobuf representation, serializing the path via
+ // InstanceIdentifierUtils.
+ @Override public Object toSerializable() {
+ return ShardTransactionMessages.DataExists.newBuilder()
+ .setInstanceIdentifierPathArguments(
+ InstanceIdentifierUtils.toSerializable(path)).build();
+ }
+
+ // Inverse of toSerializable(): caller must pass a
+ // ShardTransactionMessages.DataExists instance (unchecked cast otherwise
+ // throws ClassCastException).
+ public static DataExists fromSerializable(Object serializable){
+ ShardTransactionMessages.DataExists o = (ShardTransactionMessages.DataExists) serializable;
+ return new DataExists(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()));
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+
+/**
+ * Reply to a {@link DataExists} query, carrying a single boolean flag
+ * indicating whether data was found at the requested path. Converts to and
+ * from the protobuf {@code ShardTransactionMessages.DataExistsReply} wire
+ * form.
+ */
+public class DataExistsReply implements SerializableMessage{
+
+
+ // Wire-format class used to recognize this message on the receiving side.
+ // NOTE(review): raw Class type — presumably Class<?> was intended; confirm
+ // against the other SerializableMessage constants before changing.
+ public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.DataExistsReply.class;
+
+ // Result of the existence check; immutable after construction.
+ private final boolean exists;
+
+ public DataExistsReply(boolean exists) {
+ this.exists = exists;
+ }
+
+ public boolean exists() {
+ return exists;
+ }
+
+ @Override public Object toSerializable() {
+ return ShardTransactionMessages.DataExistsReply.newBuilder()
+ .setExists(exists).build();
+ }
+
+ // Inverse of toSerializable(): caller must pass a
+ // ShardTransactionMessages.DataExistsReply instance (unchecked cast
+ // otherwise throws ClassCastException).
+ public static DataExistsReply fromSerializable(Object serializable){
+ ShardTransactionMessages.DataExistsReply o = (ShardTransactionMessages.DataExistsReply) serializable;
+ return new DataExistsReply(o.getExists());
+ }
+
+}
package org.opendaylight.controller.cluster.datastore.messages;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+
public class PeerAddressResolved {
- private final String peerId;
+ private final ShardIdentifier peerId;
private final String peerAddress;
- public PeerAddressResolved(String peerId, String peerAddress) {
+ public PeerAddressResolved(ShardIdentifier peerId, String peerAddress) {
this.peerId = peerId;
this.peerAddress = peerAddress;
}
- public String getPeerId() {
+ public ShardIdentifier getPeerId() {
return peerId;
}
public class PreCommitTransaction implements SerializableMessage{
- public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransaction.class;
+ public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransaction.class;
@Override
public Object toSerializable() {
public class PreCommitTransactionReply implements SerializableMessage{
- public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransactionReply.class;
+ public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransactionReply.class;
@Override
public Object toSerializable() {
.getLogger(InstanceIdentifierUtils.class);
public static String getParentPath(String currentElementPath) {
- String parentPath = "";
+
+ StringBuilder parentPath = new StringBuilder();
if (currentElementPath != null) {
String[] parentPaths = currentElementPath.split("/");
if (parentPaths.length > 2) {
for (int i = 0; i < parentPaths.length - 1; i++) {
if (parentPaths[i].length() > 0) {
- parentPath += "/" + parentPaths[i];
+ parentPath.append( "/");
+ parentPath.append( parentPaths[i]);
}
}
}
}
- return parentPath;
+ return parentPath.toString();
}
@Deprecated
odl-cluster-data {
akka {
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
cluster {
roles = [
"member-1"
netty.tcp {
hostname = "127.0.0.1"
port = 2550
- maximum-frame-size = 2097152
+ maximum-frame-size = 419430400
send-buffer-size = 52428800
receive-buffer-size = 52428800
}
odl-cluster-rpc {
akka {
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
actor {
provider = "akka.cluster.ClusterActorRefProvider"
import akka.actor.Props;
import akka.event.Logging;
import akka.testkit.JavaTestKit;
-import junit.framework.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
import java.util.Collections;
import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
public class BasicIntegrationTest extends AbstractActorTest {
new JavaTestKit(getSystem()) {{
- final Props props = Shard.props("config", Collections.EMPTY_MAP);
+ final ShardIdentifier identifier =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP);
final ActorRef shard = getSystem().actorOf(props);
new Within(duration("5 seconds")) {
}
}.get(); // this extracts the received message
- Assert.assertNotNull(transactionChain);
+ assertNotNull(transactionChain);
System.out.println("Successfully created transaction chain");
}
}.get(); // this extracts the received message
- Assert.assertNotNull(transaction);
+ assertNotNull(transaction);
System.out.println("Successfully created transaction");
}
}.get(); // this extracts the received message
- Assert.assertTrue(writeDone);
+ assertTrue(writeDone);
System.out.println("Successfully wrote data");
}
}.get(); // this extracts the received message
- Assert.assertNotNull(cohort);
+ assertNotNull(cohort);
System.out.println("Successfully readied the transaction");
}
}.get(); // this extracts the received message
- Assert.assertTrue(preCommitDone);
+ assertTrue(preCommitDone);
System.out.println("Successfully pre-committed the transaction");
import java.io.File;
import java.util.List;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class ConfigurationImplTest {
assertTrue(memberShardNames.contains("people-1"));
assertTrue(memberShardNames.contains("cars-1"));
+
+ // Retrieve once again to hit cache
+
+ memberShardNames =
+ configuration.getMemberShardNames("member-1");
+
+ assertTrue(memberShardNames.contains("people-1"));
+ assertTrue(memberShardNames.contains("cars-1"));
+
+ }
+
+ @Test
+ public void testGetMembersFromShardName(){
+ List<String> members =
+ configuration.getMembersFromShardName("default");
+
+ assertEquals(3, members.size());
+
+ assertTrue(members.contains("member-1"));
+ assertTrue(members.contains("member-2"));
+ assertTrue(members.contains("member-3"));
+
+ assertFalse(members.contains("member-26"));
+
+ // Retrieve once again to hit cache
+ members =
+ configuration.getMembersFromShardName("default");
+
+ assertEquals(3, members.size());
+
+ assertTrue(members.contains("member-1"));
+ assertTrue(members.contains("member-2"));
+ assertTrue(members.contains("member-3"));
+
+ assertFalse(members.contains("member-26"));
+
+
+ // Try to find a shard which is not present
+
+ members =
+ configuration.getMembersFromShardName("foobar");
+
+ assertEquals(0, members.size());
}
@Test
public class DataChangeListenerRegistrationTest extends AbstractActorTest {
private static ListeningExecutorService storeExecutor = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
- private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor);
+ private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor,
+ MoreExecutors.sameThreadExecutor());
static {
store.onGlobalContextUpdated(TestModel.createTestContext());
final ActorRef subject = getSystem().actorOf(props, "testCloseListenerRegistration");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(new CloseDataChangeListenerRegistration().toSerializable(), getRef());
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(CloseDataChangeListenerRegistrationReply.SERIALIZABLE_CLASS)) {
return "match";
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
import akka.actor.Props;
-import junit.framework.Assert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
public class DistributedDataStoreTest extends AbstractActorTest{
private DistributedDataStore distributedDataStore;
private MockActorContext mockActorContext;
private ActorRef doNothingActorRef;
- @org.junit.Before
+ @Before
public void setUp() throws Exception {
ShardStrategyFactory.setConfiguration(new MockConfiguration());
final Props props = Props.create(DoNothingActor.class);
doNothingActorRef = getSystem().actorOf(props);
mockActorContext = new MockActorContext(getSystem(), doNothingActorRef);
- distributedDataStore = new DistributedDataStore(mockActorContext, "config");
+ distributedDataStore = new DistributedDataStore(mockActorContext);
distributedDataStore.onGlobalContextUpdated(
TestModel.createTestContext());
.build());
}
- @org.junit.After
+ @After
public void tearDown() throws Exception {
}
- @org.junit.Test
+ @Test
+ public void testConstructor(){
+ ActorSystem actorSystem = mock(ActorSystem.class);
+
+ new DistributedDataStore(actorSystem, "config",
+ mock(ClusterWrapper.class), mock(Configuration.class));
+
+ verify(actorSystem).actorOf(any(Props.class), eq("shardmanager-config"));
+ }
+
+ @Test
public void testRegisterChangeListenerWhenShardIsNotLocal() throws Exception {
ListenerRegistration registration =
}, AsyncDataBroker.DataChangeScope.BASE);
// Since we do not expect the shard to be local registration will return a NoOpRegistration
- Assert.assertTrue(registration instanceof NoOpDataChangeListenerRegistration);
+ assertTrue(registration instanceof NoOpDataChangeListenerRegistration);
- Assert.assertNotNull(registration);
+ assertNotNull(registration);
}
- @org.junit.Test
+ @Test
public void testRegisterChangeListenerWhenShardIsLocal() throws Exception {
mockActorContext.setExecuteLocalShardOperationResponse(new RegisterChangeListenerReply(doNothingActorRef.path()));
}
}, AsyncDataBroker.DataChangeScope.BASE);
- Assert.assertTrue(registration instanceof DataChangeListenerRegistrationProxy);
+ assertTrue(registration instanceof DataChangeListenerRegistrationProxy);
- Assert.assertNotNull(registration);
+ assertNotNull(registration);
}
- @org.junit.Test
+ @Test
public void testCreateTransactionChain() throws Exception {
final DOMStoreTransactionChain transactionChain = distributedDataStore.createTransactionChain();
- Assert.assertNotNull(transactionChain);
+ assertNotNull(transactionChain);
}
- @org.junit.Test
+ @Test
public void testNewReadOnlyTransaction() throws Exception {
final DOMStoreReadTransaction transaction = distributedDataStore.newReadOnlyTransaction();
- Assert.assertNotNull(transaction);
+ assertNotNull(transaction);
}
- @org.junit.Test
+ @Test
public void testNewWriteOnlyTransaction() throws Exception {
final DOMStoreWriteTransaction transaction = distributedDataStore.newWriteOnlyTransaction();
- Assert.assertNotNull(transaction);
+ assertNotNull(transaction);
}
- @org.junit.Test
+ @Test
public void testNewReadWriteTransaction() throws Exception {
final DOMStoreReadWriteTransaction transaction = distributedDataStore.newReadWriteTransaction();
- Assert.assertNotNull(transaction);
+ assertNotNull(transaction);
}
}
import akka.testkit.JavaTestKit;
import junit.framework.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
@Test
public void testOnReceiveCreateTransactionChain() throws Exception {
new JavaTestKit(getSystem()) {{
- final Props props = Shard.props("config", Collections.EMPTY_MAP);
+ final ShardIdentifier identifier =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP);
final ActorRef subject =
getSystem().actorOf(props, "testCreateTransactionChain");
@Test
public void testOnReceiveRegisterListener() throws Exception {
new JavaTestKit(getSystem()) {{
- final Props props = Shard.props("config", Collections.EMPTY_MAP);
+ final ShardIdentifier identifier =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP);
final ActorRef subject =
getSystem().actorOf(props, "testRegisterChangeListener");
@Test
public void testCreateTransaction(){
new JavaTestKit(getSystem()) {{
- final Props props = Shard.props("config", Collections.EMPTY_MAP);
+ final ShardIdentifier identifier =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ final Props props = Shard.props(identifier, Collections.EMPTY_MAP);
final ActorRef subject =
getSystem().actorOf(props, "testCreateTransaction");
@Test
public void testPeerAddressResolved(){
new JavaTestKit(getSystem()) {{
- Map<String, String> peerAddresses = new HashMap<>();
- peerAddresses.put("member-2", null);
- final Props props = Shard.props("config", peerAddresses);
+ Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
+
+ final ShardIdentifier identifier =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ peerAddresses.put(identifier, null);
+ final Props props = Shard.props(identifier, peerAddresses);
final ActorRef subject =
getSystem().actorOf(props, "testPeerAddressResolved");
protected void run() {
subject.tell(
- new PeerAddressResolved("member-2", "akka://foobar"),
+ new PeerAddressResolved(identifier, "akka://foobar"),
getRef());
expectNoMsg();
private static ListeningExecutorService storeExecutor = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
- private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor);
+ private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor,
+ MoreExecutors.sameThreadExecutor());
static {
store.onGlobalContextUpdated(TestModel.createTestContext());
final ActorRef subject = getSystem().actorOf(props, "testCreateTransaction");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(), getRef());
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
return CreateTransactionReply.fromSerializable(in).getTransactionPath();
final ActorRef subject = getSystem().actorOf(props, "testCloseTransactionChain");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(new CloseTransactionChain().toSerializable(), getRef());
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(CloseTransactionChainReply.SERIALIZABLE_CLASS)) {
return "match";
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.TestActorRef;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+
+import java.util.Collections;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Covers negative test cases
+ * @author Basheeruddin Ahmed <syedbahm@cisco.com>
+ */
+public class ShardTransactionFailureTest extends AbstractActorTest {
+    // Shared same-thread executor so store callbacks run synchronously in tests.
+    private static ListeningExecutorService storeExecutor =
+        MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
+
+    // In-memory backing store shared by every test in this class.
+    private static final InMemoryDOMDataStore store =
+        new InMemoryDOMDataStore("OPER", storeExecutor,
+            MoreExecutors.sameThreadExecutor());
+
+    private static final SchemaContext testSchemaContext =
+        TestModel.createTestContext();
+
+    // Single shard identity reused by all tests; the shard actor itself is
+    // created fresh per test.
+    private static final ShardIdentifier SHARD_IDENTIFIER =
+        ShardIdentifier.builder().memberName("member-1")
+            .shardName("inventory").type("config").build();
+
+    static {
+        store.onGlobalContextUpdated(testSchemaContext);
+    }
+
+
+    // Reading after the read-only transaction has been force-closed must
+    // surface a ReadFailedException (thrown by Await.result on the failed
+    // second ask).
+    @Test(expected = ReadFailedException.class)
+    public void testNegativeReadWithReadOnlyTransactionClosed()
+        throws Throwable {
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeReadWithReadOnlyTransactionClosed");
+
+        ShardTransactionMessages.ReadData readData =
+            ShardTransactionMessages.ReadData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()
+                ).build();
+        // First read succeeds while the transaction is still open.
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        // Force-close the underlying transaction via the test-only hook, then
+        // retry the read; this second Await is expected to throw.
+        ((ShardReadTransaction) subject.underlyingActor())
+            .forUnitTestOnlyExplicitTransactionClose();
+
+        future = akka.pattern.Patterns.ask(subject, readData, 3000);
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+
+    // Same scenario as above but against a read-write transaction actor.
+    @Test(expected = ReadFailedException.class)
+    public void testNegativeReadWithReadWriteOnlyTransactionClosed()
+        throws Throwable {
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeReadWithReadWriteOnlyTransactionClosed");
+
+        ShardTransactionMessages.ReadData readData =
+            ShardTransactionMessages.ReadData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()
+                ).build();
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ((ShardReadWriteTransaction) subject.underlyingActor())
+            .forUnitTestOnlyExplicitTransactionClose();
+
+        future = akka.pattern.Patterns.ask(subject, readData, 3000);
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+    // DataExists after a forced close must also fail with ReadFailedException.
+    @Test(expected = ReadFailedException.class)
+    public void testNegativeExistsWithReadWriteOnlyTransactionClosed()
+        throws Throwable {
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeExistsWithReadWriteOnlyTransactionClosed");
+
+        ShardTransactionMessages.DataExists dataExists =
+            ShardTransactionMessages.DataExists.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()
+                ).build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, dataExists, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ((ShardReadWriteTransaction) subject.underlyingActor())
+            .forUnitTestOnlyExplicitTransactionClose();
+
+        future = akka.pattern.Patterns.ask(subject, dataExists, 3000);
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+    // Writing after ReadyTransaction has been sent must fail with
+    // IllegalStateException (transaction is sealed once readied).
+    @Test(expected = IllegalStateException.class)
+    public void testNegativeWriteWithTransactionReady() throws Exception {
+
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeWriteWithTransactionReady");
+
+        ShardTransactionMessages.ReadyTransaction readyTransaction =
+            ShardTransactionMessages.ReadyTransaction.newBuilder().build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readyTransaction, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ShardTransactionMessages.WriteData writeData =
+            ShardTransactionMessages.WriteData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()).setNormalizedNode(
+                NormalizedNodeMessages.Node.newBuilder().build()
+
+            ).build();
+
+        future = akka.pattern.Patterns.ask(subject, writeData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+
+    // Same ready-then-write scenario, but on a read-write transaction actor.
+    @Test(expected = IllegalStateException.class)
+    public void testNegativeReadWriteWithTransactionReady() throws Exception {
+
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeReadWriteWithTransactionReady");
+
+        ShardTransactionMessages.ReadyTransaction readyTransaction =
+            ShardTransactionMessages.ReadyTransaction.newBuilder().build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readyTransaction, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ShardTransactionMessages.WriteData writeData =
+            ShardTransactionMessages.WriteData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()).setNormalizedNode(
+                NormalizedNodeMessages.Node.newBuilder().build()
+
+            ).build();
+
+        future = akka.pattern.Patterns.ask(subject, writeData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+    // Merging after ready must likewise raise IllegalStateException.
+    @Test(expected = IllegalStateException.class)
+    public void testNegativeMergeTransactionReady() throws Exception {
+
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props, "testNegativeMergeTransactionReady");
+
+        ShardTransactionMessages.ReadyTransaction readyTransaction =
+            ShardTransactionMessages.ReadyTransaction.newBuilder().build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readyTransaction, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ShardTransactionMessages.MergeData mergeData =
+            ShardTransactionMessages.MergeData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()).setNormalizedNode(
+                NormalizedNodeMessages.Node.newBuilder().build()
+
+            ).build();
+
+        future = akka.pattern.Patterns.ask(subject, mergeData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+
+    // Deleting after ready must likewise raise IllegalStateException.
+    @Test(expected = IllegalStateException.class)
+    public void testNegativeDeleteDataWhenTransactionReady() throws Exception {
+
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeDeleteDataWhenTransactionReady");
+
+        ShardTransactionMessages.ReadyTransaction readyTransaction =
+            ShardTransactionMessages.ReadyTransaction.newBuilder().build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readyTransaction, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ShardTransactionMessages.DeleteData deleteData =
+            ShardTransactionMessages.DeleteData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()).build();
+
+        future = akka.pattern.Patterns.ask(subject, deleteData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+
+}
import com.google.common.util.concurrent.MoreExecutors;
import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
private static final InMemoryDOMDataStore store =
- new InMemoryDOMDataStore("OPER", storeExecutor);
+ new InMemoryDOMDataStore("OPER", storeExecutor, MoreExecutors.sameThreadExecutor());
private static final SchemaContext testSchemaContext = TestModel.createTestContext();
+ private static final ShardIdentifier SHARD_IDENTIFIER =
+ ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+
static {
store.onGlobalContextUpdated(testSchemaContext);
}
@Test
public void testOnReceiveReadData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
final Props props =
ShardTransaction.props(store.newReadOnlyTransaction(), shard, testSchemaContext);
final ActorRef subject = getSystem().actorOf(props, "testReadData");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
if (ReadDataReply.fromSerializable(testSchemaContext,YangInstanceIdentifier.builder().build(), in)
@Test
public void testOnReceiveReadDataWhenDataNotFound() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
final Props props =
ShardTransaction.props( store.newReadOnlyTransaction(), shard, testSchemaContext);
final ActorRef subject = getSystem().actorOf(props, "testReadDataWhenDataNotFound");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
if (ReadDataReply.fromSerializable(testSchemaContext,TestModel.TEST_PATH, in)
}};
}
+ @Test
+ public void testOnReceiveDataExistsPositive() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+ final Props props =
+ ShardTransaction.props(store.newReadOnlyTransaction(), shard, testSchemaContext);
+ final ActorRef subject = getSystem().actorOf(props, "testDataExistsPositive");
+
+ new Within(duration("1 seconds")) {
+ @Override
+ protected void run() {
+
+ subject.tell(
+ new DataExists(YangInstanceIdentifier.builder().build()).toSerializable(),
+ getRef());
+
+ final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
+ // do not put code outside this method, will run afterwards
+ @Override
+ protected String match(Object in) {
+ if (in.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
+ if (DataExistsReply.fromSerializable(in)
+ .exists()) {
+ return "match";
+ }
+ return null;
+ } else {
+ throw noMatch();
+ }
+ }
+ }.get(); // this extracts the received message
+
+ assertEquals("match", out);
+
+ expectNoMsg();
+ }
+
+
+ };
+ }};
+ }
+
+ @Test
+ public void testOnReceiveDataExistsNegative() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+ final Props props =
+ ShardTransaction.props(store.newReadOnlyTransaction(), shard, testSchemaContext);
+ final ActorRef subject = getSystem().actorOf(props, "testDataExistsNegative");
+
+ new Within(duration("1 seconds")) {
+ @Override
+ protected void run() {
+
+ subject.tell(
+ new DataExists(TestModel.TEST_PATH).toSerializable(),
+ getRef());
+
+ final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
+ // do not put code outside this method, will run afterwards
+ @Override
+ protected String match(Object in) {
+ if (in.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
+ if (!DataExistsReply.fromSerializable(in)
+ .exists()) {
+ return "match";
+ }
+ return null;
+ } else {
+ throw noMatch();
+ }
+ }
+ }.get(); // this extracts the received message
+
+ assertEquals("match", out);
+
+ expectNoMsg();
+ }
+
+
+ };
+ }};
+ }
+
private void assertModification(final ActorRef subject,
final Class<? extends Modification> modificationType) {
new JavaTestKit(getSystem()) {{
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject
.tell(new ShardTransaction.GetCompositedModification(),
final CompositeModification compositeModification =
new ExpectMsg<CompositeModification>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected CompositeModification match(Object in) {
if (in instanceof ShardTransaction.GetCompositeModificationReply) {
return ((ShardTransaction.GetCompositeModificationReply) in)
@Test
public void testOnReceiveWriteData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
final Props props =
ShardTransaction.props(store.newWriteOnlyTransaction(), shard, TestModel.createTestContext());
final ActorRef subject =
getSystem().actorOf(props, "testWriteData");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(new WriteData(TestModel.TEST_PATH,
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(WriteDataReply.SERIALIZABLE_CLASS)) {
return "match";
@Test
public void testOnReceiveMergeData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
final Props props =
ShardTransaction.props(store.newReadWriteTransaction(), shard, testSchemaContext);
final ActorRef subject =
getSystem().actorOf(props, "testMergeData");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(new MergeData(TestModel.TEST_PATH,
final String out = new ExpectMsg<String>(duration("500 milliseconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(MergeDataReply.SERIALIZABLE_CLASS)) {
return "match";
@Test
public void testOnReceiveDeleteData() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
final Props props =
ShardTransaction.props( store.newWriteOnlyTransaction(), shard, TestModel.createTestContext());
final ActorRef subject =
getSystem().actorOf(props, "testDeleteData");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(new DeleteData(TestModel.TEST_PATH).toSerializable(), getRef());
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(DeleteDataReply.SERIALIZABLE_CLASS)) {
return "match";
@Test
public void testOnReceiveReadyTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
final Props props =
ShardTransaction.props( store.newReadWriteTransaction(), shard, TestModel.createTestContext());
final ActorRef subject =
getSystem().actorOf(props, "testReadyTransaction");
new Within(duration("1 seconds")) {
+ @Override
protected void run() {
subject.tell(new ReadyTransaction().toSerializable(), getRef());
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
return "match";
@Test
public void testOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
- final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
final Props props =
ShardTransaction.props(store.newReadWriteTransaction(), shard, TestModel.createTestContext());
final ActorRef subject =
watch(subject);
new Within(duration("2 seconds")) {
+ @Override
protected void run() {
subject.tell(new CloseTransaction().toSerializable(), getRef());
final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in.getClass().equals(CloseTransactionReply.SERIALIZABLE_CLASS)) {
return "match";
final String termination = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
// do not put code outside this method, will run afterwards
+ @Override
protected String match(Object in) {
if (in instanceof Terminated) {
return "match";
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
try {
- final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+ final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
final Props props =
ShardTransaction.props(store.newReadOnlyTransaction(), shard, TestModel.createTestContext());
final TestActorRef subject = TestActorRef.apply(props,getSystem());
} catch (Exception cs) {
- assertEquals(cs.getClass().getSimpleName(), Exception.class.getSimpleName());
- assertTrue(cs.getMessage().startsWith("ShardTransaction:handleRecieve received an unknown message"));
+ assertEquals(UnknownMessageException.class.getSimpleName(), cs.getClass().getSimpleName());
+ assertTrue(cs.getMessage(), cs.getMessage().startsWith("Unknown message received "));
}
}
}
import akka.actor.ActorRef;
import akka.actor.Props;
-
import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
-
import junit.framework.Assert;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
TransactionProxy.TransactionType.READ_ONLY, transactionExecutor, TestModel.createTestContext());
+ actorContext.setExecuteRemoteOperationResponse(
+ new ReadDataReply(TestModel.createTestContext(), null)
+ .toSerializable());
+
ListenableFuture<Optional<NormalizedNode<?, ?>>> read =
transactionProxy.read(TestModel.TEST_PATH);
}
@Test
- public void testReadWhenANullIsReturned() throws Exception {
+ public void testExists() throws Exception {
final Props props = Props.create(DoNothingActor.class);
final ActorRef actorRef = getSystem().actorOf(props);
actorContext.setExecuteShardOperationResponse(createTransactionReply(actorRef));
actorContext.setExecuteRemoteOperationResponse("message");
+
TransactionProxy transactionProxy =
new TransactionProxy(actorContext,
TransactionProxy.TransactionType.READ_ONLY, transactionExecutor, TestModel.createTestContext());
- ListenableFuture<Optional<NormalizedNode<?, ?>>> read =
- transactionProxy.read(TestModel.TEST_PATH);
+ actorContext.setExecuteRemoteOperationResponse(new DataExistsReply(false).toSerializable());
- Optional<NormalizedNode<?, ?>> normalizedNodeOptional = read.get();
+ CheckedFuture<Boolean, ReadFailedException> exists =
+ transactionProxy.exists(TestModel.TEST_PATH);
- Assert.assertFalse(normalizedNodeOptional.isPresent());
+ Assert.assertFalse(exists.checkedGet());
- actorContext.setExecuteRemoteOperationResponse(new ReadDataReply(
- TestModel.createTestContext(), null).toSerializable());
+ actorContext.setExecuteRemoteOperationResponse(new DataExistsReply(true).toSerializable());
- read = transactionProxy.read(TestModel.TEST_PATH);
+ exists = transactionProxy.exists(TestModel.TEST_PATH);
- normalizedNodeOptional = read.get();
+ Assert.assertTrue(exists.checkedGet());
- Assert.assertFalse(normalizedNodeOptional.isPresent());
+ actorContext.setExecuteRemoteOperationResponse("bad message");
+
+ exists = transactionProxy.exists(TestModel.TEST_PATH);
+
+ try {
+ exists.checkedGet();
+ fail();
+ } catch(ReadFailedException e){
+ }
+
+ }
+
+ @Test(expected = ReadFailedException.class)
+ public void testReadWhenAnInvalidMessageIsSentInReply() throws Exception {
+ final Props props = Props.create(DoNothingActor.class);
+ final ActorRef actorRef = getSystem().actorOf(props);
+
+ final MockActorContext actorContext = new MockActorContext(this.getSystem());
+ actorContext.setExecuteLocalOperationResponse(createPrimaryFound(actorRef));
+ actorContext.setExecuteShardOperationResponse(createTransactionReply(actorRef));
+ actorContext.setExecuteRemoteOperationResponse("message");
+
+ TransactionProxy transactionProxy =
+ new TransactionProxy(actorContext,
+ TransactionProxy.TransactionType.READ_ONLY, transactionExecutor, TestModel.createTestContext());
+
+
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>
+ read = transactionProxy.read(TestModel.TEST_PATH);
+
+ read.checkedGet();
}
@Test
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class ShardIdentifierTest {
+
+ @Test
+ public void testBasic(){
+ ShardIdentifier id = ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config").build();
+
+ assertEquals("member-1-shard-inventory-config", id.toString());
+ }
+
+
+}
--- /dev/null
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+ public class ShardManagerIdentifierTest {
+
+ @Test
+ public void testIdentifier(){
+ assertEquals("shardmanager-operational", ShardManagerIdentifier.builder().type("operational").build().toString());
+ }
+
+}
import javax.management.MBeanServer;
import javax.management.ObjectName;
+import java.text.SimpleDateFormat;
+import java.util.Date;
public class ShardStatsTest {
- private MBeanServer mbeanServer;
- private ShardStats shardStats;
- private ObjectName testMBeanName;
+ private MBeanServer mbeanServer;
+ private ShardStats shardStats;
+ private ObjectName testMBeanName;
- @Before
- public void setUp() throws Exception {
+ @Before
+ public void setUp() throws Exception {
- shardStats = new ShardStats("shard-1");
- shardStats.registerMBean();
- mbeanServer= shardStats.getMBeanServer();
- String objectName = AbstractBaseMBean.BASE_JMX_PREFIX + "type="+shardStats.getMBeanType()+",Category="+
- shardStats.getMBeanCategory() + ",name="+
- shardStats.getMBeanName();
- testMBeanName = new ObjectName(objectName);
- }
+ shardStats = new ShardStats("shard-1");
+ shardStats.registerMBean();
+ mbeanServer = shardStats.getMBeanServer();
+ String objectName =
+ AbstractBaseMBean.BASE_JMX_PREFIX + "type=" + shardStats
+ .getMBeanType() + ",Category=" +
+ shardStats.getMBeanCategory() + ",name=" +
+ shardStats.getMBeanName();
+ testMBeanName = new ObjectName(objectName);
+ }
- @After
- public void tearDown() throws Exception {
- shardStats.unregisterMBean();
- }
+ @After
+ public void tearDown() throws Exception {
+ shardStats.unregisterMBean();
+ }
- @Test
- public void testGetShardName() throws Exception {
+ @Test
+ public void testGetShardName() throws Exception {
- Object attribute = mbeanServer.getAttribute(testMBeanName,"ShardName");
- Assert.assertEquals((String) attribute, "shard-1");
+ Object attribute = mbeanServer.getAttribute(testMBeanName, "ShardName");
+ Assert.assertEquals((String) attribute, "shard-1");
- }
+ }
- @Test
- public void testGetCommittedTransactionsCount() throws Exception {
- //let us increment some transactions count and then check
- shardStats.incrementCommittedTransactionCount();
- shardStats.incrementCommittedTransactionCount();
- shardStats.incrementCommittedTransactionCount();
+ @Test
+ public void testGetCommittedTransactionsCount() throws Exception {
+ //let us increment some transactions count and then check
+ shardStats.incrementCommittedTransactionCount();
+ shardStats.incrementCommittedTransactionCount();
+ shardStats.incrementCommittedTransactionCount();
- //now let us get from MBeanServer what is the transaction count.
- Object attribute = mbeanServer.getAttribute(testMBeanName,"CommittedTransactionsCount");
- Assert.assertEquals((Long) attribute, (Long)3L);
+ //now let us get from MBeanServer what is the transaction count.
+ Object attribute = mbeanServer.getAttribute(testMBeanName,
+ "CommittedTransactionsCount");
+ Assert.assertEquals((Long) attribute, (Long) 3L);
- }
-}
\ No newline at end of file
+ }
+
+ @Test
+ public void testGetLastCommittedTransactionTime() throws Exception {
+ SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+ Assert.assertEquals(shardStats.getLastCommittedTransactionTime(),
+ sdf.format(new Date(0L)));
+ long millis = System.currentTimeMillis();
+ shardStats.setLastCommittedTransactionTime(new Date(millis));
+
+ //now let us get from MBeanServer what is the transaction count.
+ Object attribute = mbeanServer.getAttribute(testMBeanName,
+ "LastCommittedTransactionTime");
+ Assert.assertEquals((String) attribute, sdf.format(new Date(millis)));
+ Assert.assertNotEquals((String) attribute,
+ sdf.format(new Date(millis - 1)));
+
+ }
+
+ @Test
+ public void testGetFailedTransactionsCount() throws Exception {
+ //let us increment some transactions count and then check
+ shardStats.incrementFailedTransactionsCount();
+ shardStats.incrementFailedTransactionsCount();
+
+
+ //now let us get from MBeanServer what is the transaction count.
+ Object attribute =
+ mbeanServer.getAttribute(testMBeanName, "FailedTransactionsCount");
+ Assert.assertEquals((Long) attribute, (Long) 2L);
+
+
+
+ }
+}
@Before
public void setUp(){
- store = new InMemoryDOMDataStore("test", MoreExecutors.sameThreadExecutor());
+ store = new InMemoryDOMDataStore("test", MoreExecutors.sameThreadExecutor(),
+ MoreExecutors.sameThreadExecutor());
store.onGlobalContextUpdated(TestModel.createTestContext());
}
akka {
- loggers = [akka.testkit.TestEventListener]
+ loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
+
actor {
serializers {
java = "akka.serialization.JavaSerializer"
*/
CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
LogicalDatastoreType store, YangInstanceIdentifier path);
+
+ /**
+ * Checks if data is available in the logical data store located at the provided path
+ *
+ * @param path
+ * Path which uniquely identifies the subtree whose existence the
+ * client wants to check
+ * @return a CheckedFuture containing the result of the check.
+ * <ul>
+ * <li>If the data at the supplied path exists, the Future returns a Boolean
+ * whose value is true, false otherwise</li>
+ * <li>If checking for the data fails, the Future will fail with a
+ * {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
+ * </ul>
+ */
+ CheckedFuture<Boolean, ReadFailedException> exists(
+ LogicalDatastoreType store, YangInstanceIdentifier path);
+
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.api;
+
+/**
+ * Exception reported when no RPC implementation is found in the system.
+ */
+public class RpcImplementationUnavailableException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
+ public RpcImplementationUnavailableException(final String message) {
+ super(message);
+ }
+
+ public RpcImplementationUnavailableException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+}
*/
package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-import java.util.concurrent.Executors;
-
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.opendaylight.yangtools.util.PropertyUtils;
import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
/**
*
public final class DomInmemoryDataBrokerModule extends
org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractDomInmemoryDataBrokerModule {
+ private static final String FUTURE_CALLBACK_EXECUTOR_MAX_QUEUE_SIZE_PROP =
+ "mdsal.datastore-future-callback-queue.size";
+ private static final int DEFAULT_FUTURE_CALLBACK_EXECUTOR_MAX_QUEUE_SIZE = 1000;
+
+ private static final String FUTURE_CALLBACK_EXECUTOR_MAX_POOL_SIZE_PROP =
+ "mdsal.datastore-future-callback-pool.size";
+ private static final int DEFAULT_FUTURE_CALLBACK_EXECUTOR_MAX_POOL_SIZE = 20;
+ private static final String COMMIT_EXECUTOR_MAX_QUEUE_SIZE_PROP =
+ "mdsal.datastore-commit-queue.size";
+ private static final int DEFAULT_COMMIT_EXECUTOR_MAX_QUEUE_SIZE = 5000;
+
public DomInmemoryDataBrokerModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier,
final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
@Override
public java.lang.AutoCloseable createInstance() {
- ListeningExecutorService storeExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
//Initializing Operational DOM DataStore defaulting to InMemoryDOMDataStore if one is not configured
DOMStore operStore = getOperationalDataStoreDependency();
if(operStore == null){
//we will default to InMemoryDOMDataStore creation
- operStore = new InMemoryDOMDataStore("DOM-OPER", storeExecutor);
- //here we will register the SchemaContext listener
- getSchemaServiceDependency().registerSchemaContextListener((InMemoryDOMDataStore)operStore);
+ operStore = InMemoryDOMDataStoreFactory.create("DOM-OPER", getSchemaServiceDependency());
}
DOMStore configStore = getConfigDataStoreDependency();
if(configStore == null){
//we will default to InMemoryDOMDataStore creation
- configStore = new InMemoryDOMDataStore("DOM-CFG", storeExecutor);
- //here we will register the SchemaContext listener
- getSchemaServiceDependency().registerSchemaContextListener((InMemoryDOMDataStore)configStore);
+ configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
}
ImmutableMap<LogicalDatastoreType, DOMStore> datastores = ImmutableMap
.<LogicalDatastoreType, DOMStore> builder().put(LogicalDatastoreType.OPERATIONAL, operStore)
.put(LogicalDatastoreType.CONFIGURATION, configStore).build();
+ /*
+ * We use a single-threaded executor for commits with a bounded queue capacity. If the
+ * queue capacity is reached, subsequent commit tasks will be rejected and the commits will
+ * fail. This is done to relieve back pressure. This should be an extreme scenario - either
+ * there's deadlock(s) somewhere and the controller is unstable or some rogue component is
+ * continuously hammering commits too fast or the controller is just over-capacity for the
+ * system it's running on.
+ */
+ ExecutorService commitExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
+ PropertyUtils.getIntSystemProperty(
+ COMMIT_EXECUTOR_MAX_QUEUE_SIZE_PROP,
+ DEFAULT_COMMIT_EXECUTOR_MAX_QUEUE_SIZE), "WriteTxCommit");
+
+ /*
+ * We use an executor for commit ListenableFuture callbacks that favors reusing available
+ * threads over creating new threads at the expense of execution time. The assumption is
+ * that most ListenableFuture callbacks won't execute a lot of business logic, so we want
+ * them to run quickly - many callbacks will likely just handle error conditions and do
+ * nothing on success. The executor queue capacity is bounded and, if the capacity is
+ * reached, subsequent submitted tasks will block the caller.
+ */
+ Executor listenableFutureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(
+ PropertyUtils.getIntSystemProperty(
+ FUTURE_CALLBACK_EXECUTOR_MAX_POOL_SIZE_PROP,
+ DEFAULT_FUTURE_CALLBACK_EXECUTOR_MAX_POOL_SIZE),
+ PropertyUtils.getIntSystemProperty(
+ FUTURE_CALLBACK_EXECUTOR_MAX_QUEUE_SIZE_PROP,
+ DEFAULT_FUTURE_CALLBACK_EXECUTOR_MAX_QUEUE_SIZE), "CommitFutures");
+
DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores,
- new DeadlockDetectingListeningExecutorService(Executors.newSingleThreadExecutor(),
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION));
+ new DeadlockDetectingListeningExecutorService(commitExecutor,
+ TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION,
+ listenableFutureExecutor));
return newDataBroker;
}
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.RejectedExecutionException;
import javax.annotation.concurrent.GuardedBy;
Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
Preconditions.checkArgument(listener != null, "Listener must not be null");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
- ListenableFuture<Void> commitFuture = executor.submit(new CommitCoordinationTask(
- transaction, cohorts, listener));
+
+ ListenableFuture<Void> commitFuture = null;
+ try {
+ commitFuture = executor.submit(new CommitCoordinationTask(transaction, cohorts, listener));
+ } catch(RejectedExecutionException e) {
+ LOG.error("The commit executor's queue is full - submit task was rejected. \n" +
+ executor, e);
+ return Futures.immediateFailedCheckedFuture(
+ new TransactionCommitFailedException(
+ "Could not submit the commit task - the commit queue capacity has been exceeded.", e));
+ }
+
if (listener.isPresent()) {
Futures.addCallback(commitFuture, new DOMDataCommitErrorInvoker(transaction, listener.get()));
}
return getSubtransaction(store).read(path);
}
+ @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+ LogicalDatastoreType store,
+ YangInstanceIdentifier path) {
+ return getSubtransaction(store).exists(path);
+ }
+
@Override
public void close() {
closeSubtransactions();
final LogicalDatastoreType store, final YangInstanceIdentifier path) {
return getSubtransaction(store).read(path);
}
-}
\ No newline at end of file
+
+ @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+ LogicalDatastoreType store,
+ YangInstanceIdentifier path) {
+ return getSubtransaction(store).exists(path);
+ }
+}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
import com.google.common.util.concurrent.ListenableFuture;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.annotation.Nullable;
-
import org.opendaylight.controller.md.sal.common.api.RegistrationListener;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import javax.annotation.Nullable;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
public class BackwardsCompatibleMountPoint implements MountProvisionInstance, SchemaContextProvider, SchemaService {
private final DataProviderService dataReader;
final Optional<NormalizedNode<?, ?>> normalizedNodeOptional = Optional.<NormalizedNode<?, ?>>fromNullable(normalized.getValue());
return Futures.immediateCheckedFuture(normalizedNodeOptional);
}
+
+ @Override public CheckedFuture<Boolean, ReadFailedException> exists(LogicalDatastoreType store,
+ YangInstanceIdentifier path) {
+
+ try {
+ return Futures.immediateCheckedFuture(read(store, path).get().isPresent());
+ } catch (InterruptedException | ExecutionException e) {
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Exists failed",e));
+ }
+ }
}
@VisibleForTesting
return new BackwardsCompatibleReadTransaction(dataReader, dataNormalizer).read(store, path);
}
+ @Override public CheckedFuture<Boolean, ReadFailedException> exists(LogicalDatastoreType store,
+ YangInstanceIdentifier path) {
+
+ try {
+ return Futures.immediateCheckedFuture(read(store, path).get().isPresent());
+ } catch (InterruptedException | ExecutionException e) {
+ return Futures.immediateFailedCheckedFuture(new ReadFailedException("Exists failed",e));
+ }
+ }
+
@Override
public boolean cancel() {
return delegateWriteTx.cancel();
package org.opendaylight.controller.sal.dom.broker.impl;
import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.ListenableFuture;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
class RoutedRpcSelector implements RpcImplementation, AutoCloseable, Identifiable<RpcRoutingContext> {
}
if (potential == null) {
return router.invokeRpc(rpc, (YangInstanceIdentifier) route, input);
+ } else {
+ return potential.invokeRpc(rpc, input);
}
- checkState(potential != null, "No implementation is available for rpc:%s path:%s", rpc, route);
- return potential.invokeRpc(rpc, input);
}
public void addPath(final QName context, final YangInstanceIdentifier path, final RoutedRpcRegImpl routedRpcRegImpl) {
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.opendaylight.controller.sal.core.api.Broker.RpcRegistration;
import org.opendaylight.controller.sal.core.api.RoutedRpcDefaultImplementation;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
+import org.opendaylight.controller.sal.core.api.RpcImplementationUnavailableException;
import org.opendaylight.controller.sal.core.api.RpcRegistrationListener;
import org.opendaylight.controller.sal.core.api.RpcRoutingContext;
import org.opendaylight.controller.sal.dom.broker.spi.RpcRouter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.ListenableFuture;
-
+/**
+ * RPC broker responsible for routing requests to remote systems.
+ */
public class SchemaAwareRpcBroker implements RpcRouter, Identifiable<String>, RoutedRpcDefaultImplementation {
private static final Logger LOG = LoggerFactory.getLogger(SchemaAwareRpcBroker.class);
@Override
public ListenableFuture<RpcResult<CompositeNode>> invokeRpc(final QName rpc, final YangInstanceIdentifier route, final CompositeNode input) {
- checkState(defaultDelegate != null, "No implementation is available for rpc:%s path:%s", rpc, route);
- return defaultDelegate.invokeRpc(rpc, route, input);
+ // Report "no implementation" through the returned future rather than
+ // throwing IllegalStateException into the caller's thread, so async
+ // callers observe the failure via the future contract.
+ if (defaultDelegate == null) {
+ return Futures.immediateFailedCheckedFuture(new RpcImplementationUnavailableException("No RPC implementation found"));
+ }
+
+ // Three {} placeholders require three arguments; the delegate was missing.
+ LOG.debug("Forwarding RPC {} path {} to delegate {}", rpc, route, defaultDelegate);
+ return defaultDelegate.invokeRpc(rpc, route, input);
}
void remove(final GlobalRpcRegistration registration) {
@Before
public void setupStore() {
- InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
schemaContext = TestModel.createTestContext();
operStore.onGlobalContextUpdated(schemaContext);
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
+import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.Mockito;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
private SchemaContext schemaContext;
private DOMDataBrokerImpl domBroker;
private ListeningExecutorService executor;
+ private ExecutorService futureExecutor;
+ private CommitExecutorService commitExecutor;
@Before
public void setupStore() {
- InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
schemaContext = TestModel.createTestContext();
operStore.onGlobalContextUpdated(schemaContext);
.put(OPERATIONAL, operStore) //
.build();
- executor = new DeadlockDetectingListeningExecutorService(Executors.newSingleThreadExecutor(),
- TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION);
+ commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
+ futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
+ executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
+ TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, futureExecutor);
domBroker = new DOMDataBrokerImpl(stores, executor);
}
if( executor != null ) {
executor.shutdownNow();
}
+
+ if(futureExecutor != null) {
+ futureExecutor.shutdownNow();
+ }
}
@Test(timeout=10000)
assertTrue(afterCommitRead.isPresent());
}
+ // Verifies that a RejectedExecutionException thrown when the commit task is
+ // submitted surfaces to the caller as a TransactionCommitFailedException.
+ @Test(expected=TransactionCommitFailedException.class)
+ public void testRejectedCommit() throws Exception {
+
+ // Swap in a mock delegate that rejects every submitted Runnable; the
+ // lifecycle methods (shutdown/shutdownNow/awaitTermination/toString) are
+ // stubbed so tearDown and logging do not trip the strict mock.
+ commitExecutor.delegate = Mockito.mock( ExecutorService.class );
+ Mockito.doThrow( new RejectedExecutionException( "mock" ) )
+ .when( commitExecutor.delegate ).execute( Mockito.any( Runnable.class ) );
+ Mockito.doNothing().when( commitExecutor.delegate ).shutdown();
+ Mockito.doReturn( Collections.emptyList() ).when( commitExecutor.delegate ).shutdownNow();
+ Mockito.doReturn( "" ).when( commitExecutor.delegate ).toString();
+ Mockito.doReturn( true ).when( commitExecutor.delegate )
+ .awaitTermination( Mockito.anyLong(), Mockito.any( TimeUnit.class ) );
+
+ DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+ writeTx.put( OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME) );
+
+ // Expected to throw TransactionCommitFailedException (see @Test above).
+ writeTx.submit().checkedGet( 5, TimeUnit.SECONDS );
+ }
+
/**
* Tests a simple DataChangeListener notification after a write.
*/
assertTrue( "onDataChanged was not called", latch.await( 5, TimeUnit.SECONDS ) );
}
}
+
+ // Test-only ExecutorService wrapper whose delegate can be replaced after
+ // construction (e.g. with a Mockito mock) to simulate submission failures
+ // in the commit path.
+ static class CommitExecutorService extends ForwardingExecutorService {
+
+ // Intentionally non-final and package-visible so tests can swap it.
+ ExecutorService delegate;
+
+ public CommitExecutorService( ExecutorService delegate ) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ protected ExecutorService delegate() {
+ return delegate;
+ }
+ }
}
@Before
public void setupStore() {
- InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
- InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
+ InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
schemaContext = TestModel.createTestContext();
operStore.onGlobalContextUpdated(schemaContext);
* </ul>
*/
CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(YangInstanceIdentifier path);
+
+ /**
+ * Checks whether data is available in the logical data store at the provided path.
+ *
+ * @param path
+ * Path which uniquely identifies the subtree whose existence the
+ * client wants to check
+ * @return a CheckedFuture containing the result of the check.
+ * <ul>
+ * <li>If data exists at the supplied path, the Future returns a Boolean
+ * whose value is true, false otherwise</li>
+ * <li>If checking for the data fails, the Future will fail with a
+ * {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
+ * </ul>
+ */
+ CheckedFuture<Boolean, ReadFailedException> exists(YangInstanceIdentifier path);
}
package org.opendaylight.controller.config.yang.inmemory_datastore_provider;
-import java.util.concurrent.Executors;
-
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-
-import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
public class InMemoryConfigDataStoreProviderModule extends org.opendaylight.controller.config.yang.inmemory_datastore_provider.AbstractInMemoryConfigDataStoreProviderModule {
+
public InMemoryConfigDataStoreProviderModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
@Override
public java.lang.AutoCloseable createInstance() {
- InMemoryDOMDataStore ids = new InMemoryDOMDataStore("DOM-CFG", MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor()));
- getSchemaServiceDependency().registerSchemaContextListener(ids);
- return ids;
+ return InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
}
}
package org.opendaylight.controller.config.yang.inmemory_datastore_provider;
-import java.util.concurrent.Executors;
-
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-
-import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
public class InMemoryOperationalDataStoreProviderModule extends org.opendaylight.controller.config.yang.inmemory_datastore_provider.AbstractInMemoryOperationalDataStoreProviderModule {
+
public InMemoryOperationalDataStoreProviderModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
}
@Override
public java.lang.AutoCloseable createInstance() {
- InMemoryDOMDataStore ids = new InMemoryDOMDataStore("DOM-OPER", MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor()));
- getOperationalSchemaServiceDependency().registerSchemaContextListener(ids);
- return ids;
+ return InMemoryDOMDataStoreFactory.create("DOM-OPER", getOperationalSchemaServiceDependency());
}
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.util.concurrent.NotificationManager;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.slf4j.Logger;
class ChangeListenerNotifyTask implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(ChangeListenerNotifyTask.class);
+
private final Iterable<? extends DataChangeListenerRegistration<?>> listeners;
private final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> event;
+ @SuppressWarnings("rawtypes")
+ private final NotificationManager<AsyncDataChangeListener,AsyncDataChangeEvent>
+ notificationMgr;
+
+ @SuppressWarnings("rawtypes")
public ChangeListenerNotifyTask(final Iterable<? extends DataChangeListenerRegistration<?>> listeners,
- final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> event) {
+ final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> event,
+ final NotificationManager<AsyncDataChangeListener,AsyncDataChangeEvent> notificationMgr) {
this.listeners = listeners;
this.event = event;
+ this.notificationMgr = notificationMgr;
}
@Override
public void run() {
for (DataChangeListenerRegistration<?> listener : listeners) {
- try {
- listener.getInstance().onDataChanged(event);
- } catch (Exception e) {
- LOG.error("Unhandled exception during invoking listener {} with event {}", listener, event, e);
- }
+ notificationMgr.submitNotification(listener.getInstance(), event);
}
-
}
@Override
public String toString() {
return "ChangeListenerNotifyTask [listeners=" + listeners + ", event=" + event + "]";
}
-
}
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
+
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
+import org.opendaylight.yangtools.util.ExecutorServiceUtil;
+import org.opendaylight.yangtools.util.PropertyUtils;
+import org.opendaylight.yangtools.util.concurrent.NotificationManager;
+import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.slf4j.LoggerFactory;
import javax.annotation.concurrent.GuardedBy;
+
import java.util.Collections;
import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import static com.google.common.base.Preconditions.checkState;
public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, SchemaContextListener,
TransactionReadyPrototype,AutoCloseable {
private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
+
+ @SuppressWarnings("rawtypes")
+ private static final QueuedNotificationManager.Invoker<AsyncDataChangeListener,
+ AsyncDataChangeEvent> DCL_NOTIFICATION_MGR_INVOKER =
+ new QueuedNotificationManager.Invoker<AsyncDataChangeListener,
+ AsyncDataChangeEvent>() {
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void invokeListener( AsyncDataChangeListener listener,
+ AsyncDataChangeEvent notification ) {
+ listener.onDataChanged(notification);
+ }
+ };
+
+ private static final String DCL_NOTIFICATION_MGR_MAX_QUEUE_SIZE_PROP =
+ "mdsal.datastore-dcl-notification-queue.size";
+
+ private static final int DEFAULT_DCL_NOTIFICATION_MGR_MAX_QUEUE_SIZE = 1000;
+
private final DataTree dataTree = InMemoryDataTreeFactory.getInstance().create();
private final ListenerTree listenerTree = ListenerTree.create();
private final AtomicLong txCounter = new AtomicLong(0);
- private final ListeningExecutorService executor;
+ private final ListeningExecutorService listeningExecutor;
+
+ @SuppressWarnings("rawtypes")
+ private final NotificationManager<AsyncDataChangeListener,AsyncDataChangeEvent>
+ dataChangeListenerNotificationManager;
+ private final ExecutorService dataChangeListenerExecutor;
private final String name;
- public InMemoryDOMDataStore(final String name, final ListeningExecutorService executor) {
+ public InMemoryDOMDataStore(final String name, final ListeningExecutorService listeningExecutor,
+ final ExecutorService dataChangeListenerExecutor) {
this.name = Preconditions.checkNotNull(name);
- this.executor = Preconditions.checkNotNull(executor);
+ this.listeningExecutor = Preconditions.checkNotNull(listeningExecutor);
+
+ this.dataChangeListenerExecutor = Preconditions.checkNotNull(dataChangeListenerExecutor);
+
+ int maxDCLQueueSize = PropertyUtils.getIntSystemProperty(
+ DCL_NOTIFICATION_MGR_MAX_QUEUE_SIZE_PROP, DEFAULT_DCL_NOTIFICATION_MGR_MAX_QUEUE_SIZE );
+
+ dataChangeListenerNotificationManager =
+ new QueuedNotificationManager<>(this.dataChangeListenerExecutor,
+ DCL_NOTIFICATION_MGR_INVOKER, maxDCLQueueSize, "DataChangeListenerQueueMgr");
}
@Override
}
@Override
- public void close(){
- executor.shutdownNow();
+ public void close() {
+ ExecutorServiceUtil.tryGracefulShutdown(listeningExecutor, 30, TimeUnit.SECONDS);
+ ExecutorServiceUtil.tryGracefulShutdown(dataChangeListenerExecutor, 30, TimeUnit.SECONDS);
}
@Override
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> ListenerRegistration<L> registerChangeListener(
.setAfter(data) //
.addCreated(path, data) //
.build();
- executor.submit(new ChangeListenerNotifyTask(Collections.singletonList(reg), event));
+
+ new ChangeListenerNotifyTask(Collections.singletonList(reg), event,
+ dataChangeListenerNotificationManager).run();
}
}
@Override
public void close() {
- executor.shutdownNow();
-
+ // FIXME: this call doesn't look right here - listeningExecutor is shared and owned
+ // by the outer class.
+ //listeningExecutor.shutdownNow();
}
protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction,
@Override
public ListenableFuture<Boolean> canCommit() {
- return executor.submit(new Callable<Boolean>() {
+ return listeningExecutor.submit(new Callable<Boolean>() {
@Override
public Boolean call() throws TransactionCommitFailedException {
try {
@Override
public ListenableFuture<Void> preCommit() {
- return executor.submit(new Callable<Void>() {
+ return listeningExecutor.submit(new Callable<Void>() {
@Override
public Void call() {
candidate = dataTree.prepare(modification);
- listenerResolver = ResolveDataChangeEventsTask.create(candidate, listenerTree);
+ listenerResolver = ResolveDataChangeEventsTask.create(candidate, listenerTree,
+ dataChangeListenerNotificationManager);
return null;
}
});
for (ChangeListenerNotifyTask task : listenerResolver.call()) {
LOG.trace("Scheduling invocation of listeners: {}", task);
- executor.submit(task);
+ task.run();
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.sal.dom.store.impl;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import javax.annotation.Nullable;
+
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.opendaylight.yangtools.util.PropertyUtils;
+import com.google.common.util.concurrent.MoreExecutors;
+
+/**
+ * A factory for creating InMemoryDOMDataStore instances.
+ *
+ * @author Thomas Pantelis
+ */
+public final class InMemoryDOMDataStoreFactory {
+
+ private static final String DCL_EXECUTOR_MAX_QUEUE_SIZE_PROP =
+ "mdsal.datastore-dcl-notification-queue.size";
+ private static final int DEFAULT_DCL_EXECUTOR_MAX_QUEUE_SIZE = 1000;
+
+ private static final String DCL_EXECUTOR_MAX_POOL_SIZE_PROP =
+ "mdsal.datastore-dcl-notification-pool.size";
+ private static final int DEFAULT_DCL_EXECUTOR_MAX_POOL_SIZE = 20;
+
+ private InMemoryDOMDataStoreFactory() {
+ }
+
+ /**
+ * Creates an InMemoryDOMDataStore instance.
+ *
+ * @param name the name of the data store
+ * @param schemaService the SchemaService to which to register the data store.
+ * @return an InMemoryDOMDataStore instance
+ */
+ public static InMemoryDOMDataStore create(final String name,
+ @Nullable final SchemaService schemaService) {
+
+ // For DataChangeListener notifications we use an executor that provides the fastest
+ // task execution time to get higher throughput as DataChangeListeners typically provide
+ // much of the business logic for a data model. If the executor queue size limit is reached,
+ // subsequent submitted notifications will block the calling thread.
+
+ int dclExecutorMaxQueueSize = PropertyUtils.getIntSystemProperty(
+ DCL_EXECUTOR_MAX_QUEUE_SIZE_PROP, DEFAULT_DCL_EXECUTOR_MAX_QUEUE_SIZE);
+ int dclExecutorMaxPoolSize = PropertyUtils.getIntSystemProperty(
+ DCL_EXECUTOR_MAX_POOL_SIZE_PROP, DEFAULT_DCL_EXECUTOR_MAX_POOL_SIZE);
+
+ ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+ dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL" );
+
+ InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
+ MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor()),
+ dataChangeListenerExecutor);
+
+ if(schemaService != null) {
+ schemaService.registerSchemaContextListener(dataStore);
+ }
+
+ return dataStore;
+ }
+}
import java.util.Set;
import java.util.concurrent.Callable;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Node;
import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Walker;
+import org.opendaylight.yangtools.util.concurrent.NotificationManager;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
private final DataTreeCandidate candidate;
private final ListenerTree listenerRoot;
- public ResolveDataChangeEventsTask(final DataTreeCandidate candidate, final ListenerTree listenerTree) {
+ @SuppressWarnings("rawtypes")
+ private final NotificationManager<AsyncDataChangeListener, AsyncDataChangeEvent> notificationMgr;
+
+ @SuppressWarnings("rawtypes")
+ public ResolveDataChangeEventsTask(final DataTreeCandidate candidate, final ListenerTree listenerTree,
+ final NotificationManager<AsyncDataChangeListener, AsyncDataChangeEvent> notificationMgr) {
this.candidate = Preconditions.checkNotNull(candidate);
this.listenerRoot = Preconditions.checkNotNull(listenerTree);
+ this.notificationMgr = Preconditions.checkNotNull(notificationMgr);
}
/**
* @param listeners
* @param entries
*/
- private static void addNotificationTask(final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder,
+ private void addNotificationTask(final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder,
final ListenerTree.Node listeners, final Collection<DOMImmutableDataChangeEvent> entries) {
if (!entries.isEmpty()) {
* @param listeners
* @param event
*/
- private static void addNotificationTaskByScope(
+ private void addNotificationTaskByScope(
final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder, final ListenerTree.Node listeners,
final DOMImmutableDataChangeEvent event) {
DataChangeScope eventScope = event.getScope();
List<DataChangeListenerRegistration<?>> listenerSet = Collections
.<DataChangeListenerRegistration<?>> singletonList(listenerReg);
if (eventScope == DataChangeScope.BASE) {
- taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event));
+ taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event, notificationMgr));
} else if (eventScope == DataChangeScope.ONE && listenerScope != DataChangeScope.BASE) {
- taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event));
+ taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event, notificationMgr));
} else if (eventScope == DataChangeScope.SUBTREE && listenerScope == DataChangeScope.SUBTREE) {
- taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event));
+ taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event, notificationMgr));
}
}
}
* @param listeners
* @param entries
*/
- private static void addNotificationTasksAndMergeEvents(
+ private void addNotificationTasksAndMergeEvents(
final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder, final ListenerTree.Node listeners,
final Collection<DOMImmutableDataChangeEvent> entries) {
}
}
- private static void addNotificationTaskExclusively(
+ private void addNotificationTaskExclusively(
final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder, final Node listeners,
final DOMImmutableDataChangeEvent event) {
for (DataChangeListenerRegistration<?> listener : listeners.getListeners()) {
if (listener.getScope() == event.getScope()) {
Set<DataChangeListenerRegistration<?>> listenerSet = Collections
.<DataChangeListenerRegistration<?>> singleton(listener);
- taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event));
+ taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event, notificationMgr));
}
}
}
}
}
- public static ResolveDataChangeEventsTask create(final DataTreeCandidate candidate, final ListenerTree listenerTree) {
- return new ResolveDataChangeEventsTask(candidate, listenerTree);
+ @SuppressWarnings("rawtypes")
+ public static ResolveDataChangeEventsTask create(final DataTreeCandidate candidate,
+ final ListenerTree listenerTree,
+ final NotificationManager<AsyncDataChangeListener,AsyncDataChangeEvent> notificationMgr) {
+ return new ResolveDataChangeEventsTask(candidate, listenerTree, notificationMgr);
}
}
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
-import static com.google.common.base.Preconditions.checkNotNull;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
+import static com.google.common.base.Preconditions.checkNotNull;
/**
*
return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
}
}
-}
\ No newline at end of file
+
+ /**
+ * Checks whether data exists at the given path by delegating to read()
+ * and inspecting the presence of the result.
+ */
+ @Override
+ public CheckedFuture<Boolean, ReadFailedException> exists(YangInstanceIdentifier path) {
+ LOG.debug("Tx: {} Exists: {}", getIdentifier(), path);
+ checkNotNull(path, "Path must not be null.");
+
+ try {
+ final boolean present = read(path).checkedGet().isPresent();
+ return Futures.immediateCheckedFuture(present);
+ } catch (ReadFailedException e) {
+ return Futures.immediateFailedCheckedFuture(e);
+ }
+ }
+}
return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
}
}
-}
\ No newline at end of file
+
+ @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+ YangInstanceIdentifier path) {
+ try {
+ return Futures.immediateCheckedFuture(
+ read(path).checkedGet().isPresent());
+ } catch (ReadFailedException e) {
+ return Futures.immediateFailedCheckedFuture(e);
+ }
+ }
+}
/**
* A walking context, pretty much equivalent to an iterator, but it
- * exposes the undelying tree structure.
+ * exposes the underlying tree structure.
+ */
+ /*
+ * FIXME: BUG-1511: split this class out as ListenerWalker.
*/
public static final class Walker implements AutoCloseable {
private final Lock lock;
* only as long as the {@link org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Walker} instance through which it is reached remains
* unclosed.
*/
+ /*
+ * FIXME: BUG-1511: split this class out as ListenerNode.
+ */
public static final class Node implements StoreTreeNode<Node>, Identifiable<PathArgument> {
private final Collection<DataChangeListenerRegistration<?>> listeners = new ArrayList<>();
private final Map<PathArgument, Node> children = new HashMap<>();
import java.util.Collection;
import java.util.Map;
-
+import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.WriteTransactionCustomizer;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedList;
import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
import org.opendaylight.yangtools.yang.common.QName;
private InMemoryDOMDataStore datastore;
private SchemaContext schemaContext;
+ private TestDCLExecutorService dclExecutorService;
@Before
public final void setup() throws Exception {
ModuleInfoBackedContext context = ModuleInfoBackedContext.create();
context.registerModuleInfo(moduleInfo);
schemaContext = context.tryToCreateSchemaContext().get();
+
+ dclExecutorService = new TestDCLExecutorService(
+ SpecialExecutors.newBlockingBoundedFastThreadPool(1, 10, "DCL" ));
+
datastore = new InMemoryDOMDataStore("TEST",
- MoreExecutors.sameThreadExecutor());
+ MoreExecutors.sameThreadExecutor(), dclExecutorService );
datastore.onGlobalContextUpdated(schemaContext);
}
+ @After
+ public void tearDown() {
+ if( dclExecutorService != null ) {
+ dclExecutorService.shutdownNow();
+ }
+ }
+
public final DatastoreTestTask newTestTask() {
- return new DatastoreTestTask(datastore).cleanup(DatastoreTestTask
+ return new DatastoreTestTask(datastore, dclExecutorService).cleanup(DatastoreTestTask
.simpleDelete(TOP_LEVEL));
}
package org.opendaylight.controller.md.sal.dom.store.impl;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
private WriteTransactionCustomizer cleanup;
private YangInstanceIdentifier changePath;
private DataChangeScope changeScope;
- private boolean postSetup = false;
+ private volatile boolean postSetup = false;
private final ChangeEventListener internalListener;
+ private final TestDCLExecutorService dclExecutorService;
- public DatastoreTestTask(final DOMStore datastore) {
+ public DatastoreTestTask(final DOMStore datastore, final TestDCLExecutorService dclExecutorService) {
this.store = datastore;
+ this.dclExecutorService = dclExecutorService;
internalListener = new ChangeEventListener();
}
return this;
}
- public void run() throws InterruptedException, ExecutionException {
+ public void run() throws InterruptedException, ExecutionException, TimeoutException {
if (setup != null) {
execute(setup);
}
}
Preconditions.checkState(write != null, "Write Transaction must be set.");
+
postSetup = true;
+ dclExecutorService.afterTestSetup();
+
execute(write);
if (registration != null) {
registration.close();
}
+
if (changeListener != null) {
- changeListener.onDataChanged(internalListener.receivedChange.get());
+ changeListener.onDataChanged(getChangeEvent());
}
if (read != null) {
read.verify(store.newReadOnlyTransaction());
}
}
- public Future<AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>>> getChangeEvent() {
- return internalListener.receivedChange;
+ public AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> getChangeEvent() {
+ try {
+ return internalListener.receivedChange.get(10, TimeUnit.SECONDS);
+ } catch( Exception e ) {
+ fail( "Error getting the AsyncDataChangeEvent from the Future: " + e );
+ }
+
+ // won't get here
+ return null;
+ }
+
+ public void verifyNoChangeEvent() {
+ try {
+ Object unexpected = internalListener.receivedChange.get(500, TimeUnit.MILLISECONDS);
+ fail( "Got unexpected AsyncDataChangeEvent from the Future: " + unexpected );
+ } catch( TimeoutException e ) {
+ // Expected
+ } catch( Exception e ) {
+ fail( "Error getting the AsyncDataChangeEvent from the Future: " + e );
+ }
}
private void execute(final WriteTransactionCustomizer writeCustomizer) throws InterruptedException,
abstract protected void customizeTask(DatastoreTestTask task);
@Test
- public final void putTopLevelOneNested() throws InterruptedException, ExecutionException {
+ public final void putTopLevelOneNested() throws Exception {
DatastoreTestTask task = newTestTask().test(writeOneTopMultipleNested(FOO, BAR));
customizeTask(task);
}
@Test
- public final void existingTopWriteSibling() throws InterruptedException, ExecutionException {
+ public final void existingTopWriteSibling() throws Exception {
DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO)).test(
new WriteTransactionCustomizer() {
@Override
@Test
- public final void existingTopWriteTwoNested() throws InterruptedException, ExecutionException {
+ public final void existingTopWriteTwoNested() throws Exception {
DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO)).test(
new WriteTransactionCustomizer() {
@Override
@Test
- public final void existingOneNestedWriteAdditionalNested() throws InterruptedException, ExecutionException {
+ public final void existingOneNestedWriteAdditionalNested() throws Exception {
DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR)).test(
new WriteTransactionCustomizer() {
@Override
protected abstract void existingOneNestedWriteAdditionalNested(DatastoreTestTask task) throws InterruptedException, ExecutionException;
- protected abstract void putTopLevelOneNested(DatastoreTestTask task) throws InterruptedException,
- ExecutionException;
+ protected abstract void putTopLevelOneNested(DatastoreTestTask task) throws Exception;
@Test
- public final void replaceTopLevelNestedChanged() throws InterruptedException, ExecutionException {
+ public final void replaceTopLevelNestedChanged() throws Exception {
DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR)).test(
writeOneTopMultipleNested(FOO, BAZ));
customizeTask(task);
ExecutionException;
@Test
- public final void putTopLevelWithTwoNested() throws InterruptedException, ExecutionException {
+ public final void putTopLevelWithTwoNested() throws Exception {
DatastoreTestTask task = newTestTask().test(writeOneTopMultipleNested(FOO, BAR, BAZ));
customizeTask(task);
ExecutionException;
@Test
- public final void twoNestedExistsOneIsDeleted() throws InterruptedException, ExecutionException {
+ public final void twoNestedExistsOneIsDeleted() throws Exception {
DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR, BAZ)).test(
deleteNested(FOO, BAZ));
ExecutionException;
@Test
- public final void nestedListExistsRootDeleted() throws InterruptedException, ExecutionException {
+ public final void nestedListExistsRootDeleted() throws Exception {
DatastoreTestTask task = newTestTask().cleanup(null).setup(writeOneTopMultipleNested(FOO, BAR, BAZ))
.test(DatastoreTestTask.simpleDelete(TOP_LEVEL));
*/
package org.opendaylight.controller.md.sal.dom.store.impl;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.ExecutionException;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutionException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
public class InMemoryDataStoreTest {
@Before
public void setupStore() {
- domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor());
+ domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor(),
+ MoreExecutors.sameThreadExecutor());
schemaContext = TestModel.createTestContext();
domStore.onGlobalContextUpdated(schemaContext);
}
assertEquals( "After commit read: data", containerNode, afterCommitRead.get() );
}
+
+ @Test
+ public void testExistsForExistingData() throws Exception {
+
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ assertNotNull( writeTx );
+
+ ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
+ .withNodeIdentifier( new NodeIdentifier( TestModel.TEST_QNAME ) )
+ .addChild( ImmutableNodes.mapNodeBuilder( TestModel.OUTER_LIST_QNAME )
+ .addChild( ImmutableNodes.mapEntry( TestModel.OUTER_LIST_QNAME,
+ TestModel.ID_QNAME, 1 ) ).build() ).build();
+
+ writeTx.merge( TestModel.TEST_PATH, containerNode );
+
+ CheckedFuture<Boolean, ReadFailedException> exists =
+ writeTx.exists(TestModel.TEST_PATH);
+
+ assertEquals(true, exists.checkedGet());
+
+ DOMStoreThreePhaseCommitCohort ready = writeTx.ready();
+
+ ready.preCommit().get();
+
+ ready.commit().get();
+
+ DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
+ assertNotNull( readTx );
+
+ exists =
+ readTx.exists(TestModel.TEST_PATH);
+
+ assertEquals(true, exists.checkedGet());
+ }
+
+ @Test
+ public void testExistsForNonExistingData() throws Exception {
+
+ DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+ assertNotNull( writeTx );
+
+ CheckedFuture<Boolean, ReadFailedException> exists =
+ writeTx.exists(TestModel.TEST_PATH);
+
+ assertEquals(false, exists.checkedGet());
+
+ DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
+ assertNotNull( readTx );
+
+ exists =
+ readTx.exists(TestModel.TEST_PATH);
+
+ assertEquals(false, exists.checkedGet());
+ }
+
+ @Test(expected=ReadFailedException.class)
+ public void testExistsThrowsReadFailedException() throws Exception {
+
+ DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
+ assertNotNull( readTx );
+
+ readTx.close();
+
+ readTx.exists(TestModel.TEST_PATH).checkedGet();
+ }
+
+
+
@Test(expected=ReadFailedException.class)
public void testReadWithReadOnlyTransactionClosed() throws Throwable {
@Override
public void putTopLevelOneNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), TOP_LEVEL, path(FOO), path(FOO, BAR));
assertEmpty(change.getUpdatedData());
public void replaceTopLevelNestedChanged(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO, BAZ));
assertContains(change.getUpdatedData(), TOP_LEVEL, path(FOO));
protected void putTopLevelWithTwoNested(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), TOP_LEVEL, path(FOO), path(FOO, BAR), path(FOO, BAZ));
assertEmpty(change.getUpdatedData());
protected void twoNestedExistsOneIsDeleted(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertEmpty(change.getCreatedData());
assertContains(change.getUpdatedData(), TOP_LEVEL, path(FOO));
protected void nestedListExistsRootDeleted(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertEmpty(change.getCreatedData());
assertEmpty(change.getUpdatedData());
@Override
protected void existingOneNestedWriteAdditionalNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO,BAZ));
assertNotContains(change.getCreatedData(), path(FOO,BAR));
@Override
protected void existingTopWriteTwoNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO,BAR),path(FOO,BAZ));
assertContains(change.getUpdatedData(), TOP_LEVEL, path(FOO));
@Override
protected void existingTopWriteSibling(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO_SIBLING));
assertContains(change.getUpdatedData(), TOP_LEVEL);
@Before
public void setupStore() {
- domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor());
+ domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor(),
+ MoreExecutors.sameThreadExecutor());
loadSchemas(RockTheHouseInput.class);
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.sal.dom.store.impl;
+
+import java.util.concurrent.ExecutorService;
+
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+
+/**
+ * A forwarding Executor used by unit tests for DataChangeListener notifications
+ *
+ * @author Thomas Pantelis
+ */
+public class TestDCLExecutorService extends ForwardingExecutorService {
+
+ // Start with a same thread executor to avoid timing issues during test setup.
+ private volatile ExecutorService currentExecutor = MoreExecutors.sameThreadExecutor();
+
+ // The real executor to use when test setup is complete.
+ private final ExecutorService postSetupExecutor;
+
+
+ public TestDCLExecutorService( ExecutorService postSetupExecutor ) {
+ this.postSetupExecutor = postSetupExecutor;
+ }
+
+ @Override
+ protected ExecutorService delegate() {
+ return currentExecutor;
+ }
+
+ public void afterTestSetup() {
+ // Test setup complete - switch to the real executor.
+ currentExecutor = postSetupExecutor;
+ }
+}
\ No newline at end of file
import static org.junit.Assert.assertNotNull;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
@Override
public void putTopLevelOneNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
public void replaceTopLevelNestedChanged(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
assertContains(change.getCreatedData(), path(FOO, BAZ));
protected void putTopLevelWithTwoNested(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
assertFalse(change.getCreatedData().isEmpty());
protected void twoNestedExistsOneIsDeleted(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- Future<?> future = task.getChangeEvent();
/*
* Base listener should be notified only and only if actual node changed its state,
* since deletion of child, did not result in change of node we are listening
* and this means settable future containing receivedDataChangeEvent is not done.
*
*/
- assertFalse(future.isDone());
+ task.verifyNoChangeEvent();
}
@Override
public void nestedListExistsRootDeleted(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertEmpty(change.getCreatedData());
assertEmpty(change.getUpdatedData());
@Override
protected void existingOneNestedWriteAdditionalNested(final DatastoreTestTask task) {
- Future<?> future = task.getChangeEvent();
/*
* One listener should be notified only and only if actual node changed its state,
* since deletion of nested child (in this case /nested-list/nested-list[foo],
* and this means settable future containing receivedDataChangeEvent is not done.
*
*/
- assertFalse(future.isDone());
+ task.verifyNoChangeEvent();
}
@Override
protected void existingTopWriteTwoNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- Future<?> future = task.getChangeEvent();
/*
* One listener should be notified only and only if actual node changed its state,
* since deletion of nested child (in this case /nested-list/nested-list[foo],
* and this means settable future containing receivedDataChangeEvent is not done.
*
*/
- assertFalse(future.isDone());
+ task.verifyNoChangeEvent();
}
@Override
protected void existingTopWriteSibling(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO_SIBLING));
assertNotContains(change.getUpdatedData(), path(FOO), TOP_LEVEL);
import static org.junit.Assert.assertNotNull;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
@Override
public void putTopLevelOneNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
public void replaceTopLevelNestedChanged(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
assertContains(change.getCreatedData(), path(FOO, BAZ));
protected void putTopLevelWithTwoNested(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
assertFalse(change.getCreatedData().isEmpty());
protected void twoNestedExistsOneIsDeleted(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- Future<?> future = task.getChangeEvent();
/*
* One listener should be notified only and only if actual node changed its state,
* since deletion of nested child (in this case /nested-list/nested-list[foo],
* and this means settable future containing receivedDataChangeEvent is not done.
*
*/
- assertFalse(future.isDone());
+ task.verifyNoChangeEvent();
}
@Override
public void nestedListExistsRootDeleted(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertEmpty(change.getCreatedData());
assertEmpty(change.getUpdatedData());
@Override
protected void existingOneNestedWriteAdditionalNested(final DatastoreTestTask task) {
- Future<?> future = task.getChangeEvent();
/*
* One listener should be notified only and only if actual node changed its state,
* since deletion of nested child (in this case /nested-list/nested-list[foo],
* and this means settable future containing receivedDataChangeEvent is not done.
*
*/
- assertFalse(future.isDone());
+ task.verifyNoChangeEvent();
}
@Override
protected void existingTopWriteTwoNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- Future<?> future = task.getChangeEvent();
/*
* One listener should be notified only and only if actual node changed its state,
* since deletion of nested child (in this case /nested-list/nested-list[foo],
* and this means settable future containing receivedDataChangeEvent is not done.
*
*/
- assertFalse(future.isDone());
+ task.verifyNoChangeEvent();
}
@Override
protected void existingTopWriteSibling(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO_SIBLING));
assertNotContains(change.getUpdatedData(),path(FOO), TOP_LEVEL);
@Override
public void putTopLevelOneNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotContains(change.getCreatedData(), TOP_LEVEL);
assertContains(change.getCreatedData(), path(FOO), path(FOO, BAR));
public void replaceTopLevelNestedChanged(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
assertContains(change.getCreatedData(), path(FOO, BAZ));
protected void putTopLevelWithTwoNested(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
assertFalse(change.getCreatedData().isEmpty());
protected void twoNestedExistsOneIsDeleted(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertNotNull(change);
assertTrue(change.getCreatedData().isEmpty());
assertContains(change.getUpdatedData(), path(FOO));
public void nestedListExistsRootDeleted(final DatastoreTestTask task) throws InterruptedException,
ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertEmpty(change.getCreatedData());
assertEmpty(change.getUpdatedData());
@Override
protected void existingOneNestedWriteAdditionalNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO,BAZ));
assertNotContains(change.getCreatedData(), path(FOO,BAR));
@Override
protected void existingTopWriteTwoNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO,BAR),path(FOO,BAZ));
assertContains(change.getUpdatedData(), path(FOO));
@Override
protected void existingTopWriteSibling(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
assertContains(change.getCreatedData(), path(FOO_SIBLING));
assertNotContains(change.getUpdatedData(), path(FOO), TOP_LEVEL);
import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkCondition;
import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkNotNull;
-import java.io.File;
-import java.io.InputStream;
+import com.google.common.base.Optional;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.concurrent.ExecutorService;
-
import org.opendaylight.controller.config.api.JmxAttributeValidationException;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
import org.opendaylight.controller.sal.connect.netconf.NetconfDevice;
+import org.opendaylight.controller.sal.connect.netconf.NetconfStateSchemas;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceSalFacade;
+import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.protocol.framework.ReconnectStrategy;
import org.opendaylight.protocol.framework.TimedReconnectStrategy;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Host;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;
-import org.opendaylight.yangtools.yang.model.util.repo.AbstractCachingSchemaSourceProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.FilesystemSchemaCachingProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProviders;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistry;
import org.osgi.framework.BundleContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-
/**
*
*/
{
private static final Logger logger = LoggerFactory.getLogger(NetconfConnectorModule.class);
- private static AbstractCachingSchemaSourceProvider<String, InputStream> GLOBAL_NETCONF_SOURCE_PROVIDER = null;
private BundleContext bundleContext;
private Optional<NetconfSessionCapabilities> userCapabilities;
+ private SchemaSourceRegistry schemaRegistry;
+ private SchemaContextFactory schemaContextFactory;
public NetconfConnectorModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
super(identifier, dependencyResolver);
final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade
= new NetconfDeviceSalFacade(id, domBroker, bindingBroker, bundleContext, globalProcessingExecutor);
+
+ final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO =
+ new NetconfDevice.SchemaResourcesDTO(schemaRegistry, schemaContextFactory, new NetconfStateSchemas.NetconfStateSchemasResolverImpl());
+
final NetconfDevice device =
- NetconfDevice.createNetconfDevice(id, getGlobalNetconfSchemaProvider(), globalProcessingExecutor, salFacade);
+ new NetconfDevice(schemaResourcesDTO, id, salFacade, globalProcessingExecutor, new NetconfMessageTransformer());
final NetconfDeviceCommunicator listener = userCapabilities.isPresent() ?
new NetconfDeviceCommunicator(id, device, userCapabilities.get()) : new NetconfDeviceCommunicator(id, device);
return Optional.of(parsedOverrideCapabilities);
}
- private synchronized AbstractCachingSchemaSourceProvider<String, InputStream> getGlobalNetconfSchemaProvider() {
- if(GLOBAL_NETCONF_SOURCE_PROVIDER == null) {
- final String storageFile = "cache/schema";
- // File directory = bundleContext.getDataFile(storageFile);
- final File directory = new File(storageFile);
- final SchemaSourceProvider<String> defaultProvider = SchemaSourceProviders.noopProvider();
- GLOBAL_NETCONF_SOURCE_PROVIDER = FilesystemSchemaCachingProvider.createFromStringSourceProvider(defaultProvider, directory);
- }
- return GLOBAL_NETCONF_SOURCE_PROVIDER;
- }
-
public void setBundleContext(final BundleContext bundleContext) {
this.bundleContext = bundleContext;
}
return new InetSocketAddress(ip, getPort().getValue());
}
}
+
+ public void setSchemaRegistry(final SchemaSourceRegistry schemaRegistry) {
+ this.schemaRegistry = schemaRegistry;
+ }
+
+ public void setSchemaContextFactory(final SchemaContextFactory schemaContextFactory) {
+ this.schemaContextFactory = schemaContextFactory;
+ }
}
*/
package org.opendaylight.controller.config.yang.md.sal.connector.netconf;
+import java.io.File;
+
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
import org.opendaylight.controller.config.spi.Module;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceFilter;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.util.FilesystemSchemaSourceCache;
+import org.opendaylight.yangtools.yang.parser.repo.SharedSchemaRepository;
+import org.opendaylight.yangtools.yang.parser.util.TextToASTTransformer;
import org.osgi.framework.BundleContext;
/**
public class NetconfConnectorModuleFactory extends
org.opendaylight.controller.config.yang.md.sal.connector.netconf.AbstractNetconfConnectorModuleFactory {
+ // TODO this should be injected
+ // Netconf devices have separated schema registry + factory from controller
+ private final SharedSchemaRepository repository = new SharedSchemaRepository(NAME);
+ private final SchemaContextFactory schemaContextFactory
+ = repository.createSchemaContextFactory(SchemaSourceFilter.ALWAYS_ACCEPT);
+
+ public NetconfConnectorModuleFactory() {
+ // Start cache and Text to AST transformer
+ final FilesystemSchemaSourceCache<YangTextSchemaSource> cache = new FilesystemSchemaSourceCache<>(repository, YangTextSchemaSource.class, new File("cache/schema"));
+ repository.registerSchemaSourceListener(cache);
+ repository.registerSchemaSourceListener(TextToASTTransformer.create(repository, repository));
+ }
+
@Override
- public Module createModule(String instanceName, DependencyResolver dependencyResolver,
- DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
- NetconfConnectorModule module = (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver,
+ public Module createModule(final String instanceName, final DependencyResolver dependencyResolver,
+ final DynamicMBeanWithInstance old, final BundleContext bundleContext) throws Exception {
+ final NetconfConnectorModule module = (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver,
old, bundleContext);
+
module.setBundleContext(bundleContext);
+ module.setSchemaRegistry(repository);
+ module.setSchemaContextFactory(schemaContextFactory);
return module;
}
@Override
- public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
- NetconfConnectorModule module = (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver,
+ public Module createModule(final String instanceName, final DependencyResolver dependencyResolver, final BundleContext bundleContext) {
+ final NetconfConnectorModule module = (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver,
bundleContext);
module.setBundleContext(bundleContext);
+ module.setSchemaRegistry(repository);
+ module.setSchemaContextFactory(schemaContextFactory);
return module;
}
}
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
public interface RemoteDeviceHandler<PREF> extends AutoCloseable {
- void onDeviceConnected(SchemaContextProvider remoteSchemaContextProvider,
+ void onDeviceConnected(SchemaContext remoteSchemaContext,
PREF netconfSessionPreferences, RpcImplementation deviceRpc);
void onDeviceDisconnected();
*/
package org.opendaylight.controller.sal.connect.netconf;
-import java.io.InputStream;
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
-
import org.opendaylight.controller.netconf.api.NetconfMessage;
-import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.opendaylight.controller.sal.connect.api.MessageTransformer;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.api.SchemaContextProviderFactory;
-import org.opendaylight.controller.sal.connect.api.SchemaSourceProviderFactory;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
-import org.opendaylight.controller.sal.connect.netconf.schema.NetconfDeviceSchemaProviderFactory;
-import org.opendaylight.controller.sal.connect.netconf.schema.NetconfRemoteSchemaSourceProvider;
-import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
+import org.opendaylight.controller.sal.connect.netconf.schema.NetconfRemoteSchemaYangSourceProvider;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.AbstractCachingSchemaSourceProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.repo.api.MissingSchemaSourceException;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaResolutionException;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceRepresentation;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.PotentialSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistration;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
/**
* This is a mediator between NetconfDeviceCommunicator and NetconfDeviceSalFacade
*/
private static final Logger logger = LoggerFactory.getLogger(NetconfDevice.class);
+ public static final Function<QName, SourceIdentifier> QNAME_TO_SOURCE_ID_FUNCTION = new Function<QName, SourceIdentifier>() {
+ @Override
+ public SourceIdentifier apply(final QName input) {
+ return new SourceIdentifier(input.getLocalName(), Optional.fromNullable(input.getFormattedRevision()));
+ }
+ };
+
private final RemoteDeviceId id;
+ private final SchemaContextFactory schemaContextFactory;
private final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade;
private final ListeningExecutorService processingExecutor;
+ private final SchemaSourceRegistry schemaRegistry;
private final MessageTransformer<NetconfMessage> messageTransformer;
- private final SchemaContextProviderFactory schemaContextProviderFactory;
- private final SchemaSourceProviderFactory<InputStream> sourceProviderFactory;
private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
private final NotificationHandler notificationHandler;
+ private final List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations = Lists.newArrayList();
- public static NetconfDevice createNetconfDevice(final RemoteDeviceId id,
- final AbstractCachingSchemaSourceProvider<String, InputStream> schemaSourceProvider,
- final ExecutorService executor, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade) {
- return createNetconfDevice(id, schemaSourceProvider, executor, salFacade, new NetconfStateSchemas.NetconfStateSchemasResolverImpl());
- }
-
- @VisibleForTesting
- protected static NetconfDevice createNetconfDevice(final RemoteDeviceId id,
- final AbstractCachingSchemaSourceProvider<String, InputStream> schemaSourceProvider,
- final ExecutorService executor, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade,
- final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
-
- return new NetconfDevice(id, salFacade, executor, new NetconfMessageTransformer(),
- new NetconfDeviceSchemaProviderFactory(id), new SchemaSourceProviderFactory<InputStream>() {
- @Override
- public SchemaSourceProvider<InputStream> createSourceProvider(final RpcImplementation deviceRpc) {
- return schemaSourceProvider.createInstanceFor(new NetconfRemoteSchemaSourceProvider(id,
- deviceRpc));
- }
- }, stateSchemasResolver);
- }
-
- @VisibleForTesting
- protected NetconfDevice(final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade,
- final ExecutorService processingExecutor, final MessageTransformer<NetconfMessage> messageTransformer,
- final SchemaContextProviderFactory schemaContextProviderFactory,
- final SchemaSourceProviderFactory<InputStream> sourceProviderFactory,
- final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
+ public NetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade,
+ final ExecutorService globalProcessingExecutor, final MessageTransformer<NetconfMessage> messageTransformer) {
this.id = id;
+ this.schemaRegistry = schemaResourcesDTO.getSchemaRegistry();
this.messageTransformer = messageTransformer;
+ this.schemaContextFactory = schemaResourcesDTO.getSchemaContextFactory();
this.salFacade = salFacade;
- this.sourceProviderFactory = sourceProviderFactory;
- this.stateSchemasResolver = stateSchemasResolver;
- this.processingExecutor = MoreExecutors.listeningDecorator(processingExecutor);
- this.schemaContextProviderFactory = schemaContextProviderFactory;
+ this.stateSchemasResolver = schemaResourcesDTO.getStateSchemasResolver();
+ this.processingExecutor = MoreExecutors.listeningDecorator(globalProcessingExecutor);
this.notificationHandler = new NotificationHandler(salFacade, messageTransformer, id);
}
// http://netty.io/wiki/thread-model.html
logger.debug("{}: Session to remote device established with {}", id, remoteSessionCapabilities);
- final ListenableFuture<?> salInitializationFuture = processingExecutor.submit(new Runnable() {
+ final NetconfDeviceRpc deviceRpc = setUpDeviceRpc(listener);
+
+ final DeviceSourcesResolver task = new DeviceSourcesResolver(deviceRpc, remoteSessionCapabilities, id, stateSchemasResolver);
+ final ListenableFuture<DeviceSources> sourceResolverFuture = processingExecutor.submit(task);
+
+ final FutureCallback<DeviceSources> resolvedSourceCallback = new FutureCallback<DeviceSources>() {
@Override
- public void run() {
- final NetconfDeviceRpc deviceRpc = setUpDeviceRpc(remoteSessionCapabilities, listener);
-
- final NetconfStateSchemas availableSchemas = stateSchemasResolver.resolve(deviceRpc, remoteSessionCapabilities, id);
- logger.warn("{}: Schemas exposed by ietf-netconf-monitoring: {}", id, availableSchemas.getAvailableYangSchemasQNames());
- // TODO use this for shared schema context
-
- final SchemaSourceProvider<InputStream> delegate = sourceProviderFactory.createSourceProvider(deviceRpc);
- final SchemaContextProvider schemaContextProvider = setUpSchemaContext(delegate, remoteSessionCapabilities);
- updateMessageTransformer(schemaContextProvider);
- salFacade.onDeviceConnected(schemaContextProvider, remoteSessionCapabilities, deviceRpc);
- notificationHandler.onRemoteSchemaUp();
+ public void onSuccess(final DeviceSources result) {
+ addProvidedSourcesToSchemaRegistry(deviceRpc, result);
+ setUpSchema(result);
}
- });
- Futures.addCallback(salInitializationFuture, new FutureCallback<Object>() {
- @Override
- public void onSuccess(final Object result) {
- logger.debug("{}: Initialization in sal successful", id);
- logger.info("{}: Netconf connector initialized successfully", id);
+ private void setUpSchema(final DeviceSources result) {
+ processingExecutor.submit(new RecursiveSchemaSetup(result, remoteSessionCapabilities, deviceRpc, listener));
}
@Override
public void onFailure(final Throwable t) {
- // Unable to initialize device, set as disconnected
- logger.error("{}: Initialization failed", id, t);
- salFacade.onDeviceDisconnected();
- // TODO ssh connection is still open if sal initialization fails
+ logger.warn("{}: Unexpected error resolving device sources: {}", id, t);
+ handleSalInitializationFailure(t, listener);
}
- });
+ };
+
+ Futures.addCallback(sourceResolverFuture, resolvedSourceCallback);
+ }
+
+ private void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionCapabilities remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc) {
+ updateMessageTransformer(result);
+ salFacade.onDeviceConnected(result, remoteSessionCapabilities, deviceRpc);
+ notificationHandler.onRemoteSchemaUp();
+
+ logger.debug("{}: Initialization in sal successful", id);
+ logger.info("{}: Netconf connector initialized successfully", id);
+ }
+
+ private void handleSalInitializationFailure(final Throwable t, final RemoteDeviceCommunicator<NetconfMessage> listener) {
+ logger.error("{}: Initialization in sal failed, disconnecting from device", id, t);
+ listener.close();
+ onRemoteSessionDown();
}
/**
* Update initial message transformer to use retrieved schema
+ * @param currentSchemaContext
*/
- private void updateMessageTransformer(final SchemaContextProvider schemaContextProvider) {
- messageTransformer.onGlobalContextUpdated(schemaContextProvider.getSchemaContext());
+ private void updateMessageTransformer(final SchemaContext currentSchemaContext) {
+ messageTransformer.onGlobalContextUpdated(currentSchemaContext);
}
- private SchemaContextProvider setUpSchemaContext(final SchemaSourceProvider<InputStream> sourceProvider, final NetconfSessionCapabilities capabilities) {
- return schemaContextProviderFactory.createContextProvider(capabilities.getModuleBasedCaps(), sourceProvider);
+ private void addProvidedSourcesToSchemaRegistry(final NetconfDeviceRpc deviceRpc, final DeviceSources deviceSources) {
+ final NetconfRemoteSchemaYangSourceProvider yangProvider = new NetconfRemoteSchemaYangSourceProvider(id, deviceRpc);
+ for (final SourceIdentifier sourceId : deviceSources.getProvidedSources()) {
+ sourceRegistrations.add(schemaRegistry.registerSchemaSource(yangProvider,
+ PotentialSchemaSource.create(sourceId, YangTextSchemaSource.class, PotentialSchemaSource.Costs.REMOTE_IO.getValue())));
+ }
}
- private NetconfDeviceRpc setUpDeviceRpc(final NetconfSessionCapabilities capHolder, final RemoteDeviceCommunicator<NetconfMessage> listener) {
- Preconditions.checkArgument(capHolder.isMonitoringSupported(),
- "%s: Netconf device does not support netconf monitoring, yang schemas cannot be acquired. Netconf device capabilities", capHolder);
- return new NetconfDeviceRpc(listener, messageTransformer);
+ private NetconfDeviceRpc setUpDeviceRpc(final RemoteDeviceCommunicator<NetconfMessage> listener) {
+ return new NetconfDeviceRpc(listener, messageTransformer);
}
@Override
public void onRemoteSessionDown() {
salFacade.onDeviceDisconnected();
+ for (final SchemaSourceRegistration<? extends SchemaSourceRepresentation> sourceRegistration : sourceRegistrations) {
+ sourceRegistration.close();
+ }
}
@Override
}
/**
- * Handles incoming notifications. Either caches them(until onRemoteSchemaUp is called) or passes to sal Facade.
+ * Just a transfer object containing schema related dependencies. Injected in constructor.
*/
- private final static class NotificationHandler {
+ public static class SchemaResourcesDTO {
+ private final SchemaSourceRegistry schemaRegistry;
+ private final SchemaContextFactory schemaContextFactory;
+ private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
+
+ public SchemaResourcesDTO(final SchemaSourceRegistry schemaRegistry, final SchemaContextFactory schemaContextFactory, final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
+ this.schemaRegistry = Preconditions.checkNotNull(schemaRegistry);
+ this.schemaContextFactory = Preconditions.checkNotNull(schemaContextFactory);
+ this.stateSchemasResolver = Preconditions.checkNotNull(stateSchemasResolver);
+ }
+
+ public SchemaSourceRegistry getSchemaRegistry() {
+ return schemaRegistry;
+ }
+
+ public SchemaContextFactory getSchemaContextFactory() {
+ return schemaContextFactory;
+ }
- private final RemoteDeviceHandler<?> salFacade;
- private final List<NetconfMessage> cache = new LinkedList<>();
- private final MessageTransformer<NetconfMessage> messageTransformer;
- private boolean passNotifications = false;
+ public NetconfStateSchemas.NetconfStateSchemasResolver getStateSchemasResolver() {
+ return stateSchemasResolver;
+ }
+ }
+
+ /**
+ * Schema building callable.
+ */
+ private static class DeviceSourcesResolver implements Callable<DeviceSources> {
+ private final NetconfDeviceRpc deviceRpc;
+ private final NetconfSessionCapabilities remoteSessionCapabilities;
private final RemoteDeviceId id;
+ private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
- NotificationHandler(final RemoteDeviceHandler<?> salFacade, final MessageTransformer<NetconfMessage> messageTransformer, final RemoteDeviceId id) {
- this.salFacade = salFacade;
- this.messageTransformer = messageTransformer;
+ public DeviceSourcesResolver(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id, final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
+ this.deviceRpc = deviceRpc;
+ this.remoteSessionCapabilities = remoteSessionCapabilities;
this.id = id;
+ this.stateSchemasResolver = stateSchemasResolver;
}
- synchronized void handleNotification(final NetconfMessage notification) {
- if(passNotifications) {
- passNotification(messageTransformer.toNotification(notification));
- } else {
- cacheNotification(notification);
+ @Override
+ public DeviceSources call() throws Exception {
+
+ final Set<SourceIdentifier> requiredSources = Sets.newHashSet(Collections2.transform(
+ remoteSessionCapabilities.getModuleBasedCaps(), QNAME_TO_SOURCE_ID_FUNCTION));
+
+ // If monitoring is not supported, we still attempt to create the schema; the sources might already be provided
+ final NetconfStateSchemas availableSchemas = stateSchemasResolver.resolve(deviceRpc, remoteSessionCapabilities, id);
+ logger.debug("{}: Schemas exposed by ietf-netconf-monitoring: {}", id, availableSchemas.getAvailableYangSchemasQNames());
+
+ final Set<SourceIdentifier> providedSources = Sets.newHashSet(Collections2.transform(
+ availableSchemas.getAvailableYangSchemasQNames(), QNAME_TO_SOURCE_ID_FUNCTION));
+
+ final Set<SourceIdentifier> requiredSourcesNotProvided = Sets.difference(requiredSources, providedSources);
+
+ if (!requiredSourcesNotProvided.isEmpty()) {
+ logger.warn("{}: Netconf device does not provide all yang models reported in hello message capabilities, required but not provided: {}",
+ id, requiredSourcesNotProvided);
+ logger.warn("{}: Attempting to build schema context from required sources", id);
}
- }
- /**
- * Forward all cached notifications and pass all notifications from this point directly to sal facade.
- */
- synchronized void onRemoteSchemaUp() {
- passNotifications = true;
- for (final NetconfMessage cachedNotification : cache) {
- passNotification(messageTransformer.toNotification(cachedNotification));
+ // TODO should we perform this? We have a mechanism to fix initialization of devices not reporting all required modules in hello
+ // That is overriding capabilities in configuration using attribute yang-module-capabilities
+ // This is more user friendly even though it clashes with attribute yang-module-capabilities
+ // Some devices do not report all required models in hello message, but provide them
+ final Set<SourceIdentifier> providedSourcesNotRequired = Sets.difference(providedSources, requiredSources);
+ if (!providedSourcesNotRequired.isEmpty()) {
+ logger.warn("{}: Netconf device provides additional yang models not reported in hello message capabilities: {}",
+ id, providedSourcesNotRequired);
+ logger.warn("{}: Adding provided but not required sources as required to prevent failures", id);
+ requiredSources.addAll(providedSourcesNotRequired);
}
- cache.clear();
+ return new DeviceSources(requiredSources, providedSources);
}
+ }
- private void cacheNotification(final NetconfMessage notification) {
- Preconditions.checkState(passNotifications == false);
+ /**
+ * Contains the required sources (reported in hello-message capabilities) and the provided sources (exposed via ietf-netconf-monitoring).
+ *
+ */
+ private static final class DeviceSources {
+ private final Collection<SourceIdentifier> requiredSources;
+ private final Collection<SourceIdentifier> providedSources;
- logger.debug("{}: Caching notification {}, remote schema not yet fully built", id, notification);
- if(logger.isTraceEnabled()) {
- logger.trace("{}: Caching notification {}", id, XmlUtil.toString(notification.getDocument()));
- }
+ public DeviceSources(final Collection<SourceIdentifier> requiredSources, final Collection<SourceIdentifier> providedSources) {
+ this.requiredSources = requiredSources;
+ this.providedSources = providedSources;
+ }
- cache.add(notification);
+ public Collection<SourceIdentifier> getRequiredSources() {
+ return requiredSources;
}
- private void passNotification(final CompositeNode parsedNotification) {
- logger.debug("{}: Forwarding notification {}", id, parsedNotification);
- Preconditions.checkNotNull(parsedNotification);
- salFacade.onNotification(parsedNotification);
+ public Collection<SourceIdentifier> getProvidedSources() {
+ return providedSources;
}
+
}
+ /**
+ * Schema builder that tries to build the schema context from the provided sources, or from the biggest buildable subset of them.
+ */
+ private final class RecursiveSchemaSetup implements Runnable {
+ private final DeviceSources deviceSources;
+ private final NetconfSessionCapabilities remoteSessionCapabilities;
+ private final NetconfDeviceRpc deviceRpc;
+ private final RemoteDeviceCommunicator<NetconfMessage> listener;
+
+ public RecursiveSchemaSetup(final DeviceSources deviceSources, final NetconfSessionCapabilities remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc, final RemoteDeviceCommunicator<NetconfMessage> listener) {
+ this.deviceSources = deviceSources;
+ this.remoteSessionCapabilities = remoteSessionCapabilities;
+ this.deviceRpc = deviceRpc;
+ this.listener = listener;
+ }
+
+ @Override
+ public void run() {
+ setUpSchema(deviceSources.getRequiredSources());
+ }
+
+ /**
+ * Recursively builds the schema context; notifies the device handler on success or on final failure.
+ */
+ private void setUpSchema(final Collection<SourceIdentifier> requiredSources) {
+ logger.trace("{}: Trying to build schema context from {}", id, requiredSources);
+
+ // If no more sources, fail
+ if(requiredSources.isEmpty()) {
+ handleSalInitializationFailure(new IllegalStateException(id + ": No more sources for schema context"), listener);
+ return;
+ }
+
+ final CheckedFuture<SchemaContext, SchemaResolutionException> schemaBuilderFuture = schemaContextFactory.createSchemaContext(requiredSources);
+
+ final FutureCallback<SchemaContext> RecursiveSchemaBuilderCallback = new FutureCallback<SchemaContext>() {
+
+ @Override
+ public void onSuccess(final SchemaContext result) {
+ logger.debug("{}: Schema context built successfully from {}", id, requiredSources);
+ handleSalInitializationSuccess(result, remoteSessionCapabilities, deviceRpc);
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ // In case source missing, try without it
+ if (t instanceof MissingSchemaSourceException) {
+ final SourceIdentifier missingSource = ((MissingSchemaSourceException) t).getSourceId();
+ logger.warn("{}: Unable to build schema context, missing source {}, will reattempt without it", id, missingSource);
+ setUpSchema(stripMissingSource(requiredSources, missingSource));
+
+ // In case resolution error, try only with resolved sources
+ } else if (t instanceof SchemaResolutionException) {
+ // TODO check for infinite loop
+ final SchemaResolutionException resolutionException = (SchemaResolutionException) t;
+ logger.warn("{}: Unable to build schema context, unsatisfied imports {}, will reattempt with resolved only", id, resolutionException.getUnsatisfiedImports());
+ setUpSchema(resolutionException.getResolvedSources());
+ // unknown error, fail
+ } else {
+ handleSalInitializationFailure(t, listener);
+ }
+ }
+ };
+
+ Futures.addCallback(schemaBuilderFuture, RecursiveSchemaBuilderCallback);
+ }
+
+ private Collection<SourceIdentifier> stripMissingSource(final Collection<SourceIdentifier> requiredSources, final SourceIdentifier sIdToRemove) {
+ final LinkedList<SourceIdentifier> sourceIdentifiers = Lists.newLinkedList(requiredSources);
+ final boolean removed = sourceIdentifiers.remove(sIdToRemove);
+ Preconditions.checkState(removed, "{}: Trying to remove {} from {} failed", id, sIdToRemove, requiredSources);
+ return sourceIdentifiers;
+ }
+ }
}
*/
private static NetconfStateSchemas create(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
if(remoteSessionCapabilities.isMonitoringSupported() == false) {
- logger.warn("{}: Netconf monitoring not supported on device, cannot detect available schemas");
+ logger.warn("{}: Netconf monitoring not supported on device, cannot detect provided schemas");
return EMPTY;
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.connect.netconf;
+
+import com.google.common.base.Preconditions;
+import java.util.LinkedList;
+import java.util.List;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.MessageTransformer;
+import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handles incoming notifications. Either caches them(until onRemoteSchemaUp is called) or passes to sal Facade.
+ */
+final class NotificationHandler {
+
+ private static final Logger logger = LoggerFactory.getLogger(NotificationHandler.class);
+
+ private final RemoteDeviceHandler<?> salFacade;
+ private final List<NetconfMessage> queue = new LinkedList<>();
+ private final MessageTransformer<NetconfMessage> messageTransformer;
+ private final RemoteDeviceId id;
+ private boolean passNotifications = false;
+
+ NotificationHandler(final RemoteDeviceHandler<?> salFacade, final MessageTransformer<NetconfMessage> messageTransformer, final RemoteDeviceId id) {
+ this.salFacade = Preconditions.checkNotNull(salFacade);
+ this.messageTransformer = Preconditions.checkNotNull(messageTransformer);
+ this.id = Preconditions.checkNotNull(id);
+ }
+
+ synchronized void handleNotification(final NetconfMessage notification) {
+ if(passNotifications) {
+ passNotification(messageTransformer.toNotification(notification));
+ } else {
+ queueNotification(notification);
+ }
+ }
+
+ /**
+ * Forward all cached notifications and pass all notifications from this point directly to sal facade.
+ */
+ synchronized void onRemoteSchemaUp() {
+ passNotifications = true;
+
+ for (final NetconfMessage cachedNotification : queue) {
+ passNotification(messageTransformer.toNotification(cachedNotification));
+ }
+
+ queue.clear();
+ }
+
+ private void queueNotification(final NetconfMessage notification) {
+ Preconditions.checkState(passNotifications == false);
+
+ logger.debug("{}: Caching notification {}, remote schema not yet fully built", id, notification);
+ if(logger.isTraceEnabled()) {
+ logger.trace("{}: Caching notification {}", id, XmlUtil.toString(notification.getDocument()));
+ }
+
+ queue.add(notification);
+ }
+
+ private void passNotification(final CompositeNode parsedNotification) {
+ logger.debug("{}: Forwarding notification {}", id, parsedNotification);
+ Preconditions.checkNotNull(parsedNotification);
+ salFacade.onNotification(parsedNotification);
+ }
+}
private final RemoteDeviceId id;
private final Lock sessionLock = new ReentrantLock();
+ // TODO implement concurrent message limit
private final Queue<Request> requests = new ArrayDeque<>();
private NetconfClientSession session;
+ private Future<?> initFuture;
public NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice,
final NetconfSessionCapabilities netconfSessionCapabilities) {
public void initializeRemoteConnection(final NetconfClientDispatcher dispatch,
final NetconfClientConfiguration config) {
if(config instanceof NetconfReconnectingClientConfiguration) {
- dispatch.createReconnectingClient((NetconfReconnectingClientConfiguration) config);
+ initFuture = dispatch.createReconnectingClient((NetconfReconnectingClientConfiguration) config);
} else {
- dispatch.createClient(config);
+ initFuture = dispatch.createClient(config);
}
}
@Override
public void close() {
- tearDown( String.format( "The netconf session to %1$s has been closed", id.getName() ) );
+ // Cancel reconnect if in progress
+ if(initFuture != null) {
+ initFuture.cancel(false);
+ }
+ // Disconnect from device
+ if(session != null) {
+ session.close();
+ }
+ tearDown(id + ": Netconf session closed");
}
@Override
private void processMessage(final NetconfMessage message) {
Request request = null;
sessionLock.lock();
+
try {
request = requests.peek();
- if (request.future.isUncancellable()) {
+ if (request != null && request.future.isUncancellable()) {
requests.poll();
- }
- else {
+ } else {
request = null;
logger.warn("{}: Ignoring unsolicited message {}", id, msgToS(message));
}
*/
package org.opendaylight.controller.sal.connect.netconf.sal;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
-
import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
import org.osgi.framework.BundleContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
public final class NetconfDeviceSalFacade implements AutoCloseable, RemoteDeviceHandler<NetconfSessionCapabilities> {
private static final Logger logger= LoggerFactory.getLogger(NetconfDeviceSalFacade.class);
}
@Override
- public synchronized void onDeviceConnected(final SchemaContextProvider remoteSchemaContextProvider,
+ public synchronized void onDeviceConnected(final SchemaContext schemaContext,
final NetconfSessionCapabilities netconfSessionPreferences, final RpcImplementation deviceRpc) {
- final SchemaContext schemaContext = remoteSchemaContextProvider.getSchemaContext();
- // TODO remove deprecated SchemaContextProvider from SchemaAwareRpcBroker
// TODO move SchemaAwareRpcBroker from sal-broker-impl, now we have depend on the whole sal-broker-impl
final RpcProvisionRegistry rpcRegistry = new SchemaAwareRpcBroker(id.getPath().toString(), new org.opendaylight.controller.sal.dom.broker.impl.SchemaContextProvider() {
@Override
*/
package org.opendaylight.controller.sal.connect.netconf.sal.tx;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.CONFIG_SOURCE_RUNNING;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DATA_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_CONFIG_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.toFilterStructure;
-
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.ExecutionException;
+
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.CONFIG_SOURCE_RUNNING;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DATA_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_CONFIG_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.toFilterStructure;
+
public final class NetconfDeviceReadOnlyTx implements DOMDataReadOnlyTransaction {
final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_GET_CONFIG_QNAME,
NetconfMessageTransformUtil.wrap(NETCONF_GET_CONFIG_QNAME, CONFIG_SOURCE_RUNNING, toFilterStructure(path)));
- ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
+ final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
@Override
public Optional<NormalizedNode<?, ?>> apply(final RpcResult<CompositeNode> result) {
checkReadSuccess(result, path);
final YangInstanceIdentifier path) {
final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_GET_QNAME, NetconfMessageTransformUtil.wrap(NETCONF_GET_QNAME, toFilterStructure(path)));
- ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
+ final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
@Override
public Optional<NormalizedNode<?, ?>> apply(final RpcResult<CompositeNode> result) {
checkReadSuccess(result, path);
throw new IllegalArgumentException(String.format("%s, Cannot read data %s for %s datastore, unknown datastore type", id, path, store));
}
+    /**
+     * Checks whether data exists at the given path by issuing an asynchronous
+     * read and mapping its result to a boolean.
+     *
+     * @param store datastore type to check (delegated to {@code read})
+     * @param path  path whose presence is being tested
+     * @return future resolving to true iff the read returned a present Optional
+     */
+    @Override
+    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> data = read(store, path);
+
+        // Map the read result asynchronously instead of blocking on data.get():
+        // a blocking get() would stall the calling thread until the remote
+        // device responds, defeating the purpose of the async transaction API.
+        final ListenableFuture<Boolean> transformed = Futures.transform(data, new Function<Optional<NormalizedNode<?, ?>>, Boolean>() {
+            @Override
+            public Boolean apply(final Optional<NormalizedNode<?, ?>> optional) {
+                return optional.isPresent();
+            }
+        });
+
+        // Preserve the CheckedFuture contract: wrap any failure as ReadFailedException.
+        return Futures.makeChecked(transformed, new Function<Exception, ReadFailedException>() {
+            @Override
+            public ReadFailedException apply(final Exception e) {
+                return new ReadFailedException("Exists failed", e);
+            }
+        });
+    }
+
static YangInstanceIdentifier toLegacyPath(final DataNormalizer normalizer, final YangInstanceIdentifier path, final RemoteDeviceId id) {
try {
return normalizer.toLegacy(path);
+import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import java.util.concurrent.ExecutionException;
+
public class NetconfDeviceReadWriteTx implements DOMDataReadWriteTransaction {
private final DOMDataReadTransaction delegateReadTx;
return delegateReadTx.read(store, path);
}
+    /**
+     * Checks whether data exists at the given path by issuing an asynchronous
+     * read (delegated to the read transaction) and mapping its result.
+     *
+     * @param store datastore type to check
+     * @param path  path whose presence is being tested
+     * @return future resolving to true iff the read returned a present Optional
+     */
+    @Override
+    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
+        final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> data = read(store, path);
+
+        // Derive existence from the async read result rather than blocking on
+        // data.get(); blocking here would tie up the calling thread until the
+        // remote device answers.
+        final ListenableFuture<Boolean> transformed = Futures.transform(data, new Function<Optional<NormalizedNode<?, ?>>, Boolean>() {
+            @Override
+            public Boolean apply(final Optional<NormalizedNode<?, ?>> optional) {
+                return optional.isPresent();
+            }
+        });
+
+        // Preserve the CheckedFuture contract: wrap any failure as ReadFailedException.
+        return Futures.makeChecked(transformed, new Function<Exception, ReadFailedException>() {
+            @Override
+            public ReadFailedException apply(final Exception e) {
+                return new ReadFailedException("Exists failed", e);
+            }
+        });
+    }
+
@Override
public Object getIdentifier() {
return this;
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connect.netconf.schema;
-
-import java.io.InputStream;
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-
-import org.opendaylight.controller.sal.connect.api.SchemaContextProviderFactory;
-import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
-import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
-import org.opendaylight.yangtools.yang.parser.impl.util.YangSourceContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-public final class NetconfDeviceSchemaProviderFactory implements SchemaContextProviderFactory {
-
- private static final Logger logger = LoggerFactory.getLogger(NetconfDeviceSchemaProviderFactory.class);
-
- private final RemoteDeviceId id;
-
- public NetconfDeviceSchemaProviderFactory(final RemoteDeviceId id) {
- this.id = id;
- }
-
- @Override
- public SchemaContextProvider createContextProvider(final Collection<QName> capabilities, final SchemaSourceProvider<InputStream> sourceProvider) {
-
- final YangSourceContext sourceContext = YangSourceContext.createFrom(capabilities, sourceProvider);
-
- if (sourceContext.getMissingSources().isEmpty() == false) {
- logger.warn("{}: Sources for following models are missing {}", id, sourceContext.getMissingSources());
- }
-
- logger.debug("{}: Trying to create schema context from {}", id, sourceContext.getValidSources());
- final List<InputStream> modelsToParse = YangSourceContext.getValidInputStreams(sourceContext);
-
- Preconditions.checkState(sourceContext.getValidSources().isEmpty() == false,
- "%s: Unable to create schema context, no sources provided by device", id);
- try {
- final SchemaContext schemaContext = tryToParseContext(modelsToParse);
- logger.debug("{}: Schema context successfully created.", id);
- return new NetconfSchemaContextProvider(schemaContext);
- } catch (final RuntimeException e) {
- logger.error("{}: Unable to create schema context, unexpected error", id, e);
- throw new IllegalStateException(id + ": Unable to create schema context", e);
- }
- }
-
- private static SchemaContext tryToParseContext(final List<InputStream> modelsToParse) {
- final YangParserImpl parser = new YangParserImpl();
- final Set<Module> models = parser.parseYangModelsFromStreams(modelsToParse);
- return parser.resolveSchemaContext(models);
- }
-
- private static final class NetconfSchemaContextProvider implements SchemaContextProvider {
- private final SchemaContext schemaContext;
-
- public NetconfSchemaContextProvider(final SchemaContext schemaContext) {
- this.schemaContext = schemaContext;
- }
-
- @Override
- public SchemaContext getSchemaContext() {
- return schemaContext;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connect.netconf.schema;
-
-import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
-import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
-import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-
-public final class NetconfRemoteSchemaSourceProvider implements SchemaSourceProvider<String> {
-
- public static final QName GET_SCHEMA_QNAME = QName.create(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING,
- "get-schema");
- public static final QName GET_DATA_QNAME = QName
- .create(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING, "data");
-
- private static final Logger logger = LoggerFactory.getLogger(NetconfRemoteSchemaSourceProvider.class);
-
- private final RpcImplementation rpc;
- private final RemoteDeviceId id;
-
- public NetconfRemoteSchemaSourceProvider(final RemoteDeviceId id, final RpcImplementation rpc) {
- this.id = id;
- this.rpc = Preconditions.checkNotNull(rpc);
- }
-
- @Override
- public Optional<String> getSchemaSource(final String moduleName, final Optional<String> revision) {
- final ImmutableCompositeNode getSchemaRequest = createGetSchemaRequest(moduleName, revision);
-
- logger.trace("{}: Loading YANG schema source for {}:{}", id, moduleName, revision);
- try {
- final RpcResult<CompositeNode> schemaReply = rpc.invokeRpc(GET_SCHEMA_QNAME, getSchemaRequest).get();
- if (schemaReply.isSuccessful()) {
- final Optional<String> schemaBody = getSchemaFromRpc(id, schemaReply.getResult());
- if (schemaBody.isPresent()) {
- logger.debug("{}: YANG Schema successfully retrieved for {}:{}", id, moduleName, revision);
- return schemaBody;
- }
- } else {
- logger.warn("{}: YANG schema was not successfully retrieved for {}:{}. Errors: {}", id, moduleName,
- revision, schemaReply.getErrors());
- }
- return Optional.absent();
- } catch (final InterruptedException e){
- Thread.currentThread().interrupt();
- throw new IllegalStateException(e);
- } catch (final Exception e) {
- logger.error("{}: YANG schema was not successfully retrieved for {}:{}", id, moduleName, revision, e);
- throw new IllegalStateException(e);
- }
- }
-
- private ImmutableCompositeNode createGetSchemaRequest(final String moduleName, final Optional<String> revision) {
- final CompositeNodeBuilder<ImmutableCompositeNode> request = ImmutableCompositeNode.builder();
- request.setQName(GET_SCHEMA_QNAME).addLeaf("identifier", moduleName);
- if (revision.isPresent()) {
- request.addLeaf("version", revision.get());
- }
- request.addLeaf("format", "yang");
- return request.toInstance();
- }
-
- private static Optional<String> getSchemaFromRpc(final RemoteDeviceId id, final CompositeNode result) {
- if (result == null) {
- return Optional.absent();
- }
- final SimpleNode<?> simpleNode = result.getFirstSimpleByName(GET_DATA_QNAME.withoutRevision());
-
- Preconditions.checkNotNull(simpleNode,
- "%s Unexpected response to get-schema, expected response with one child %s, but was %s",
- id, GET_DATA_QNAME.withoutRevision(), result);
-
- final Object potential = simpleNode.getValue();
- return potential instanceof String ? Optional.of((String) potential) : Optional.<String>absent();
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.connect.netconf.schema;
+
+import com.google.common.base.Function;
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.concurrent.ExecutionException;
+import org.apache.commons.io.IOUtils;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.controller.sal.core.api.RpcImplementation;
+import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.SimpleNode;
+import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
+import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class NetconfRemoteSchemaYangSourceProvider implements SchemaSourceProvider<YangTextSchemaSource> {
+
+ public static final QName GET_SCHEMA_QNAME = QName.create(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING,"get-schema");
+ public static final QName GET_DATA_QNAME = QName.create(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING, "data");
+
+ private static final Logger logger = LoggerFactory.getLogger(NetconfRemoteSchemaYangSourceProvider.class);
+
+ private static final ExceptionMapper<SchemaSourceException> MAPPER = new ExceptionMapper<SchemaSourceException>(
+ "schemaDownload", SchemaSourceException.class) {
+ @Override
+ protected SchemaSourceException newWithCause(final String s, final Throwable throwable) {
+ return new SchemaSourceException(s, throwable);
+ }
+ };
+
+ private final RpcImplementation rpc;
+ private final RemoteDeviceId id;
+
+ public NetconfRemoteSchemaYangSourceProvider(final RemoteDeviceId id, final RpcImplementation rpc) {
+ this.id = id;
+ this.rpc = Preconditions.checkNotNull(rpc);
+ }
+
+ private ImmutableCompositeNode createGetSchemaRequest(final String moduleName, final Optional<String> revision) {
+ final CompositeNodeBuilder<ImmutableCompositeNode> request = ImmutableCompositeNode.builder();
+ request.setQName(GET_SCHEMA_QNAME).addLeaf("identifier", moduleName);
+ if (revision.isPresent()) {
+ request.addLeaf("version", revision.get());
+ }
+ request.addLeaf("format", "yang");
+ return request.toInstance();
+ }
+
+ private static Optional<String> getSchemaFromRpc(final RemoteDeviceId id, final CompositeNode result) {
+ if (result == null) {
+ return Optional.absent();
+ }
+ final SimpleNode<?> simpleNode = result.getFirstSimpleByName(GET_DATA_QNAME.withoutRevision());
+
+ Preconditions.checkNotNull(simpleNode,
+ "%s Unexpected response to get-schema, expected response with one child %s, but was %s", id,
+ GET_DATA_QNAME.withoutRevision(), result);
+
+ final Object potential = simpleNode.getValue();
+ return potential instanceof String ? Optional.of((String) potential) : Optional.<String> absent();
+ }
+
+    /**
+     * Retrieves the YANG source text for the given identifier via the device's
+     * get-schema RPC.
+     *
+     * <p>Deliberately blocks until the source arrives (see FIXME below) to
+     * serialize schema downloads, then hands back a checked future.
+     *
+     * @param sourceIdentifier module name and optional revision to fetch
+     * @return checked future with the retrieved YangTextSchemaSource
+     */
+    @Override
+    public CheckedFuture<YangTextSchemaSource, SchemaSourceException> getSource(final SourceIdentifier sourceIdentifier) {
+        final String moduleName = sourceIdentifier.getName();
+
+        // If formatted revision is SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION, we have to omit it from request
+        final String formattedRevision = sourceIdentifier.getRevision().equals(SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION) ? null : sourceIdentifier.getRevision();
+        final Optional<String> revision = Optional.fromNullable(formattedRevision);
+        final ImmutableCompositeNode getSchemaRequest = createGetSchemaRequest(moduleName, revision);
+
+        logger.trace("{}: Loading YANG schema source for {}:{}", id, moduleName, revision);
+
+        final ListenableFuture<YangTextSchemaSource> transformed = Futures.transform(
+                rpc.invokeRpc(GET_SCHEMA_QNAME, getSchemaRequest),
+                new ResultToYangSourceTransformer(id, sourceIdentifier, moduleName, revision));
+
+        // FIXME remove this get, it is only present to wait until source is retrieved
+        // (goal is to limit concurrent schema download, since NetconfDevice listener does not handle concurrent messages properly)
+        try {
+            logger.trace("{}: Blocking for {}", id, sourceIdentifier);
+            transformed.get();
+        } catch (final InterruptedException e) {
+            // Restore the interrupt flag so callers up the stack can observe
+            // the interruption (the pre-existing schema provider did the same).
+            Thread.currentThread().interrupt();
+            throw new RuntimeException(e);
+        } catch (final ExecutionException e) {
+            throw new IllegalStateException(id + ": Failed while getting source: " + sourceIdentifier, e);
+        }
+
+        return Futures.makeChecked(transformed, MAPPER);
+    }
+
+    /**
+     * Transforms the get-schema RPC result (a composite node) into the schema's
+     * string representation and wraps it as a {@code YangTextSchemaSource}.
+     */
+    private static final class ResultToYangSourceTransformer implements
+            Function<RpcResult<CompositeNode>, YangTextSchemaSource> {
+
+        private final RemoteDeviceId id;
+        private final SourceIdentifier sourceIdentifier;
+        private final String moduleName;
+        private final Optional<String> revision;
+
+        public ResultToYangSourceTransformer(final RemoteDeviceId id, final SourceIdentifier sourceIdentifier,
+                final String moduleName, final Optional<String> revision) {
+            this.id = id;
+            this.sourceIdentifier = sourceIdentifier;
+            this.moduleName = moduleName;
+            this.revision = revision;
+        }
+
+        @Override
+        public YangTextSchemaSource apply(final RpcResult<CompositeNode> input) {
+
+            if (input.isSuccessful()) {
+
+                final Optional<String> schemaString = getSchemaFromRpc(id, input.getResult());
+
+                // RPC succeeded but the reply carried no schema body: treat as fatal.
+                Preconditions.checkState(schemaString.isPresent(),
+                        "%s: Unexpected response to get-schema, schema not present in message for: %s", id, sourceIdentifier);
+
+                logger.debug("{}: YANG Schema successfully retrieved for {}:{}", id, moduleName, revision);
+
+                return new NetconfYangTextSchemaSource(id, sourceIdentifier, schemaString);
+            }
+
+            // Unsuccessful RPC result: log and fail the transform.
+            logger.warn("{}: YANG schema was not successfully retrieved for {}. Errors: {}", id, sourceIdentifier,
+                    input.getErrors());
+
+            throw new IllegalStateException(String.format(
+                    "%s: YANG schema was not successfully retrieved for %s. Errors: %s", id, sourceIdentifier,
+                    input.getErrors()));
+
+        }
+
+    }
+
+    /**
+     * {@code YangTextSchemaSource} backed by the schema text already retrieved
+     * from the remote device.
+     */
+    private static class NetconfYangTextSchemaSource extends YangTextSchemaSource {
+        private final RemoteDeviceId id;
+        private final Optional<String> schemaString;
+
+        public NetconfYangTextSchemaSource(final RemoteDeviceId id, final SourceIdentifier sId, final Optional<String> schemaString) {
+            super(sId);
+            this.id = id;
+            this.schemaString = schemaString;
+        }
+
+        @Override
+        protected Objects.ToStringHelper addToStringAttributes(final Objects.ToStringHelper toStringHelper) {
+            return toStringHelper.add("device", id);
+        }
+
+        @Override
+        public InputStream openStream() throws IOException {
+            // NOTE(review): IOUtils.toInputStream(String) encodes with the
+            // platform default charset; YANG sources are normally UTF-8 --
+            // consider the charset-taking overload if the commons-io version
+            // in use provides it. TODO confirm.
+            return IOUtils.toInputStream(schemaString.get());
+        }
+    }
+}
public class NetconfMessageTransformUtil {
+ public static final String MESSAGE_ID_ATTR = "message-id";
+
private NetconfMessageTransformUtil() {}
public static final QName IETF_NETCONF_MONITORING = QName.create(NetconfState.QNAME, "ietf-netconf-monitoring");
public static void checkValidReply(final NetconfMessage input, final NetconfMessage output)
throws NetconfDocumentedException {
- final String inputMsgId = input.getDocument().getDocumentElement().getAttribute("message-id");
- final String outputMsgId = output.getDocument().getDocumentElement().getAttribute("message-id");
+ final String inputMsgId = input.getDocument().getDocumentElement().getAttribute(MESSAGE_ID_ATTR);
+ final String outputMsgId = output.getDocument().getDocumentElement().getAttribute(MESSAGE_ID_ATTR);
if(inputMsgId.equals(outputMsgId) == false) {
Map<String,String> errorInfo = ImmutableMap.<String,String>builder()
package org.opendaylight.controller.sal.connect.netconf;
import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyCollectionOf;
import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import com.google.common.base.Optional;
+import com.google.common.collect.HashMultimap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
import java.io.InputStream;
import java.util.concurrent.Executors;
import org.junit.Test;
import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.sal.connect.api.MessageTransformer;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.api.SchemaContextProviderFactory;
import org.opendaylight.controller.sal.connect.api.SchemaSourceProviderFactory;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.ModuleImport;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.opendaylight.yangtools.yang.model.repo.api.MissingSchemaSourceException;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaResolutionException;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.spi.PotentialSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistration;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistry;
import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
public static final String TEST_NAMESPACE = "test:namespace";
public static final String TEST_MODULE = "test-module";
public static final String TEST_REVISION = "2013-07-22";
- private NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver = new NetconfStateSchemas.NetconfStateSchemasResolver() {
+ public static final SourceIdentifier TEST_SID = new SourceIdentifier(TEST_MODULE, Optional.of(TEST_REVISION));
+ public static final String TEST_CAPABILITY = TEST_NAMESPACE + "?module=" + TEST_MODULE + "&revision=" + TEST_REVISION;
+
+ public static final SourceIdentifier TEST_SID2 = new SourceIdentifier(TEST_MODULE + "2", Optional.of(TEST_REVISION));
+ public static final String TEST_CAPABILITY2 = TEST_NAMESPACE + "?module=" + TEST_MODULE + "2" + "&revision=" + TEST_REVISION;
+
+ private static final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver = new NetconfStateSchemas.NetconfStateSchemasResolver() {
@Override
public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
};
@Test
- public void testNetconfDeviceWithoutMonitoring() throws Exception {
+ public void testNetconfDeviceFailFirstSchemaFailSecondEmpty() throws Exception {
+ final ArrayList<String> capList = Lists.newArrayList(TEST_CAPABILITY);
+
final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
- final NetconfDevice device = new NetconfDevice(getId(), facade, getExecutor(), getMessageTransformer(), getSchemaContextProviderFactory(), getSourceProviderFactory(), stateSchemasResolver);
- device.onRemoteSessionUp(getSessionCaps(false, Collections.<String>emptyList()), listener);
+ final SchemaContextFactory schemaFactory = getSchemaFactory();
+
+ // Make fallback attempt to fail due to empty resolved sources
+ final SchemaResolutionException schemaResolutionException
+ = new SchemaResolutionException("fail first",
+ Collections.<SourceIdentifier>emptyList(), HashMultimap.<SourceIdentifier, ModuleImport>create());
+ doReturn(Futures.immediateFailedCheckedFuture(
+ schemaResolutionException))
+ .when(schemaFactory).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
+
+ final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
+ = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer());
+ // Monitoring not supported
+ final NetconfSessionCapabilities sessionCaps = getSessionCaps(false, capList);
+ device.onRemoteSessionUp(sessionCaps, listener);
Mockito.verify(facade, Mockito.timeout(5000)).onDeviceDisconnected();
+ Mockito.verify(listener, Mockito.timeout(5000)).close();
+ Mockito.verify(schemaFactory, times(1)).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
+ }
+
+    @Test
+    public void testNetconfDeviceMissingSource() throws Exception {
+        final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
+        final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+
+        final SchemaContextFactory schemaFactory = getSchemaFactory();
+
+        // Fail the first resolution attempt (the one requesting both sources)
+        // with a missing-source error; the retried attempt with the reduced
+        // source set then succeeds, so the device must still connect.
+        final MissingSchemaSourceException schemaResolutionException = new MissingSchemaSourceException("fail first", TEST_SID);
+        doAnswer(new Answer() {
+            @Override
+            public Object answer(final InvocationOnMock invocation) throws Throwable {
+                if(((Collection<?>) invocation.getArguments()[0]).size() == 2) {
+                    return Futures.immediateFailedCheckedFuture(schemaResolutionException);
+                } else {
+                    return Futures.immediateCheckedFuture(getSchema());
+                }
+            }
+        }).when(schemaFactory).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
+
+        final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
+                = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
+        final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer());
+        // Monitoring supported
+        final NetconfSessionCapabilities sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
+        device.onRemoteSessionUp(sessionCaps, listener);
+
+        Mockito.verify(facade, Mockito.timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+        Mockito.verify(schemaFactory, times(2)).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
+    }
+
+    /**
+     * Builds a mocked SchemaSourceRegistry whose registrations are no-ops
+     * (close() does nothing), so the device under test can register schema
+     * sources without a real repository.
+     */
+    private SchemaSourceRegistry getSchemaRegistry() {
+        final SchemaSourceRegistry mock = mock(SchemaSourceRegistry.class);
+        final SchemaSourceRegistration mockReg = mock(SchemaSourceRegistration.class);
+        doNothing().when(mockReg).close();
+        doReturn(mockReg).when(mock).registerSchemaSource(any(org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider.class), any(PotentialSchemaSource.class));
+        return mock;
+    }
}
@Test
final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
final MessageTransformer<NetconfMessage> messageTransformer = getMessageTransformer();
- final NetconfDevice device = new NetconfDevice(getId(), facade, getExecutor(), messageTransformer, getSchemaContextProviderFactory(), getSourceProviderFactory(), stateSchemasResolver);
+
+ final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
+ = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), getSchemaFactory(), stateSchemasResolver);
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer);
device.onNotification(netconfMessage);
device.onNotification(netconfMessage);
verify(facade, times(0)).onNotification(any(CompositeNode.class));
final NetconfSessionCapabilities sessionCaps = getSessionCaps(true,
- Lists.newArrayList(TEST_NAMESPACE + "?module=" + TEST_MODULE + "&revision=" + TEST_REVISION));
+ Lists.newArrayList(TEST_CAPABILITY));
device.onRemoteSessionUp(sessionCaps, listener);
final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
- final SchemaContextProviderFactory schemaContextProviderFactory = getSchemaContextProviderFactory();
- final SchemaSourceProviderFactory<InputStream> sourceProviderFactory = getSourceProviderFactory();
+ final SchemaContextFactory schemaContextProviderFactory = getSchemaFactory();
final MessageTransformer<NetconfMessage> messageTransformer = getMessageTransformer();
- final NetconfDevice device = new NetconfDevice(getId(), facade, getExecutor(), messageTransformer, schemaContextProviderFactory, sourceProviderFactory, stateSchemasResolver);
+ final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
+ = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaContextProviderFactory, stateSchemasResolver);
+ final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer);
final NetconfSessionCapabilities sessionCaps = getSessionCaps(true,
Lists.newArrayList(TEST_NAMESPACE + "?module=" + TEST_MODULE + "&revision=" + TEST_REVISION));
device.onRemoteSessionUp(sessionCaps, listener);
- verify(sourceProviderFactory, timeout(5000)).createSourceProvider(any(RpcImplementation.class));
- verify(schemaContextProviderFactory, timeout(5000)).createContextProvider(any(Collection.class), any(SchemaSourceProvider.class));
+ verify(schemaContextProviderFactory, timeout(5000)).createSchemaContext(any(Collection.class));
verify(messageTransformer, timeout(5000)).onGlobalContextUpdated(any(SchemaContext.class));
- verify(facade, timeout(5000)).onDeviceConnected(any(SchemaContextProvider.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ verify(facade, timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
device.onRemoteSessionDown();
verify(facade, timeout(5000)).onDeviceDisconnected();
device.onRemoteSessionUp(sessionCaps, listener);
- verify(sourceProviderFactory, timeout(5000).times(2)).createSourceProvider(any(RpcImplementation.class));
- verify(schemaContextProviderFactory, timeout(5000).times(2)).createContextProvider(any(Collection.class), any(SchemaSourceProvider.class));
+ verify(schemaContextProviderFactory, timeout(5000).times(2)).createSchemaContext(any(Collection.class));
verify(messageTransformer, timeout(5000).times(2)).onGlobalContextUpdated(any(SchemaContext.class));
- verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContextProvider.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
}
- private SchemaContextProviderFactory getSchemaContextProviderFactory() {
- final SchemaContextProviderFactory schemaContextProviderFactory = mockClass(SchemaContextProviderFactory.class);
- doReturn(new SchemaContextProvider() {
- @Override
- public SchemaContext getSchemaContext() {
- return getSchema();
- }
- }).when(schemaContextProviderFactory).createContextProvider(any(Collection.class), any(SchemaSourceProvider.class));
- return schemaContextProviderFactory;
+ private SchemaContextFactory getSchemaFactory() {
+ final SchemaContextFactory schemaFactory = mockClass(SchemaContextFactory.class);
+ doReturn(Futures.immediateCheckedFuture(getSchema())).when(schemaFactory).createSchemaContext(any(Collection.class));
+ return schemaFactory;
}
public static SchemaContext getSchema() {
private RemoteDeviceHandler<NetconfSessionCapabilities> getFacade() throws Exception {
final RemoteDeviceHandler<NetconfSessionCapabilities> remoteDeviceHandler = mockCloseableClass(RemoteDeviceHandler.class);
- doNothing().when(remoteDeviceHandler).onDeviceConnected(any(SchemaContextProvider.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+ doNothing().when(remoteDeviceHandler).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
doNothing().when(remoteDeviceHandler).onDeviceDisconnected();
doNothing().when(remoteDeviceHandler).onNotification(any(CompositeNode.class));
return remoteDeviceHandler;
}
private static <T> T mockClass(final Class<T> remoteDeviceHandlerClass) {
- final T mock = Mockito.mock(remoteDeviceHandlerClass);
+ final T mock = mock(remoteDeviceHandlerClass);
Mockito.doReturn(remoteDeviceHandlerClass.getSimpleName()).when(mock).toString();
return mock;
}
<groupId>com.typesafe.akka</groupId>
<artifactId>akka-osgi_${scala.version}</artifactId>
</dependency>
+
+ <dependency>
+ <groupId>com.typesafe.akka</groupId>
+ <artifactId>akka-slf4j_${scala.version}</artifactId>
+ </dependency>
<!-- SAL Dependencies -->
<dependency>
<scope>test</scope>
</dependency>
- <dependency>
+ <dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>${slf4j.version}</version>
import akka.japi.Function;
import org.opendaylight.controller.remote.rpc.messages.UpdateSchemaContext;
import org.opendaylight.controller.remote.rpc.registry.ClusterWrapper;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistryOld;
import org.opendaylight.controller.sal.core.api.Broker;
import org.opendaylight.controller.sal.core.api.RpcProvisionRegistry;
import org.opendaylight.yangtools.yang.common.QName;
private void createRpcActors() {
LOG.debug("Create rpc registry and broker actors");
- rpcRegistry = getContext().actorOf(RpcRegistry.props(clusterWrapper), ActorConstants.RPC_REGISTRY);
+ rpcRegistry = getContext().actorOf(RpcRegistryOld.props(clusterWrapper), ActorConstants.RPC_REGISTRY);
rpcBroker = getContext().actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext), ActorConstants.RPC_BROKER);
}
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-
package org.opendaylight.controller.remote.rpc.registry;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableSet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-public class RoutingTable<I, R> {
-
- private final Logger LOG = LoggerFactory.getLogger(RoutingTable.class);
+import akka.actor.ActorRef;
+import akka.japi.Option;
+import akka.japi.Pair;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Copier;
+import org.opendaylight.controller.sal.connector.api.RpcRouter;
- private ConcurrentMap<I,R> globalRpcMap = new ConcurrentHashMap<>();
- private ConcurrentMap<I, LinkedHashSet<R>> routedRpcMap = new ConcurrentHashMap<>();
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
- public ConcurrentMap<I, R> getGlobalRpcMap() {
- return globalRpcMap;
- }
+public class RoutingTable implements Copier<RoutingTable>, Serializable {
- public ConcurrentMap<I, LinkedHashSet<R>> getRoutedRpcMap() {
- return routedRpcMap;
- }
+ private Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table = new HashMap<>();
+ private ActorRef router;
- public R getGlobalRoute(final I routeId) {
- Preconditions.checkNotNull(routeId, "getGlobalRoute: routeId cannot be null!");
- return globalRpcMap.get(routeId);
- }
+ @Override
+ public RoutingTable copy() {
+ RoutingTable copy = new RoutingTable();
+ copy.setTable(new HashMap<>(table));
+ copy.setRouter(this.getRouter());
- public void addGlobalRoute(final I routeId, final R route) {
- Preconditions.checkNotNull(routeId, "addGlobalRoute: routeId cannot be null!");
- Preconditions.checkNotNull(route, "addGlobalRoute: route cannot be null!");
- LOG.debug("addGlobalRoute: adding a new route with id[{}] and value [{}]", routeId, route);
- if(globalRpcMap.putIfAbsent(routeId, route) != null) {
- LOG.debug("A route already exist for route id [{}] ", routeId);
+ return copy;
}
- }
- public void removeGlobalRoute(final I routeId) {
- Preconditions.checkNotNull(routeId, "removeGlobalRoute: routeId cannot be null!");
- LOG.debug("removeGlobalRoute: removing a new route with id [{}]", routeId);
- globalRpcMap.remove(routeId);
- }
+ public Option<Pair<ActorRef, Long>> getRouterFor(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
+ Long updatedTime = table.get(routeId);
- public Set<R> getRoutedRpc(final I routeId) {
- Preconditions.checkNotNull(routeId, "getRoutes: routeId cannot be null!");
- Set<R> routes = routedRpcMap.get(routeId);
-
- if (routes == null) {
- return Collections.emptySet();
+ if (updatedTime == null || router == null)
+ return Option.none();
+ else
+ return Option.option(new Pair<>(router, updatedTime));
}
- return ImmutableSet.copyOf(routes);
- }
-
- public R getLastAddedRoutedRpc(final I routeId) {
-
- Set<R> routes = getRoutedRpc(routeId);
-
- if (routes.isEmpty()) {
- return null;
+ public void addRoute(RpcRouter.RouteIdentifier<?,?,?> routeId){
+ table.put(routeId, System.currentTimeMillis());
}
- R route = null;
- Iterator<R> iter = routes.iterator();
- while (iter.hasNext()) {
- route = iter.next();
+ public void removeRoute(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
+ table.remove(routeId);
}
- return route;
- }
-
- public void addRoutedRpc(final I routeId, final R route) {
- Preconditions.checkNotNull(routeId, "addRoute: routeId cannot be null");
- Preconditions.checkNotNull(route, "addRoute: route cannot be null");
- LOG.debug("addRoute: adding a route with k/v [{}/{}]", routeId, route);
- threadSafeAdd(routeId, route);
- }
-
- public void addRoutedRpcs(final Set<I> routeIds, final R route) {
- Preconditions.checkNotNull(routeIds, "addRoutes: routeIds must not be null");
- for (I routeId : routeIds){
- addRoutedRpc(routeId, route);
+ public Boolean contains(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
+ return table.containsKey(routeId);
}
- }
- public void removeRoute(final I routeId, final R route) {
- Preconditions.checkNotNull(routeId, "removeRoute: routeId cannot be null!");
- Preconditions.checkNotNull(route, "removeRoute: route cannot be null!");
-
- LinkedHashSet<R> routes = routedRpcMap.get(routeId);
- if (routes == null) {
- return;
+ public Boolean isEmpty(){
+ return table.isEmpty();
}
- LOG.debug("removeRoute: removing a new route with k/v [{}/{}]", routeId, route);
- threadSafeRemove(routeId, route);
- }
-
- public void removeRoutes(final Set<I> routeIds, final R route) {
- Preconditions.checkNotNull(routeIds, "removeRoutes: routeIds must not be null");
- for (I routeId : routeIds){
- removeRoute(routeId, route);
+ ///
+ /// Getter, Setters
+ ///
+ //TODO: Remove public
+ public Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> getTable() {
+ return table;
}
- }
-
- /**
- * This method guarantees that no 2 thread over write each other's changes.
- * Just so that we dont end up in infinite loop, it tries for 100 times then throw
- */
- private void threadSafeAdd(final I routeId, final R route) {
- for (int i=0;i<100;i++){
-
- LinkedHashSet<R> updatedRoutes = new LinkedHashSet<>();
- updatedRoutes.add(route);
- LinkedHashSet<R> oldRoutes = routedRpcMap.putIfAbsent(routeId, updatedRoutes);
- if (oldRoutes == null) {
- return;
- }
+ void setTable(Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table) {
+ this.table = table;
+ }
- updatedRoutes = new LinkedHashSet<>(oldRoutes);
- updatedRoutes.add(route);
+ public ActorRef getRouter() {
+ return router;
+ }
- if (routedRpcMap.replace(routeId, oldRoutes, updatedRoutes)) {
- return;
- }
+ public void setRouter(ActorRef router) {
+ this.router = router;
}
- //the method did not already return means it failed to add route in 100 attempts
- throw new IllegalStateException("Failed to add route [" + routeId + "]");
- }
-
- /**
- * This method guarantees that no 2 thread over write each other's changes.
- * Just so that we dont end up in infinite loop, it tries for 100 times then throw
- */
- private void threadSafeRemove(final I routeId, final R route) {
- LinkedHashSet<R> updatedRoutes = null;
- for (int i=0;i<100;i++){
- LinkedHashSet<R> oldRoutes = routedRpcMap.get(routeId);
-
- // if route to be deleted is the only entry in the set then remove routeId from the cache
- if ((oldRoutes.size() == 1) && oldRoutes.contains(route)){
- routedRpcMap.remove(routeId);
- return;
- }
-
- // if there are multiple routes for this routeId, remove the route to be deleted only from the set.
- updatedRoutes = new LinkedHashSet<>(oldRoutes);
- updatedRoutes.remove(route);
- if (routedRpcMap.replace(routeId, oldRoutes, updatedRoutes)) {
- return;
- }
+ @Override
+ public String toString() {
+ return "RoutingTable{" +
+ "table=" + table +
+ ", router=" + router +
+ '}';
}
- //the method did not already return means it failed to remove route in 100 attempts
- throw new IllegalStateException("Failed to remove route [" + routeId + "]");
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc.registry;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+public class RoutingTableOld<I, R> {
+
+ private final Logger LOG = LoggerFactory.getLogger(RoutingTableOld.class);
+
+ private ConcurrentMap<I,R> globalRpcMap = new ConcurrentHashMap<>();
+ private ConcurrentMap<I, LinkedHashSet<R>> routedRpcMap = new ConcurrentHashMap<>();
+
+ public ConcurrentMap<I, R> getGlobalRpcMap() {
+ return globalRpcMap;
+ }
+
+ public ConcurrentMap<I, LinkedHashSet<R>> getRoutedRpcMap() {
+ return routedRpcMap;
+ }
+
+ public R getGlobalRoute(final I routeId) {
+ Preconditions.checkNotNull(routeId, "getGlobalRoute: routeId cannot be null!");
+ return globalRpcMap.get(routeId);
+ }
+
+ public void addGlobalRoute(final I routeId, final R route) {
+ Preconditions.checkNotNull(routeId, "addGlobalRoute: routeId cannot be null!");
+ Preconditions.checkNotNull(route, "addGlobalRoute: route cannot be null!");
+ LOG.debug("addGlobalRoute: adding a new route with id[{}] and value [{}]", routeId, route);
+ if(globalRpcMap.putIfAbsent(routeId, route) != null) {
+ LOG.debug("A route already exist for route id [{}] ", routeId);
+ }
+ }
+
+ public void removeGlobalRoute(final I routeId) {
+ Preconditions.checkNotNull(routeId, "removeGlobalRoute: routeId cannot be null!");
+ LOG.debug("removeGlobalRoute: removing a new route with id [{}]", routeId);
+ globalRpcMap.remove(routeId);
+ }
+
+ public Set<R> getRoutedRpc(final I routeId) {
+ Preconditions.checkNotNull(routeId, "getRoutes: routeId cannot be null!");
+ Set<R> routes = routedRpcMap.get(routeId);
+
+ if (routes == null) {
+ return Collections.emptySet();
+ }
+
+ return ImmutableSet.copyOf(routes);
+ }
+
+ public R getLastAddedRoutedRpc(final I routeId) {
+
+ Set<R> routes = getRoutedRpc(routeId);
+
+ if (routes.isEmpty()) {
+ return null;
+ }
+
+ R route = null;
+ Iterator<R> iter = routes.iterator();
+ while (iter.hasNext()) {
+ route = iter.next();
+ }
+
+ return route;
+ }
+
+ public void addRoutedRpc(final I routeId, final R route) {
+ Preconditions.checkNotNull(routeId, "addRoute: routeId cannot be null");
+ Preconditions.checkNotNull(route, "addRoute: route cannot be null");
+ LOG.debug("addRoute: adding a route with k/v [{}/{}]", routeId, route);
+ threadSafeAdd(routeId, route);
+ }
+
+ public void addRoutedRpcs(final Set<I> routeIds, final R route) {
+ Preconditions.checkNotNull(routeIds, "addRoutes: routeIds must not be null");
+ for (I routeId : routeIds){
+ addRoutedRpc(routeId, route);
+ }
+ }
+
+ public void removeRoute(final I routeId, final R route) {
+ Preconditions.checkNotNull(routeId, "removeRoute: routeId cannot be null!");
+ Preconditions.checkNotNull(route, "removeRoute: route cannot be null!");
+
+ LinkedHashSet<R> routes = routedRpcMap.get(routeId);
+ if (routes == null) {
+ return;
+ }
+ LOG.debug("removeRoute: removing a new route with k/v [{}/{}]", routeId, route);
+ threadSafeRemove(routeId, route);
+ }
+
+ public void removeRoutes(final Set<I> routeIds, final R route) {
+ Preconditions.checkNotNull(routeIds, "removeRoutes: routeIds must not be null");
+ for (I routeId : routeIds){
+ removeRoute(routeId, route);
+ }
+ }
+
+ /**
+ * This method guarantees that no 2 thread over write each other's changes.
+ * Just so that we dont end up in infinite loop, it tries for 100 times then throw
+ */
+ private void threadSafeAdd(final I routeId, final R route) {
+
+ for (int i=0;i<100;i++){
+
+ LinkedHashSet<R> updatedRoutes = new LinkedHashSet<>();
+ updatedRoutes.add(route);
+ LinkedHashSet<R> oldRoutes = routedRpcMap.putIfAbsent(routeId, updatedRoutes);
+ if (oldRoutes == null) {
+ return;
+ }
+
+ updatedRoutes = new LinkedHashSet<>(oldRoutes);
+ updatedRoutes.add(route);
+
+ if (routedRpcMap.replace(routeId, oldRoutes, updatedRoutes)) {
+ return;
+ }
+ }
+ //the method did not already return means it failed to add route in 100 attempts
+ throw new IllegalStateException("Failed to add route [" + routeId + "]");
+ }
+
+ /**
+ * This method guarantees that no 2 thread over write each other's changes.
+ * Just so that we dont end up in infinite loop, it tries for 100 times then throw
+ */
+ private void threadSafeRemove(final I routeId, final R route) {
+ LinkedHashSet<R> updatedRoutes = null;
+ for (int i=0;i<100;i++){
+ LinkedHashSet<R> oldRoutes = routedRpcMap.get(routeId);
+
+ // if route to be deleted is the only entry in the set then remove routeId from the cache
+ if ((oldRoutes.size() == 1) && oldRoutes.contains(route)){
+ routedRpcMap.remove(routeId);
+ return;
+ }
+
+ // if there are multiple routes for this routeId, remove the route to be deleted only from the set.
+ updatedRoutes = new LinkedHashSet<>(oldRoutes);
+ updatedRoutes.remove(route);
+ if (routedRpcMap.replace(routeId, oldRoutes, updatedRoutes)) {
+ return;
+ }
+
+ }
+ //the method did not already return means it failed to remove route in 100 attempts
+ throw new IllegalStateException("Failed to remove route [" + routeId + "]");
+ }
+}
*/
package org.opendaylight.controller.remote.rpc.registry;
-import akka.actor.ActorSelection;
+import akka.actor.ActorRef;
import akka.actor.Address;
import akka.actor.Props;
-import akka.cluster.ClusterEvent;
-import akka.cluster.Member;
-import akka.japi.Creator;
-import org.opendaylight.controller.remote.rpc.AbstractUntypedActor;
-import org.opendaylight.controller.remote.rpc.ActorConstants;
-import org.opendaylight.controller.remote.rpc.messages.AddRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.AddRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpcReply;
-import org.opendaylight.controller.remote.rpc.messages.GetRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRpcReply;
-import org.opendaylight.controller.remote.rpc.messages.RemoveRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.RemoveRpc;
-import org.opendaylight.controller.remote.rpc.messages.RoutingTableData;
+import akka.actor.UntypedActor;
+import akka.dispatch.Mapper;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.japi.Option;
+import akka.japi.Pair;
+import akka.pattern.Patterns;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
+import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.collection.JavaConversions;
+import scala.concurrent.Future;
-import java.util.LinkedHashSet;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
+
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBucketsReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucket;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucketReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateBucket;
/**
- * This Actor maintains the routing table state and sync it with other nodes in the cluster.
- *
- * A scheduler runs after an interval of time, which pick a random member from the cluster
- * and send the current state of routing table to the member.
- *
- * when a message of routing table data is received, it gets merged with the local routing table
- * to keep the latest data.
+ * Registry to look up cluster nodes that have registered for a given rpc.
+ * <p/>
+ * It uses {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore} to maintain this
+ * cluster wide information.
*/
+public class RpcRegistry extends UntypedActor {
+
+ final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+
+ /**
+ * Store to keep the registry. Bucket store sync's it across nodes in the cluster
+ */
+ private ActorRef bucketStore;
-public class RpcRegistry extends AbstractUntypedActor {
-
- private static final Logger LOG = LoggerFactory.getLogger(RpcRegistry.class);
- private RoutingTable<RpcRouter.RouteIdentifier<?, ?, ?>, String> routingTable;
- private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
- private final ClusterWrapper clusterWrapper;
- private final ScheduledFuture<?> syncScheduler;
-
- private RpcRegistry(ClusterWrapper clusterWrapper){
- this.routingTable = new RoutingTable<>();
- this.clusterWrapper = clusterWrapper;
- this.syncScheduler = scheduler.scheduleAtFixedRate(new SendRoutingTable(), 10, 10, TimeUnit.SECONDS);
- }
-
- public static Props props(final ClusterWrapper clusterWrapper){
- return Props.create(new Creator<RpcRegistry>(){
-
- @Override
- public RpcRegistry create() throws Exception {
- return new RpcRegistry(clusterWrapper);
- }
- });
- }
-
- @Override
- protected void handleReceive(Object message) throws Exception {
- LOG.debug("Received message {}", message);
- if(message instanceof RoutingTableData) {
- syncRoutingTable((RoutingTableData) message);
- } else if(message instanceof GetRoutedRpc) {
- getRoutedRpc((GetRoutedRpc) message);
- } else if(message instanceof GetRpc) {
- getRpc((GetRpc) message);
- } else if(message instanceof AddRpc) {
- addRpc((AddRpc) message);
- } else if(message instanceof RemoveRpc) {
- removeRpc((RemoveRpc) message);
- } else if(message instanceof AddRoutedRpc) {
- addRoutedRpc((AddRoutedRpc) message);
- } else if(message instanceof RemoveRoutedRpc) {
- removeRoutedRpc((RemoveRoutedRpc) message);
+ /**
+ * Rpc broker that would use the registry to route requests.
+ */
+ private ActorRef localRouter;
+
+ public RpcRegistry() {
+ bucketStore = getContext().actorOf(Props.create(BucketStore.class), "store");
+ }
+
+ public RpcRegistry(ActorRef bucketStore) {
+ this.bucketStore = bucketStore;
}
- }
- private void getRoutedRpc(GetRoutedRpc rpcMsg){
- LOG.debug("Get latest routed Rpc location from routing table {}", rpcMsg);
- String remoteActorPath = routingTable.getLastAddedRoutedRpc(rpcMsg.getRouteId());
- GetRoutedRpcReply routedRpcReply = new GetRoutedRpcReply(remoteActorPath);
+ @Override
+ public void onReceive(Object message) throws Exception {
+
+ log.debug("Received message: message [{}]", message);
- getSender().tell(routedRpcReply, self());
- }
+ //TODO: if sender is remote, reject message
- private void getRpc(GetRpc rpcMsg) {
- LOG.debug("Get global Rpc location from routing table {}", rpcMsg);
- String remoteActorPath = routingTable.getGlobalRoute(rpcMsg.getRouteId());
- GetRpcReply rpcReply = new GetRpcReply(remoteActorPath);
+ if (message instanceof SetLocalRouter)
+ receiveSetLocalRouter((SetLocalRouter) message);
- getSender().tell(rpcReply, self());
- }
+ if (message instanceof AddOrUpdateRoutes)
+ receiveAddRoutes((AddOrUpdateRoutes) message);
- private void addRpc(AddRpc rpcMsg) {
- LOG.debug("Add Rpc to routing table {}", rpcMsg);
- routingTable.addGlobalRoute(rpcMsg.getRouteId(), rpcMsg.getActorPath());
+ else if (message instanceof RemoveRoutes)
+ receiveRemoveRoutes((RemoveRoutes) message);
- getSender().tell("Success", self());
- }
+ else if (message instanceof Messages.FindRouters)
+ receiveGetRouter((FindRouters) message);
- private void removeRpc(RemoveRpc rpcMsg) {
- LOG.debug("Removing Rpc to routing table {}", rpcMsg);
- routingTable.removeGlobalRoute(rpcMsg.getRouteId());
+ else
+ unhandled(message);
+ }
+
+ /**
+ * Register's rpc broker
+ *
+ * @param message contains {@link akka.actor.ActorRef} for rpc broker
+ */
+ private void receiveSetLocalRouter(SetLocalRouter message) {
+ localRouter = message.getRouter();
+ }
- getSender().tell("Success", self());
- }
+ /**
+ * @param msg
+ */
+ private void receiveAddRoutes(AddOrUpdateRoutes msg) {
- private void addRoutedRpc(AddRoutedRpc rpcMsg) {
- routingTable.addRoutedRpcs(rpcMsg.getAnnouncements(), rpcMsg.getActorPath());
- getSender().tell("Success", self());
- }
+ Preconditions.checkState(localRouter != null, "Router must be set first");
+
+ Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), 1000);
+ futureReply.map(getMapperToAddRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
+ }
- private void removeRoutedRpc(RemoveRoutedRpc rpcMsg) {
- routingTable.removeRoutes(rpcMsg.getAnnouncements(), rpcMsg.getActorPath());
- getSender().tell("Success", self());
- }
+ /**
+ * @param msg contains list of route ids to remove
+ */
+ private void receiveRemoveRoutes(RemoveRoutes msg) {
- private void syncRoutingTable(RoutingTableData routingTableData) {
- LOG.debug("Syncing routing table {}", routingTableData);
+ Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), 1000);
+ futureReply.map(getMapperToRemoveRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
- Map<RpcRouter.RouteIdentifier<?, ?, ?>, String> newRpcMap = routingTableData.getRpcMap();
- Set<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = newRpcMap.keySet();
- for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
- routingTable.addGlobalRoute(routeId, newRpcMap.get(routeId));
}
- Map<RpcRouter.RouteIdentifier<?, ?, ?>, LinkedHashSet<String>> newRoutedRpcMap =
- routingTableData.getRoutedRpcMap();
- routeIds = newRoutedRpcMap.keySet();
+ /**
+ * Finds routers for the given rpc.
+ *
+ * @param msg
+ */
+ private void receiveGetRouter(FindRouters msg) {
+ final ActorRef sender = getSender();
- for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
- Set<String> routeAddresses = newRoutedRpcMap.get(routeId);
- for(String routeAddress : routeAddresses) {
- routingTable.addRoutedRpc(routeId, routeAddress);
- }
+ Future<Object> futureReply = Patterns.ask(bucketStore, new GetAllBuckets(), 1000);
+ futureReply.map(getMapperToGetRouter(msg.getRouteIdentifier(), sender), getContext().dispatcher());
}
- }
-
- private ActorSelection getRandomRegistryActor() {
- ClusterEvent.CurrentClusterState clusterState = clusterWrapper.getState();
- ActorSelection actor = null;
- Set<Member> members = JavaConversions.asJavaSet(clusterState.members());
- int memberSize = members.size();
- // Don't select yourself
- if(memberSize > 1) {
- Address currentNodeAddress = clusterWrapper.getAddress();
- int index = new Random().nextInt(memberSize);
- int i = 0;
- // keeping previous member, in case when random index member is same as current actor
- // and current actor member is last in set
- Member previousMember = null;
- for(Member member : members){
- if(i == index-1) {
- previousMember = member;
- }
- if(i == index) {
- if(!currentNodeAddress.equals(member.address())) {
- actor = this.context().actorSelection(member.address() + ActorConstants.RPC_REGISTRY_PATH);
- break;
- } else if(index < memberSize-1){ // pick the next element in the set
- index++;
- }
+
+ /**
+ * Helper to create empty reply when no routers are found
+ *
+ * @return
+ */
+ private Messages.FindRoutersReply createEmptyReply() {
+ List<Pair<ActorRef, Long>> routerWithUpdateTime = Collections.emptyList();
+ return new Messages.FindRoutersReply(routerWithUpdateTime);
+ }
+
+ /**
+ * Helper to create a reply when routers are found for the given rpc
+ *
+ * @param buckets
+ * @param routeId
+ * @return
+ */
+ private Messages.FindRoutersReply createReplyWithRouters(Map<Address, Bucket> buckets, RpcRouter.RouteIdentifier<?, ?, ?> routeId) {
+
+ List<Pair<ActorRef, Long>> routers = new ArrayList<>();
+ Option<Pair<ActorRef, Long>> routerWithUpdateTime = null;
+
+ for (Bucket bucket : buckets.values()) {
+
+ RoutingTable table = (RoutingTable) bucket.getData();
+ if (table == null)
+ continue;
+
+ routerWithUpdateTime = table.getRouterFor(routeId);
+ if (routerWithUpdateTime.isEmpty())
+ continue;
+
+ routers.add(routerWithUpdateTime.get());
}
- i++;
- }
- if(actor == null && previousMember != null) {
- actor = this.context().actorSelection(previousMember.address() + ActorConstants.RPC_REGISTRY_PATH);
- }
+
+ return new Messages.FindRoutersReply(routers);
}
- return actor;
- }
- private class SendRoutingTable implements Runnable {
- @Override
- public void run() {
- RoutingTableData routingTableData =
- new RoutingTableData(routingTable.getGlobalRpcMap(), routingTable.getRoutedRpcMap());
- LOG.debug("Sending routing table for sync {}", routingTableData);
- ActorSelection actor = getRandomRegistryActor();
- if(actor != null) {
- actor.tell(routingTableData, self());
- }
+ ///
+ ///private factories to create Mapper
+ ///
+
+ /**
+ * Receives all buckets returned from bucket store and finds routers for the buckets where given rpc(routeId) is found
+ *
+ * @param routeId the rpc
+ * @param sender client who asked to find the routers.
+ * @return
+ */
+ private Mapper<Object, Void> getMapperToGetRouter(final RpcRouter.RouteIdentifier<?, ?, ?> routeId, final ActorRef sender) {
+ return new Mapper<Object, Void>() {
+ @Override
+ public Void apply(Object replyMessage) {
+
+ if (replyMessage instanceof GetAllBucketsReply) {
+
+ GetAllBucketsReply reply = (GetAllBucketsReply) replyMessage;
+ Map<Address, Bucket> buckets = reply.getBuckets();
+
+ if (buckets == null || buckets.isEmpty()) {
+ sender.tell(createEmptyReply(), getSelf());
+ return null;
+ }
+
+ sender.tell(createReplyWithRouters(buckets, routeId), getSelf());
+ }
+ return null;
+ }
+ };
+ }
+
+ /**
+ * Receives local bucket from bucket store and updates routing table in it by removing the route. Subsequently,
+ * it updates the local bucket in bucket store.
+ *
+ * @param routeIds rpc to remote
+ * @return
+ */
+ private Mapper<Object, Void> getMapperToRemoveRoutes(final List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds) {
+ return new Mapper<Object, Void>() {
+ @Override
+ public Void apply(Object replyMessage) {
+ if (replyMessage instanceof GetLocalBucketReply) {
+
+ GetLocalBucketReply reply = (GetLocalBucketReply) replyMessage;
+ Bucket<RoutingTable> bucket = reply.getBucket();
+
+ if (bucket == null) {
+ log.debug("Local bucket is null");
+ return null;
+ }
+
+ RoutingTable table = bucket.getData();
+ if (table == null)
+ table = new RoutingTable();
+
+ table.setRouter(localRouter);
+
+ if (!table.isEmpty()) {
+ for (RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
+ table.removeRoute(routeId);
+ }
+ }
+ bucket.setData(table);
+
+ UpdateBucket updateBucketMessage = new UpdateBucket(bucket);
+ bucketStore.tell(updateBucketMessage, getSelf());
+ }
+ return null;
+ }
+ };
+ }
+
+ /**
+ * Receives local bucket from bucket store and updates routing table in it by adding the route. Subsequently,
+ * it updates the local bucket in bucket store.
+ *
+ * @param routeIds rpc to add
+ * @return
+ */
+ private Mapper<Object, Void> getMapperToAddRoutes(final List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds) {
+
+ return new Mapper<Object, Void>() {
+ @Override
+ public Void apply(Object replyMessage) {
+ if (replyMessage instanceof GetLocalBucketReply) {
+
+ GetLocalBucketReply reply = (GetLocalBucketReply) replyMessage;
+ Bucket<RoutingTable> bucket = reply.getBucket();
+
+ if (bucket == null) {
+ log.debug("Local bucket is null");
+ return null;
+ }
+
+ RoutingTable table = bucket.getData();
+ if (table == null)
+ table = new RoutingTable();
+
+ table.setRouter(localRouter);
+ for (RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
+ table.addRoute(routeId);
+ }
+
+ bucket.setData(table);
+
+ UpdateBucket updateBucketMessage = new UpdateBucket(bucket);
+ bucketStore.tell(updateBucketMessage, getSelf());
+ }
+
+ return null;
+ }
+ };
+ }
+
+ /**
+ * All messages used by the RpcRegistry
+ */
+ public static class Messages {
+
+
+ public static class ContainsRoute {
+ final List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIdentifiers;
+
+ public ContainsRoute(List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIdentifiers) {
+ Preconditions.checkArgument(routeIdentifiers != null &&
+ !routeIdentifiers.isEmpty(),
+ "Route Identifiers must be supplied");
+ this.routeIdentifiers = routeIdentifiers;
+ }
+
+ public List<RpcRouter.RouteIdentifier<?, ?, ?>> getRouteIdentifiers() {
+ return this.routeIdentifiers;
+ }
+
+ @Override
+ public String toString() {
+ return "ContainsRoute{" +
+ "routeIdentifiers=" + routeIdentifiers +
+ '}';
+ }
+ }
+
+ public static class AddOrUpdateRoutes extends ContainsRoute {
+
+ public AddOrUpdateRoutes(List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIdentifiers) {
+ super(routeIdentifiers);
+ }
+ }
+
+ public static class RemoveRoutes extends ContainsRoute {
+
+ public RemoveRoutes(List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIdentifiers) {
+ super(routeIdentifiers);
+ }
+ }
+
+ public static class SetLocalRouter {
+ private final ActorRef router;
+
+ public SetLocalRouter(ActorRef router) {
+ Preconditions.checkArgument(router != null, "Router must not be null");
+ this.router = router;
+ }
+
+ public ActorRef getRouter() {
+ return this.router;
+ }
+
+ @Override
+ public String toString() {
+ return "SetLocalRouter{" +
+ "router=" + router +
+ '}';
+ }
+ }
+
+ public static class FindRouters {
+ private final RpcRouter.RouteIdentifier<?, ?, ?> routeIdentifier;
+
+ public FindRouters(RpcRouter.RouteIdentifier<?, ?, ?> routeIdentifier) {
+ Preconditions.checkArgument(routeIdentifier != null, "Route must not be null");
+ this.routeIdentifier = routeIdentifier;
+ }
+
+ public RpcRouter.RouteIdentifier<?, ?, ?> getRouteIdentifier() {
+ return routeIdentifier;
+ }
+
+ @Override
+ public String toString() {
+ return "FindRouters{" +
+ "routeIdentifier=" + routeIdentifier +
+ '}';
+ }
+ }
+
+ public static class FindRoutersReply {
+ final List<Pair<ActorRef, Long>> routerWithUpdateTime;
+
+ public FindRoutersReply(List<Pair<ActorRef, Long>> routerWithUpdateTime) {
+ Preconditions.checkArgument(routerWithUpdateTime != null, "List of routers found must not be null");
+ this.routerWithUpdateTime = routerWithUpdateTime;
+ }
+
+ public List<Pair<ActorRef, Long>> getRouterWithUpdateTime() {
+ return routerWithUpdateTime;
+ }
+
+ @Override
+ public String toString() {
+ return "FindRoutersReply{" +
+ "routerWithUpdateTime=" + routerWithUpdateTime +
+ '}';
+ }
+ }
}
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry;
+
+import akka.actor.ActorSelection;
+import akka.actor.Address;
+import akka.actor.Props;
+import akka.cluster.ClusterEvent;
+import akka.cluster.Member;
+import akka.japi.Creator;
+import org.opendaylight.controller.remote.rpc.AbstractUntypedActor;
+import org.opendaylight.controller.remote.rpc.ActorConstants;
+import org.opendaylight.controller.remote.rpc.messages.AddRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.AddRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpcReply;
+import org.opendaylight.controller.remote.rpc.messages.GetRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRpcReply;
+import org.opendaylight.controller.remote.rpc.messages.RemoveRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.RemoveRpc;
+import org.opendaylight.controller.remote.rpc.messages.RoutingTableData;
+import org.opendaylight.controller.sal.connector.api.RpcRouter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.collection.JavaConversions;
+
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This Actor maintains the routing table state and sync it with other nodes in the cluster.
+ *
+ * A scheduler runs after an interval of time, which pick a random member from the cluster
+ * and send the current state of routing table to the member.
+ *
+ * when a message of routing table data is received, it gets merged with the local routing table
+ * to keep the latest data.
+ */
+
+public class RpcRegistryOld extends AbstractUntypedActor {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RpcRegistryOld.class);
+ private RoutingTableOld<RpcRouter.RouteIdentifier<?, ?, ?>, String> routingTable;
+ private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
+ private final ClusterWrapper clusterWrapper;
+ private final ScheduledFuture<?> syncScheduler;
+
+ private RpcRegistryOld(ClusterWrapper clusterWrapper){
+ this.routingTable = new RoutingTableOld<>();
+ this.clusterWrapper = clusterWrapper;
+ this.syncScheduler = scheduler.scheduleAtFixedRate(new SendRoutingTable(), 10, 10, TimeUnit.SECONDS);
+ }
+
+ public static Props props(final ClusterWrapper clusterWrapper){
+ return Props.create(new Creator<RpcRegistryOld>(){
+
+ @Override
+ public RpcRegistryOld create() throws Exception {
+ return new RpcRegistryOld(clusterWrapper);
+ }
+ });
+ }
+
+ @Override
+ public void postStop() {
+ // Cancel the periodic sync task and release the scheduler's non-daemon
+ // thread; without this the executor thread leaks after the actor stops.
+ syncScheduler.cancel(true);
+ scheduler.shutdownNow();
+ }
+
+ /**
+ * Dispatches incoming registry messages to the matching handler.
+ * Unrecognized messages are silently ignored (only logged at debug).
+ */
+ @Override
+ protected void handleReceive(Object message) throws Exception {
+ LOG.debug("Received message {}", message);
+ if(message instanceof RoutingTableData) {
+ syncRoutingTable((RoutingTableData) message);
+ } else if(message instanceof GetRoutedRpc) {
+ getRoutedRpc((GetRoutedRpc) message);
+ } else if(message instanceof GetRpc) {
+ getRpc((GetRpc) message);
+ } else if(message instanceof AddRpc) {
+ addRpc((AddRpc) message);
+ } else if(message instanceof RemoveRpc) {
+ removeRpc((RemoveRpc) message);
+ } else if(message instanceof AddRoutedRpc) {
+ addRoutedRpc((AddRoutedRpc) message);
+ } else if(message instanceof RemoveRoutedRpc) {
+ removeRoutedRpc((RemoveRoutedRpc) message);
+ }
+ }
+
+ /** Replies to the sender with the last-added actor path for a routed RPC. */
+ private void getRoutedRpc(GetRoutedRpc rpcMsg){
+ LOG.debug("Get latest routed Rpc location from routing table {}", rpcMsg);
+ String remoteActorPath = routingTable.getLastAddedRoutedRpc(rpcMsg.getRouteId());
+ GetRoutedRpcReply routedRpcReply = new GetRoutedRpcReply(remoteActorPath);
+
+ getSender().tell(routedRpcReply, self());
+ }
+
+ /** Replies to the sender with the actor path of a global RPC. */
+ private void getRpc(GetRpc rpcMsg) {
+ LOG.debug("Get global Rpc location from routing table {}", rpcMsg);
+ String remoteActorPath = routingTable.getGlobalRoute(rpcMsg.getRouteId());
+ GetRpcReply rpcReply = new GetRpcReply(remoteActorPath);
+
+ getSender().tell(rpcReply, self());
+ }
+
+ /** Adds a global RPC route and acknowledges with the string "Success". */
+ private void addRpc(AddRpc rpcMsg) {
+ LOG.debug("Add Rpc to routing table {}", rpcMsg);
+ routingTable.addGlobalRoute(rpcMsg.getRouteId(), rpcMsg.getActorPath());
+
+ getSender().tell("Success", self());
+ }
+
+ /** Removes a global RPC route and acknowledges with the string "Success". */
+ private void removeRpc(RemoveRpc rpcMsg) {
+ LOG.debug("Removing Rpc to routing table {}", rpcMsg);
+ routingTable.removeGlobalRoute(rpcMsg.getRouteId());
+
+ getSender().tell("Success", self());
+ }
+
+ /** Adds a set of routed-RPC announcements for the sender's actor path. */
+ private void addRoutedRpc(AddRoutedRpc rpcMsg) {
+ routingTable.addRoutedRpcs(rpcMsg.getAnnouncements(), rpcMsg.getActorPath());
+ getSender().tell("Success", self());
+ }
+
+ /** Removes a set of routed-RPC announcements for the sender's actor path. */
+ private void removeRoutedRpc(RemoveRoutedRpc rpcMsg) {
+ routingTable.removeRoutes(rpcMsg.getAnnouncements(), rpcMsg.getActorPath());
+ getSender().tell("Success", self());
+ }
+
+ /**
+ * Merges routing table data received from another node into the local
+ * table: all global routes and all routed-RPC addresses are added.
+ */
+ private void syncRoutingTable(RoutingTableData routingTableData) {
+ LOG.debug("Syncing routing table {}", routingTableData);
+
+ Map<RpcRouter.RouteIdentifier<?, ?, ?>, String> newRpcMap = routingTableData.getRpcMap();
+ Set<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = newRpcMap.keySet();
+ for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
+ routingTable.addGlobalRoute(routeId, newRpcMap.get(routeId));
+ }
+
+ Map<RpcRouter.RouteIdentifier<?, ?, ?>, LinkedHashSet<String>> newRoutedRpcMap =
+ routingTableData.getRoutedRpcMap();
+ routeIds = newRoutedRpcMap.keySet();
+
+ for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
+ Set<String> routeAddresses = newRoutedRpcMap.get(routeId);
+ for(String routeAddress : routeAddresses) {
+ routingTable.addRoutedRpc(routeId, routeAddress);
+ }
+ }
+ }
+
+ /**
+ * Picks a random cluster member other than this node and returns an
+ * ActorSelection for its registry actor, or null when this node is the
+ * only member.
+ */
+ private ActorSelection getRandomRegistryActor() {
+ ClusterEvent.CurrentClusterState clusterState = clusterWrapper.getState();
+ ActorSelection actor = null;
+ Set<Member> members = JavaConversions.asJavaSet(clusterState.members());
+ int memberSize = members.size();
+ // Don't select yourself
+ if(memberSize > 1) {
+ Address currentNodeAddress = clusterWrapper.getAddress();
+ int index = new Random().nextInt(memberSize);
+ int i = 0;
+ // keeping previous member, in case when random index member is same as current actor
+ // and current actor member is last in set
+ Member previousMember = null;
+ for(Member member : members){
+ if(i == index-1) {
+ previousMember = member;
+ }
+ if(i == index) {
+ if(!currentNodeAddress.equals(member.address())) {
+ actor = this.context().actorSelection(member.address() + ActorConstants.RPC_REGISTRY_PATH);
+ break;
+ } else if(index < memberSize-1){ // pick the next element in the set
+ index++;
+ }
+ }
+ i++;
+ }
+ if(actor == null && previousMember != null) {
+ actor = this.context().actorSelection(previousMember.address() + ActorConstants.RPC_REGISTRY_PATH);
+ }
+ }
+ return actor;
+ }
+
+ /**
+ * Periodic task (see constructor) that sends the current routing table
+ * snapshot to one randomly chosen remote registry actor.
+ */
+ private class SendRoutingTable implements Runnable {
+
+ @Override
+ public void run() {
+ RoutingTableData routingTableData =
+ new RoutingTableData(routingTable.getGlobalRpcMap(), routingTable.getRoutedRpcMap());
+ LOG.debug("Sending routing table for sync {}", routingTableData);
+ ActorSelection actor = getRandomRegistryActor();
+ if(actor != null) {
+ actor.tell(routingTableData, self());
+ }
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+
+/**
+ * A versioned container for node-local data that is synced across the
+ * cluster. Interface members are implicitly public; the redundant modifiers
+ * were removed.
+ */
+public interface Bucket<T extends Copier<T>> {
+ /** @return the version stamp of this bucket's data */
+ Long getVersion();
+ /** @return the bucket's payload (implementations may return a copy) */
+ T getData();
+ /** Replaces the bucket's payload. */
+ void setData(T data);
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import java.io.Serializable;
+
+public class BucketImpl<T extends Copier<T>> implements Bucket<T>, Serializable {
+
+ // Explicit UID so serialization stays stable across recompiles.
+ private static final long serialVersionUID = 1L;
+
+ // Version stamp; initialized to creation time (fixed stray ";;").
+ private Long version = System.currentTimeMillis();
+
+ private T data;
+
+ @Override
+ public Long getVersion() {
+ return version;
+ }
+
+ /**
+ * Returns a deep copy of the payload (via Copier.copy()) so callers
+ * cannot mutate the bucket's internal state, or null if unset.
+ */
+ @Override
+ public T getData() {
+ if (this.data == null)
+ return null;
+
+ return data.copy();
+ }
+
+ /**
+ * Replaces the payload and bumps the version. The +1 guards against the
+ * clock returning the same millisecond as the previous version.
+ */
+ @Override
+ public void setData(T data){
+ this.version = System.currentTimeMillis()+1;
+ this.data = data;
+ }
+
+ @Override
+ public String toString() {
+ return "BucketImpl{" +
+ "version=" + version +
+ ", data=" + data +
+ '}';
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.ActorRef;
+import akka.actor.Address;
+import akka.actor.Props;
+import akka.actor.UntypedActor;
+import akka.cluster.Cluster;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBucketsReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucket;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucketReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateBucket;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
+
+/**
+ * A store that syncs its data across nodes in the cluster.
+ * It maintains a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Bucket} per node. Buckets are versioned.
+ * A node can write ONLY to its bucket. This way, write conflicts are avoided.
+ * <p>
+ * Buckets are sync'ed across nodes using Gossip protocol (http://en.wikipedia.org/wiki/Gossip_protocol)<p>
+ * This store uses a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}.
+ *
+ */
+public class BucketStore extends UntypedActor {
+
+ final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+
+ /**
+ * Bucket owned by this node (fixed stray ";;" on the initializer)
+ */
+ private BucketImpl localBucket = new BucketImpl();
+
+ /**
+ * Buckets owned by other known nodes in the cluster
+ */
+ private ConcurrentMap<Address, Bucket> remoteBuckets = new ConcurrentHashMap<>();
+
+ /**
+ * Bucket version for every known node in the cluster including this node
+ */
+ private ConcurrentMap<Address, Long> versions = new ConcurrentHashMap<>();
+
+ /**
+ * Cluster address for this node
+ */
+ private final Address selfAddress = Cluster.get(getContext().system()).selfAddress();
+
+ /**
+ * Our private gossiper (assigned exactly once, so made final)
+ */
+ private final ActorRef gossiper;
+
+ public BucketStore(){
+ gossiper = getContext().actorOf(Props.create(Gossiper.class), "gossiper");
+ }
+
+ /**
+ * This constructor is useful for testing.
+ * TODO: Pass Props instead of ActorRef
+ *
+ * @param gossiper the gossiper actor to use instead of creating a child
+ */
+ public BucketStore(ActorRef gossiper){
+ this.gossiper = gossiper;
+ }
+
+ @Override
+ public void onReceive(Object message) throws Exception {
+
+ log.debug("Received message: node[{}], message[{}]", selfAddress, message);
+
+ if (message instanceof UpdateBucket)
+ receiveUpdateBucket(((UpdateBucket) message).getBucket());
+
+ else if (message instanceof GetAllBuckets)
+ receiveGetAllBucket();
+
+ else if (message instanceof GetLocalBucket)
+ receiveGetLocalBucket();
+
+ else if (message instanceof GetBucketsByMembers)
+ receiveGetBucketsByMembers(((GetBucketsByMembers) message).getMembers());
+
+ else if (message instanceof GetBucketVersions)
+ receiveGetBucketVersions();
+
+ else if (message instanceof UpdateRemoteBuckets)
+ receiveUpdateRemoteBuckets(((UpdateRemoteBuckets) message).getBuckets());
+
+ else {
+ log.debug("Unhandled message [{}]", message);
+ unhandled(message);
+ }
+
+ }
+
+ /**
+ * Replies to the sender with the bucket owned by this node
+ */
+ private void receiveGetLocalBucket() {
+ final ActorRef sender = getSender();
+ GetLocalBucketReply reply = new GetLocalBucketReply(localBucket);
+ sender.tell(reply, getSelf());
+ }
+
+ /**
+ * Updates the bucket owned by this node and records its version
+ *
+ * @param updatedBucket replacement for the local bucket
+ */
+ void receiveUpdateBucket(Bucket updatedBucket){
+
+ localBucket = (BucketImpl) updatedBucket;
+ versions.put(selfAddress, localBucket.getVersion());
+ }
+
+ /**
+ * Replies to the sender with all the buckets this node knows about,
+ * self owned + remote
+ */
+ void receiveGetAllBucket(){
+ final ActorRef sender = getSender();
+ sender.tell(new GetAllBucketsReply(getAllBuckets()), getSelf());
+ }
+
+ /**
+ * Helper to collect all known buckets
+ *
+ * @return self owned + remote buckets
+ */
+ Map<Address, Bucket> getAllBuckets(){
+ Map<Address, Bucket> all = new HashMap<>(remoteBuckets.size() + 1);
+
+ //first add the local bucket
+ all.put(selfAddress, localBucket);
+
+ //then get all remote buckets
+ all.putAll(remoteBuckets);
+
+ return all;
+ }
+
+ /**
+ * Replies with buckets for the requested members that this node knows about
+ *
+ * @param members requested members
+ */
+ void receiveGetBucketsByMembers(Set<Address> members){
+ final ActorRef sender = getSender();
+ Map<Address, Bucket> buckets = getBucketsByMembers(members);
+ sender.tell(new GetBucketsByMembersReply(buckets), getSelf());
+ }
+
+ /**
+ * Helper to collect buckets for requested members
+ *
+ * @param members requested members
+ * @return buckets for requested members
+ */
+ Map<Address, Bucket> getBucketsByMembers(Set<Address> members) {
+ Map<Address, Bucket> buckets = new HashMap<>();
+
+ //first add the local bucket if asked
+ if (members.contains(selfAddress))
+ buckets.put(selfAddress, localBucket);
+
+ //then get buckets for requested remote nodes
+ for (Address address : members){
+ if (remoteBuckets.containsKey(address))
+ buckets.put(address, remoteBuckets.get(address));
+ }
+
+ return buckets;
+ }
+
+ /**
+ * Replies to the sender with versions for all buckets known
+ */
+ void receiveGetBucketVersions(){
+ final ActorRef sender = getSender();
+ GetBucketVersionsReply reply = new GetBucketVersionsReply(versions);
+ sender.tell(reply, getSelf());
+ }
+
+ /**
+ * Update local copy of remote buckets where local copy's version is older.
+ * NOTE(review): this mutates the received map (removes self's entry) —
+ * callers should not reuse the map afterwards.
+ *
+ * @param receivedBuckets buckets sent by remote
+ * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}
+ */
+ void receiveUpdateRemoteBuckets(Map<Address, Bucket> receivedBuckets){
+
+ if (receivedBuckets == null || receivedBuckets.isEmpty())
+ return; //nothing to do
+
+ //Remote cant update self's bucket
+ receivedBuckets.remove(selfAddress);
+
+ for (Map.Entry<Address, Bucket> entry : receivedBuckets.entrySet()){
+
+ Long localVersion = versions.get(entry.getKey());
+ if (localVersion == null) localVersion = -1L;
+
+ Bucket receivedBucket = entry.getValue();
+
+ if (receivedBucket == null)
+ continue;
+
+ Long remoteVersion = receivedBucket.getVersion();
+ if (remoteVersion == null) remoteVersion = -1L;
+
+ //update only if remote version is newer
+ if ( remoteVersion > localVersion ) {
+ remoteBuckets.put(entry.getKey(), receivedBucket);
+ versions.put(entry.getKey(), remoteVersion);
+ }
+ }
+
+ log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+ }
+
+ ///
+ ///Getter Setters
+ ///
+
+ BucketImpl getLocalBucket() {
+ return localBucket;
+ }
+
+ void setLocalBucket(BucketImpl localBucket) {
+ this.localBucket = localBucket;
+ }
+
+ ConcurrentMap<Address, Bucket> getRemoteBuckets() {
+ return remoteBuckets;
+ }
+
+ void setRemoteBuckets(ConcurrentMap<Address, Bucket> remoteBuckets) {
+ this.remoteBuckets = remoteBuckets;
+ }
+
+ ConcurrentMap<Address, Long> getVersions() {
+ return versions;
+ }
+
+ void setVersions(ConcurrentMap<Address, Long> versions) {
+ this.versions = versions;
+ }
+
+ Address getSelfAddress() {
+ return selfAddress;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+/**
+ * Type of data that goes in {@link org.opendaylight.controller.remote.rpc.registry.gossip.Bucket}.
+ * The implementers should do deep cloning in copy() method.
+ */
+public interface Copier<T> {
+ /**
+ * Returns a deep copy of this object. Interface members are implicitly
+ * public; the redundant modifier was removed.
+ */
+ T copy();
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.Address;
+import akka.actor.Cancellable;
+import akka.actor.UntypedActor;
+import akka.cluster.Cluster;
+import akka.cluster.ClusterEvent;
+import akka.cluster.Member;
+import akka.dispatch.Mapper;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.pattern.Patterns;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipTick;
+
+/**
+ * Gossiper that syncs bucket store across nodes in the cluster.
+ * <p/>
+ * It keeps a local scheduler that periodically sends Gossip ticks to
+ * itself to send bucket store's bucket versions to a randomly selected remote
+ * gossiper.
+ * <p/>
+ * When bucket versions are received from a remote gossiper, it is compared
+ * with bucket store's bucket versions. Which ever buckets are newer
+ * locally, are sent to remote gossiper. If any bucket is older in bucket store,
+ * a gossip status is sent to remote gossiper so that it can send the newer buckets.
+ * <p/>
+ * When a bucket is received from a remote gossiper, its sent to the bucket store
+ * for update.
+ *
+ */
+
+public class Gossiper extends UntypedActor {
+
+ final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+
+ Cluster cluster = Cluster.get(getContext().system());
+
+ /**
+ * ActorSystem's address for the current cluster node.
+ */
+ private Address selfAddress = cluster.selfAddress();
+
+ /**
+ * All known cluster members
+ */
+ private List<Address> clusterMembers = new ArrayList<>();
+
+ private Cancellable gossipTask;
+
+ private Boolean autoStartGossipTicks = true;
+
+ public Gossiper(){}
+
+ /**
+ * Helpful for testing
+ * @param autoStartGossipTicks used for turning off gossip ticks during testing.
+ * Gossip tick can be manually sent.
+ */
+ public Gossiper(Boolean autoStartGossipTicks){
+ this.autoStartGossipTicks = autoStartGossipTicks;
+ }
+
+ @Override
+ public void preStart(){
+
+ cluster.subscribe(getSelf(),
+ ClusterEvent.initialStateAsEvents(),
+ ClusterEvent.MemberEvent.class,
+ ClusterEvent.UnreachableMember.class);
+
+ if (autoStartGossipTicks) {
+ gossipTask = getContext().system().scheduler().schedule(
+ new FiniteDuration(1, TimeUnit.SECONDS), //initial delay
+ new FiniteDuration(500, TimeUnit.MILLISECONDS), //interval
+ getSelf(), //target
+ new Messages.GossiperMessages.GossipTick(), //message
+ getContext().dispatcher(), //execution context
+ getSelf() //sender
+ );
+ }
+ }
+
+ @Override
+ public void postStop(){
+ if (cluster != null)
+ cluster.unsubscribe(getSelf());
+ if (gossipTask != null)
+ gossipTask.cancel();
+ }
+
+ @Override
+ public void onReceive(Object message) throws Exception {
+
+ log.debug("Received message: node[{}], message[{}]", selfAddress, message);
+
+ //Usually sent by self via gossip task defined above. But its not enforced.
+ //These ticks can be sent by another actor as well which is esp. useful while testing
+ if (message instanceof GossipTick)
+ receiveGossipTick();
+
+ //Message from remote gossiper with its bucket versions
+ else if (message instanceof GossipStatus)
+ receiveGossipStatus((GossipStatus) message);
+
+ //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
+ //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
+ //message with its local versions
+ else if (message instanceof GossipEnvelope)
+ receiveGossip((GossipEnvelope) message);
+
+ else if (message instanceof ClusterEvent.MemberUp) {
+ receiveMemberUp(((ClusterEvent.MemberUp) message).member());
+
+ } else if (message instanceof ClusterEvent.MemberRemoved) {
+ receiveMemberRemoveOrUnreachable(((ClusterEvent.MemberRemoved) message).member());
+
+ } else if ( message instanceof ClusterEvent.UnreachableMember){
+ receiveMemberRemoveOrUnreachable(((ClusterEvent.UnreachableMember) message).member());
+
+ } else
+ unhandled(message);
+ }
+
+ /**
+ * Remove member from local copy of member list. If member down is self, then stop the actor
+ *
+ * @param member who went down
+ */
+ void receiveMemberRemoveOrUnreachable(Member member) {
+ //if its self, then stop itself
+ if (selfAddress.equals(member.address())){
+ getContext().stop(getSelf());
+ return;
+ }
+
+ clusterMembers.remove(member.address());
+ log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+ }
+
+ /**
+ * Add member to the local copy of member list if it doesnt already
+ * @param member
+ */
+ void receiveMemberUp(Member member) {
+
+ if (selfAddress.equals(member.address()))
+ return; //ignore up notification for self
+
+ if (!clusterMembers.contains(member.address()))
+ clusterMembers.add(member.address());
+
+ log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+ }
+
+ /**
+ * Sends Gossip status to other members in the cluster. <br/>
+ * 1. If there are no member, ignore the tick. </br>
+ * 2. If there's only 1 member, send gossip status (bucket versions) to it. <br/>
+ * 3. If there are more than one member, randomly pick one and send gossip status (bucket versions) to it.
+ */
+ void receiveGossipTick(){
+ if (clusterMembers.size() == 0) return; //no members to send gossip status to
+
+ Address remoteMemberToGossipTo = null;
+
+ if (clusterMembers.size() == 1)
+ remoteMemberToGossipTo = clusterMembers.get(0);
+ else {
+ Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
+ remoteMemberToGossipTo = clusterMembers.get(randomIndex);
+ }
+
+ log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+ getLocalStatusAndSendTo(remoteMemberToGossipTo);
+ }
+
+ /**
+ * Process gossip status received from a remote gossiper. Remote versions are compared with
+ * the local copy. <p>
+ *
+ * For each bucket
+ * <ul>
+ * <li>If local copy is newer, the newer buckets are sent in GossipEnvelope to remote</li>
+ * <li>If local is older, GossipStatus is sent to remote so that it can reply with GossipEnvelope</li>
+ * <li>If both are same, noop</li>
+ * </ul>
+ *
+ * @param status bucket versions from a remote member
+ */
+ void receiveGossipStatus(GossipStatus status){
+ //Don't accept messages from non-members
+ if (!clusterMembers.contains(status.from()))
+ return;
+
+ final ActorRef sender = getSender();
+ Future<Object> futureReply = Patterns.ask(getContext().parent(), new GetBucketVersions(), 1000);
+ futureReply.map(getMapperToProcessRemoteStatus(sender, status), getContext().dispatcher());
+
+ }
+
+ /**
+ * Sends the received buckets in the envelope to the parent Bucket store.
+ *
+ * @param envelope contains buckets from a remote gossiper
+ */
+ void receiveGossip(GossipEnvelope envelope){
+ //TODO: Add more validations
+ if (!selfAddress.equals(envelope.to())) {
+ log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+ return;
+ }
+
+ updateRemoteBuckets(envelope.getBuckets());
+
+ }
+
+ /**
+ * Helper to send received buckets to bucket store
+ *
+ * @param buckets
+ */
+ void updateRemoteBuckets(Map<Address, Bucket> buckets) {
+
+ UpdateRemoteBuckets updateRemoteBuckets = new UpdateRemoteBuckets(buckets);
+ getContext().parent().tell(updateRemoteBuckets, getSelf());
+ }
+
+ /**
+ * Gets the buckets from bucket store for the given node addresses and sends them to remote gossiper
+ *
+ * @param remote remote node to send Buckets to
+ * @param addresses node addresses whose buckets needs to be sent
+ */
+ void sendGossipTo(final ActorRef remote, final Set<Address> addresses){
+
+ Future<Object> futureReply = Patterns.ask(getContext().parent(), new GetBucketsByMembers(addresses), 1000);
+ futureReply.map(getMapperToSendGossip(remote), getContext().dispatcher());
+ }
+
+ /**
+ * Gets bucket versions from bucket store and sends to the supplied address
+ *
+ * @param remoteActorSystemAddress remote gossiper to send to
+ */
+ void getLocalStatusAndSendTo(Address remoteActorSystemAddress){
+
+ //Get local status from bucket store and send to remote
+ Future<Object> futureReply = Patterns.ask(getContext().parent(), new GetBucketVersions(), 1000);
+ ActorSelection remoteRef = getContext().system().actorSelection(
+ remoteActorSystemAddress.toString() + getSelf().path().toStringWithoutAddress());
+
+ log.debug("Sending bucket versions to [{}]", remoteRef);
+
+ futureReply.map(getMapperToSendLocalStatus(remoteRef), getContext().dispatcher());
+
+ }
+
+ /**
+ * Helper to send bucket versions received from local store
+ * @param remote remote gossiper to send versions to
+ * @param localVersions bucket versions received from local store
+ */
+ void sendGossipStatusTo(ActorRef remote, Map<Address, Long> localVersions){
+
+ GossipStatus status = new GossipStatus(selfAddress, localVersions);
+ remote.tell(status, getSelf());
+ }
+
+ void sendGossipStatusTo(ActorSelection remote, Map<Address, Long> localVersions){
+
+ GossipStatus status = new GossipStatus(selfAddress, localVersions);
+ remote.tell(status, getSelf());
+ }
+
+ ///
+ /// Private factories to create mappers
+ ///
+
+ private Mapper<Object, Void> getMapperToSendLocalStatus(final ActorSelection remote){
+
+ return new Mapper<Object, Void>() {
+ @Override
+ public Void apply(Object replyMessage) {
+ if (replyMessage instanceof GetBucketVersionsReply) {
+ GetBucketVersionsReply reply = (GetBucketVersionsReply) replyMessage;
+ Map<Address, Long> localVersions = reply.getVersions();
+
+ sendGossipStatusTo(remote, localVersions);
+
+ }
+ return null;
+ }
+ };
+ }
+
+ /**
+ * Process bucket versions received from
+ * {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore}.
+ * Then this method compares remote bucket versions with local bucket versions.
+ * <ul>
+ * <li>The buckets that are newer locally, send
+ * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope}
+ * to remote
+ * <li>The buckets that are older locally, send
+ * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus}
+ * to remote so that remote sends GossipEnvelop.
+ * </ul>
+ *
+ * @param sender the remote member
+ * @param status bucket versions from a remote member
+ * @return a {@link akka.dispatch.Mapper} that gets evaluated in future
+ *
+ */
+ private Mapper<Object, Void> getMapperToProcessRemoteStatus(final ActorRef sender, final GossipStatus status){
+
+ final Map<Address, Long> remoteVersions = status.getVersions();
+
+ return new Mapper<Object, Void>() {
+ @Override
+ public Void apply(Object replyMessage) {
+ if (replyMessage instanceof GetBucketVersionsReply) {
+ GetBucketVersionsReply reply = (GetBucketVersionsReply) replyMessage;
+ Map<Address, Long> localVersions = reply.getVersions();
+
+ //diff between remote list and local
+ Set<Address> localIsOlder = new HashSet<>();
+ localIsOlder.addAll(remoteVersions.keySet());
+ localIsOlder.removeAll(localVersions.keySet());
+
+ //diff between local list and remote
+ Set<Address> localIsNewer = new HashSet<>();
+ localIsNewer.addAll(localVersions.keySet());
+ localIsNewer.removeAll(remoteVersions.keySet());
+
+
+ for (Address address : remoteVersions.keySet()){
+
+ if (localVersions.get(address) == null || remoteVersions.get(address) == null)
+ continue; //this condition is taken care of by above diffs
+ if (localVersions.get(address) < remoteVersions.get(address))
+ localIsOlder.add(address);
+ else if (localVersions.get(address) > remoteVersions.get(address))
+ localIsNewer.add(address);
+ else
+ continue;
+ }
+
+ if (!localIsOlder.isEmpty())
+ sendGossipStatusTo(sender, localVersions );
+
+ if (!localIsNewer.isEmpty())
+ sendGossipTo(sender, localIsNewer);//send newer buckets to remote
+
+ }
+ return null;
+ }
+ };
+ }
+
+ /**
+ * Processes the message from {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore}
+ * that contains {@link org.opendaylight.controller.remote.rpc.registry.gossip.Bucket}.
+ * These buckets are sent to a remote member encapsulated in
+ * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope}
+ *
+ * @param sender the remote member that sent
+ * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus}
+ * in reply to which bucket is being sent back
+ * @return a {@link akka.dispatch.Mapper} that gets evaluated in future
+ *
+ */
+ private Mapper<Object, Void> getMapperToSendGossip(final ActorRef sender) {
+
+ return new Mapper<Object, Void>() {
+ @Override
+ public Void apply(Object msg) {
+ if (msg instanceof GetBucketsByMembersReply) {
+ Map<Address, Bucket> buckets = ((GetBucketsByMembersReply) msg).getBuckets();
+ log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+ GossipEnvelope envelope = new GossipEnvelope(selfAddress, sender.path().address(), buckets);
+ sender.tell(envelope, getSelf());
+ }
+ return null;
+ }
+ };
+ }
+
+ ///
+ ///Getter Setters
+ ///
+ List<Address> getClusterMembers() {
+ return clusterMembers;
+ }
+
+ void setClusterMembers(List<Address> clusterMembers) {
+ this.clusterMembers = clusterMembers;
+ }
+
+ Address getSelfAddress() {
+ return selfAddress;
+ }
+
+ void setSelfAddress(Address selfAddress) {
+ this.selfAddress = selfAddress;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.Address;
+import com.google.common.base.Preconditions;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.ContainsBucketVersions;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.ContainsBuckets;
+
+
+/**
+ * These messages are used by {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore} and
+ * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper} actors.
+ */
+public class Messages {
+
+ public static class BucketStoreMessages{
+
+ public static class GetLocalBucket implements Serializable{}
+
+ public static class ContainsBucket implements Serializable {
+ final private Bucket bucket;
+
+ public ContainsBucket(Bucket bucket){
+ Preconditions.checkArgument(bucket != null, "bucket can not be null");
+ this.bucket = bucket;
+ }
+
+ public Bucket getBucket(){
+ return bucket;
+ }
+
+ }
+
+ public static class UpdateBucket extends ContainsBucket implements Serializable {
+ public UpdateBucket(Bucket bucket){
+ super(bucket);
+ }
+ }
+
+ public static class GetLocalBucketReply extends ContainsBucket implements Serializable {
+ public GetLocalBucketReply(Bucket bucket){
+ super(bucket);
+ }
+ }
+
+ public static class GetAllBuckets implements Serializable{}
+
+ public static class GetBucketsByMembers implements Serializable{
+ private Set<Address> members;
+
+ public GetBucketsByMembers(Set<Address> members){
+ Preconditions.checkArgument(members != null, "members can not be null");
+ this.members = members;
+ }
+
+ public Set<Address> getMembers() {
+ return new HashSet<>(members);
+ }
+ }
+
+ public static class ContainsBuckets implements Serializable{
+ private Map<Address, Bucket> buckets;
+
+ public ContainsBuckets(Map<Address, Bucket> buckets){
+ Preconditions.checkArgument(buckets != null, "buckets can not be null");
+ this.buckets = buckets;
+ }
+
+ public Map<Address, Bucket> getBuckets() {
+ Map<Address, Bucket> copy = new HashMap<>(buckets.size());
+
+ for (Map.Entry<Address, Bucket> entry : buckets.entrySet()){
+ //ignore null entries
+ if ( (entry.getKey() == null) || (entry.getValue() == null) )
+ continue;
+ copy.put(entry.getKey(), entry.getValue());
+ }
+ return new HashMap<>(copy);
+ }
+ }
+
+ public static class GetAllBucketsReply extends ContainsBuckets implements Serializable{
+ public GetAllBucketsReply(Map<Address, Bucket> buckets) {
+ super(buckets);
+ }
+ }
+
+ public static class GetBucketsByMembersReply extends ContainsBuckets implements Serializable{
+ public GetBucketsByMembersReply(Map<Address, Bucket> buckets) {
+ super(buckets);
+ }
+ }
+
+ public static class GetBucketVersions implements Serializable{}
+
+ public static class ContainsBucketVersions implements Serializable{
+ Map<Address, Long> versions;
+
+ public ContainsBucketVersions(Map<Address, Long> versions) {
+ Preconditions.checkArgument(versions != null, "versions can not be null");
+
+ this.versions = versions;
+ }
+
+ public Map<Address, Long> getVersions() {
+ return Collections.unmodifiableMap(versions);
+ }
+
+ }
+
+ public static class GetBucketVersionsReply extends ContainsBucketVersions implements Serializable{
+ public GetBucketVersionsReply(Map<Address, Long> versions) {
+ super(versions);
+ }
+ }
+
+ public static class UpdateRemoteBuckets extends ContainsBuckets implements Serializable{
+ public UpdateRemoteBuckets(Map<Address, Bucket> buckets) {
+ super(buckets);
+ }
+ }
+ }
+
+ public static class GossiperMessages{
+ public static class Tick implements Serializable {}
+
+ public static final class GossipTick extends Tick {}
+
+ public static final class GossipStatus extends ContainsBucketVersions implements Serializable{
+ private Address from;
+
+ public GossipStatus(Address from, Map<Address, Long> versions) {
+ super(versions);
+ this.from = from;
+ }
+
+ public Address from() {
+ return from;
+ }
+ }
+
+ public static final class GossipEnvelope extends ContainsBuckets implements Serializable {
+ private final Address from;
+ private final Address to;
+
+ public GossipEnvelope(Address from, Address to, Map<Address, Bucket> buckets) {
+ super(buckets);
+ Preconditions.checkArgument(to != null, "Recipient of message must not be null");
+ this.to = to;
+ this.from = from;
+ }
+
+ public Address from() {
+ return from;
+ }
+
+ public Address to() {
+ return to;
+ }
+ }
+ }
+}
import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
import org.opendaylight.controller.remote.rpc.registry.ClusterWrapper;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistryOld;
import org.opendaylight.controller.sal.common.util.Rpcs;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
import org.opendaylight.controller.sal.core.api.Broker;
@Test
public void testInvokeRpcError() throws URISyntaxException {
new JavaTestKit(system) {{
- ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(Mockito.mock(ClusterWrapper.class)));
+ ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(Mockito.mock(ClusterWrapper.class)));
Broker.ProviderSession brokerSession = Mockito.mock(Broker.ProviderSession.class);
SchemaContext schemaContext = mock(SchemaContext.class);
ActorRef rpcBroker = system.actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext));
@Test
public void testInvokeRpc() throws URISyntaxException {
new JavaTestKit(system) {{
- ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(mock(ClusterWrapper.class)));
+ ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(mock(ClusterWrapper.class)));
Broker.ProviderSession brokerSession = mock(Broker.ProviderSession.class);
SchemaContext schemaContext = mock(SchemaContext.class);
ActorRef rpcBroker = system.actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext));
@Test
public void testInvokeRoutedRpcError() throws URISyntaxException {
new JavaTestKit(system) {{
- ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(Mockito.mock(ClusterWrapper.class)));
+ ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(Mockito.mock(ClusterWrapper.class)));
Broker.ProviderSession brokerSession = Mockito.mock(Broker.ProviderSession.class);
SchemaContext schemaContext = mock(SchemaContext.class);
ActorRef rpcBroker = system.actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext));
@Test
public void testInvokeRoutedRpc() throws URISyntaxException {
new JavaTestKit(system) {{
- ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(mock(ClusterWrapper.class)));
+ ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(mock(ClusterWrapper.class)));
Broker.ProviderSession brokerSession = mock(Broker.ProviderSession.class);
SchemaContext schemaContext = mock(SchemaContext.class);
ActorRef rpcBroker = system.actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext));
import java.util.HashSet;
import java.util.Set;
-public class RoutingTableTest {
+public class RoutingTableOldTest {
- private RoutingTable<RpcRouter.RouteIdentifier<?, ?, ?>, String> routingTable =
- new RoutingTable<>();
+ private RoutingTableOld<RpcRouter.RouteIdentifier<?, ?, ?>, String> routingTable =
+ new RoutingTableOld<>();
@Test
public void addGlobalRouteNullRouteIdTest() {
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc.registry;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.testkit.JavaTestKit;
+import junit.framework.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.remote.rpc.RouteIdentifierImpl;
+import org.opendaylight.controller.remote.rpc.messages.AddRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.AddRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpcReply;
+import org.opendaylight.controller.remote.rpc.messages.GetRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRpcReply;
+import org.opendaylight.controller.remote.rpc.messages.RemoveRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.RemoveRpc;
+import org.opendaylight.controller.sal.connector.api.RpcRouter;
+import org.opendaylight.yangtools.yang.common.QName;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Set;
+
+public class RpcRegistryOldTest {
+
+ static ActorSystem system;
+
+
+ @BeforeClass
+ public static void setup() {
+ system = ActorSystem.create();
+ }
+
+ @AfterClass
+ public static void teardown() {
+ JavaTestKit.shutdownActorSystem(system);
+ system = null;
+ }
+
+ /**
+ This test adds, reads and removes an entry in the global rpc registry
+ */
+ @Test
+ public void testGlobalRpc() throws URISyntaxException {
+ new JavaTestKit(system) {{
+ ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(Mockito.mock(ClusterWrapper.class)));
+ QName type = new QName(new URI("actor1"), "actor1");
+ RouteIdentifierImpl routeId = new RouteIdentifierImpl(null, type, null);
+ final String route = "actor1";
+
+ AddRpc rpcMsg = new AddRpc(routeId, route);
+ rpcRegistry.tell(rpcMsg, getRef());
+ expectMsgEquals(duration("2 second"), "Success");
+
+ GetRpc getRpc = new GetRpc(routeId);
+ rpcRegistry.tell(getRpc, getRef());
+
+ Boolean getMsg = new ExpectMsg<Boolean>("GetRpcReply") {
+ protected Boolean match(Object in) {
+ if (in instanceof GetRpcReply) {
+ GetRpcReply reply = (GetRpcReply)in;
+ return route.equals(reply.getRoutePath());
+ } else {
+ throw noMatch();
+ }
+ }
+ }.get(); // this extracts the received message
+
+ Assert.assertTrue(getMsg);
+
+ RemoveRpc removeMsg = new RemoveRpc(routeId);
+ rpcRegistry.tell(removeMsg, getRef());
+ expectMsgEquals(duration("2 second"), "Success");
+
+ rpcRegistry.tell(getRpc, getRef());
+
+ Boolean getNullMsg = new ExpectMsg<Boolean>("GetRpcReply") {
+ protected Boolean match(Object in) {
+ if (in instanceof GetRpcReply) {
+ GetRpcReply reply = (GetRpcReply)in;
+ return reply.getRoutePath() == null;
+ } else {
+ throw noMatch();
+ }
+ }
+ }.get();
+ Assert.assertTrue(getNullMsg);
+ }};
+
+ }
+
+ /**
+ This test adds, reads and removes an entry in the routed rpc registry
+ */
+ @Test
+ public void testRoutedRpc() throws URISyntaxException {
+ new JavaTestKit(system) {{
+ ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(Mockito.mock(ClusterWrapper.class)));
+ QName type = new QName(new URI("actor1"), "actor1");
+ RouteIdentifierImpl routeId = new RouteIdentifierImpl(null, type, null);
+ final String route = "actor1";
+
+ Set<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = new HashSet<>();
+ routeIds.add(routeId);
+
+ AddRoutedRpc rpcMsg = new AddRoutedRpc(routeIds, route);
+ rpcRegistry.tell(rpcMsg, getRef());
+ expectMsgEquals(duration("2 second"), "Success");
+
+ GetRoutedRpc getRpc = new GetRoutedRpc(routeId);
+ rpcRegistry.tell(getRpc, getRef());
+
+ Boolean getMsg = new ExpectMsg<Boolean>("GetRoutedRpcReply") {
+ protected Boolean match(Object in) {
+ if (in instanceof GetRoutedRpcReply) {
+ GetRoutedRpcReply reply = (GetRoutedRpcReply)in;
+ return route.equals(reply.getRoutePath());
+ } else {
+ throw noMatch();
+ }
+ }
+ }.get(); // this extracts the received message
+
+ Assert.assertTrue(getMsg);
+
+ RemoveRoutedRpc removeMsg = new RemoveRoutedRpc(routeIds, route);
+ rpcRegistry.tell(removeMsg, getRef());
+ expectMsgEquals(duration("2 second"), "Success");
+
+ rpcRegistry.tell(getRpc, getRef());
+
+ Boolean getNullMsg = new ExpectMsg<Boolean>("GetRoutedRpcReply") {
+ protected Boolean match(Object in) {
+ if (in instanceof GetRoutedRpcReply) {
+ GetRoutedRpcReply reply = (GetRoutedRpcReply)in;
+ return reply.getRoutePath() == null;
+ } else {
+ throw noMatch();
+ }
+ }
+ }.get();
+ Assert.assertTrue(getNullMsg);
+ }};
+
+ }
+
+}
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
package org.opendaylight.controller.remote.rpc.registry;
+import akka.actor.ActorPath;
import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
+import akka.actor.ChildActorPath;
+import akka.actor.Props;
+import akka.japi.Pair;
import akka.testkit.JavaTestKit;
-import junit.framework.Assert;
+import com.typesafe.config.ConfigFactory;
+import org.junit.After;
import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.mockito.Mockito;
import org.opendaylight.controller.remote.rpc.RouteIdentifierImpl;
-import org.opendaylight.controller.remote.rpc.messages.AddRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.AddRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpcReply;
-import org.opendaylight.controller.remote.rpc.messages.GetRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRpcReply;
-import org.opendaylight.controller.remote.rpc.messages.RemoveRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.RemoveRpc;
import org.opendaylight.controller.sal.connector.api.RpcRouter;
import org.opendaylight.yangtools.yang.common.QName;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
import java.net.URI;
import java.net.URISyntaxException;
-import java.util.HashSet;
-import java.util.Set;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRoutersReply;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
public class RpcRegistryTest {
- static ActorSystem system;
-
-
- @BeforeClass
- public static void setup() {
- system = ActorSystem.create();
- }
-
- @AfterClass
- public static void teardown() {
- JavaTestKit.shutdownActorSystem(system);
- system = null;
- }
-
- /**
- This test add, read and remove an entry in global rpc
- */
- @Test
- public void testGlobalRpc() throws URISyntaxException {
- new JavaTestKit(system) {{
- ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(Mockito.mock(ClusterWrapper.class)));
- QName type = new QName(new URI("actor1"), "actor1");
- RouteIdentifierImpl routeId = new RouteIdentifierImpl(null, type, null);
- final String route = "actor1";
-
- AddRpc rpcMsg = new AddRpc(routeId, route);
- rpcRegistry.tell(rpcMsg, getRef());
- expectMsgEquals(duration("2 second"), "Success");
-
- GetRpc getRpc = new GetRpc(routeId);
- rpcRegistry.tell(getRpc, getRef());
-
- Boolean getMsg = new ExpectMsg<Boolean>("GetRpcReply") {
- protected Boolean match(Object in) {
- if (in instanceof GetRpcReply) {
- GetRpcReply reply = (GetRpcReply)in;
- return route.equals(reply.getRoutePath());
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ private static ActorSystem node1;
+ private static ActorSystem node2;
+ private static ActorSystem node3;
- Assert.assertTrue(getMsg);
+ private ActorRef registry1;
+ private ActorRef registry2;
+ private ActorRef registry3;
- RemoveRpc removeMsg = new RemoveRpc(routeId);
- rpcRegistry.tell(removeMsg, getRef());
- expectMsgEquals(duration("2 second"), "Success");
+ @BeforeClass
+ public static void setup() throws InterruptedException {
+ Thread.sleep(1000); //give some time for previous test to close netty ports
+ node1 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberA"));
+ node2 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberB"));
+ node3 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberC"));
+ }
- rpcRegistry.tell(getRpc, getRef());
+ @AfterClass
+ public static void teardown(){
+ JavaTestKit.shutdownActorSystem(node1);
+ JavaTestKit.shutdownActorSystem(node2);
+ JavaTestKit.shutdownActorSystem(node3);
+ if (node1 != null)
+ node1.shutdown();
+ if (node2 != null)
+ node2.shutdown();
+ if (node3 != null)
+ node3.shutdown();
- Boolean getNullMsg = new ExpectMsg<Boolean>("GetRpcReply") {
- protected Boolean match(Object in) {
- if (in instanceof GetRpcReply) {
- GetRpcReply reply = (GetRpcReply)in;
- return reply.getRoutePath() == null;
- } else {
- throw noMatch();
- }
- }
- }.get();
- Assert.assertTrue(getNullMsg);
- }};
-
- }
-
- /**
- This test add, read and remove an entry in routed rpc
- */
- @Test
- public void testRoutedRpc() throws URISyntaxException {
- new JavaTestKit(system) {{
- ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(Mockito.mock(ClusterWrapper.class)));
- QName type = new QName(new URI("actor1"), "actor1");
- RouteIdentifierImpl routeId = new RouteIdentifierImpl(null, type, null);
- final String route = "actor1";
-
- Set<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = new HashSet<>();
- routeIds.add(routeId);
-
- AddRoutedRpc rpcMsg = new AddRoutedRpc(routeIds, route);
- rpcRegistry.tell(rpcMsg, getRef());
- expectMsgEquals(duration("2 second"), "Success");
-
- GetRoutedRpc getRpc = new GetRoutedRpc(routeId);
- rpcRegistry.tell(getRpc, getRef());
-
- Boolean getMsg = new ExpectMsg<Boolean>("GetRoutedRpcReply") {
- protected Boolean match(Object in) {
- if (in instanceof GetRoutedRpcReply) {
- GetRoutedRpcReply reply = (GetRoutedRpcReply)in;
- return route.equals(reply.getRoutePath());
- } else {
- throw noMatch();
- }
+ }
+
+ @Before
+ public void createRpcRegistry() throws InterruptedException {
+ registry1 = node1.actorOf(Props.create(RpcRegistry.class));
+ registry2 = node2.actorOf(Props.create(RpcRegistry.class));
+ registry3 = node3.actorOf(Props.create(RpcRegistry.class));
+ }
+
+ @After
+ public void stopRpcRegistry() throws InterruptedException {
+ if (registry1 != null)
+ node1.stop(registry1);
+ if (registry2 != null)
+ node2.stop(registry2);
+ if (registry3 != null)
+ node3.stop(registry3);
+ }
+
+ /**
+ * One node cluster.
+ * 1. Register an rpc, ensure its router can be found
+ * 2. Then remove the rpc, ensure it is deleted
+ *
+ * @throws URISyntaxException
+ * @throws InterruptedException
+ */
+ @Test
+ public void testAddRemoveRpcOnSameNode() throws URISyntaxException, InterruptedException {
+
+ final JavaTestKit mockBroker = new JavaTestKit(node1);
+
+ //Add rpc on node 1
+ registry1.tell(new SetLocalRouter(mockBroker.getRef()), mockBroker.getRef());
+ registry1.tell(getAddRouteMessage(), mockBroker.getRef());
+
+ Thread.sleep(1000);//
+
+ //find the route on node 1's registry
+ registry1.tell(new FindRouters(createRouteId()), mockBroker.getRef());
+ FindRoutersReply message = mockBroker.expectMsgClass(JavaTestKit.duration("10 second"), FindRoutersReply.class);
+ List<Pair<ActorRef, Long>> pairs = message.getRouterWithUpdateTime();
+
+ validateRouterReceived(pairs, mockBroker.getRef());
+
+ //Now remove rpc
+ registry1.tell(getRemoveRouteMessage(), mockBroker.getRef());
+ Thread.sleep(1000);
+ //find the route on node 1's registry
+ registry1.tell(new FindRouters(createRouteId()), mockBroker.getRef());
+ message = mockBroker.expectMsgClass(JavaTestKit.duration("10 second"), FindRoutersReply.class);
+ pairs = message.getRouterWithUpdateTime();
+
+ Assert.assertTrue(pairs.isEmpty());
+ }
+
+ /**
+ * Three node cluster.
+ * 1. Register rpc on 1 node, ensure its router can be found on other 2.
+ * 2. Remove the rpc on 1 node, ensure it is removed on the other 2.
+ *
+ * @throws URISyntaxException
+ * @throws InterruptedException
+ */
+ @Test
+ public void testRpcAddRemoveInCluster() throws URISyntaxException, InterruptedException {
+
+ validateSystemStartup();
+
+ final JavaTestKit mockBroker1 = new JavaTestKit(node1);
+ final JavaTestKit mockBroker2 = new JavaTestKit(node2);
+ final JavaTestKit mockBroker3 = new JavaTestKit(node3);
+
+ //Add rpc on node 1
+ registry1.tell(new SetLocalRouter(mockBroker1.getRef()), mockBroker1.getRef());
+ registry1.tell(getAddRouteMessage(), mockBroker1.getRef());
+
+ Thread.sleep(1000);// give some time for bucket store data sync
+
+ //find the route in node 2's registry
+ List<Pair<ActorRef, Long>> pairs = findRouters(registry2, mockBroker2);
+ validateRouterReceived(pairs, mockBroker1.getRef());
+
+ //find the route in node 3's registry
+ pairs = findRouters(registry3, mockBroker3);
+ validateRouterReceived(pairs, mockBroker1.getRef());
+
+ //Now remove
+ registry1.tell(getRemoveRouteMessage(), mockBroker1.getRef());
+ Thread.sleep(1000);// give some time for bucket store data sync
+
+ pairs = findRouters(registry2, mockBroker2);
+ Assert.assertTrue(pairs.isEmpty());
+
+ pairs = findRouters(registry3, mockBroker3);
+ Assert.assertTrue(pairs.isEmpty());
+ }
+
+ /**
+ * Three node cluster.
+ * Register rpc on 2 nodes. Ensure 2 routers are found on 3rd.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testAnRpcAddedOnMultiNodesShouldReturnMultiRouter() throws Exception {
+
+ validateSystemStartup();
+
+ final JavaTestKit mockBroker1 = new JavaTestKit(node1);
+ final JavaTestKit mockBroker2 = new JavaTestKit(node2);
+ final JavaTestKit mockBroker3 = new JavaTestKit(node3);
+
+ //Thread.sleep(5000);//let system come up
+
+ //Add rpc on node 1
+ registry1.tell(new SetLocalRouter(mockBroker1.getRef()), mockBroker1.getRef());
+ registry1.tell(getAddRouteMessage(), mockBroker1.getRef());
+
+ //Add same rpc on node 2
+ registry2.tell(new SetLocalRouter(mockBroker2.getRef()), mockBroker2.getRef());
+ registry2.tell(getAddRouteMessage(), mockBroker2.getRef());
+
+ registry3.tell(new SetLocalRouter(mockBroker3.getRef()), mockBroker3.getRef());
+ Thread.sleep(1000);// give some time for bucket store data sync
+
+ //find the route in node 3's registry
+ registry3.tell(new FindRouters(createRouteId()), mockBroker3.getRef());
+ FindRoutersReply message = mockBroker3.expectMsgClass(JavaTestKit.duration("10 second"), FindRoutersReply.class);
+ List<Pair<ActorRef, Long>> pairs = message.getRouterWithUpdateTime();
+
+ validateMultiRouterReceived(pairs, mockBroker1.getRef(), mockBroker2.getRef());
+
+ }
+
+ private List<Pair<ActorRef, Long>> findRouters(ActorRef registry, JavaTestKit receivingActor) throws URISyntaxException {
+ registry.tell(new FindRouters(createRouteId()), receivingActor.getRef());
+ FindRoutersReply message = receivingActor.expectMsgClass(JavaTestKit.duration("10 second"), FindRoutersReply.class);
+ return message.getRouterWithUpdateTime();
+ }
+
+ private void validateMultiRouterReceived(List<Pair<ActorRef, Long>> actual, ActorRef... expected) {
+ Assert.assertTrue(actual != null);
+ Assert.assertTrue(actual.size() == expected.length);
+ }
+
+ private void validateRouterReceived(List<Pair<ActorRef, Long>> actual, ActorRef expected){
+ Assert.assertTrue(actual != null);
+ Assert.assertTrue(actual.size() == 1);
+
+ for (Pair<ActorRef, Long> pair : actual){
+ Assert.assertTrue(expected.path().uid() == pair.first().path().uid());
}
- }.get(); // this extracts the received message
+ }
+
+ private void validateSystemStartup() throws InterruptedException {
+
+ Thread.sleep(5000);
+ ActorPath gossiper1Path = new ChildActorPath(new ChildActorPath(registry1.path(), "store"), "gossiper");
+ ActorPath gossiper2Path = new ChildActorPath(new ChildActorPath(registry2.path(), "store"), "gossiper");
+ ActorPath gossiper3Path = new ChildActorPath(new ChildActorPath(registry3.path(), "store"), "gossiper");
+
+ ActorSelection gossiper1 = node1.actorSelection(gossiper1Path);
+ ActorSelection gossiper2 = node2.actorSelection(gossiper2Path);
+ ActorSelection gossiper3 = node3.actorSelection(gossiper3Path);
+
- Assert.assertTrue(getMsg);
+ if (!resolveReference(gossiper1, gossiper2, gossiper3))
+ Assert.fail("Could not find gossipers");
+ }
- RemoveRoutedRpc removeMsg = new RemoveRoutedRpc(routeIds, route);
- rpcRegistry.tell(removeMsg, getRef());
- expectMsgEquals(duration("2 second"), "Success");
+ private Boolean resolveReference(ActorSelection... gossipers) throws InterruptedException {
- rpcRegistry.tell(getRpc, getRef());
+ Boolean resolved = true;
- Boolean getNullMsg = new ExpectMsg<Boolean>("GetRoutedRpcReply") {
- protected Boolean match(Object in) {
- if (in instanceof GetRoutedRpcReply) {
- GetRoutedRpcReply reply = (GetRoutedRpcReply)in;
- return reply.getRoutePath() == null;
- } else {
- throw noMatch();
- }
+ for (int i=0; i< 5; i++) {
+ Thread.sleep(1000);
+ for (ActorSelection gossiper : gossipers) {
+ Future<ActorRef> future = gossiper.resolveOne(new FiniteDuration(5000, TimeUnit.MILLISECONDS));
+
+ ActorRef ref = null;
+ try {
+ ref = Await.result(future, new FiniteDuration(10000, TimeUnit.MILLISECONDS));
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ if (ref == null)
+ resolved = false;
+ }
+
+ if (resolved) break;
}
- }.get();
- Assert.assertTrue(getNullMsg);
- }};
+ return resolved;
+ }
+
+ private AddOrUpdateRoutes getAddRouteMessage() throws URISyntaxException {
+ return new AddOrUpdateRoutes(createRouteIds());
+ }
+
+ private RemoveRoutes getRemoveRouteMessage() throws URISyntaxException {
+ return new RemoveRoutes(createRouteIds());
+ }
- }
+ private List<RpcRouter.RouteIdentifier<?,?,?>> createRouteIds() throws URISyntaxException {
+ QName type = new QName(new URI("/mockrpc"), "mockrpc");
+ List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
+ routeIds.add(new RouteIdentifierImpl(null, type, null));
+ return routeIds;
+ }
-}
+ private RpcRouter.RouteIdentifier<?,?,?> createRouteId() throws URISyntaxException {
+ QName type = new QName(new URI("/mockrpc"), "mockrpc");
+ return new RouteIdentifierImpl(null, type, null);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.testkit.TestActorRef;
+import akka.testkit.TestProbe;
+import com.typesafe.config.ConfigFactory;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.TerminationMonitor;
+
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+
+public class BucketStoreTest {
+
+ private static ActorSystem system;
+ private static BucketStore store;
+
+ private BucketStore mockStore;
+
+ @BeforeClass
+ public static void setup() {
+
+ system = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("unit-test"));
+ system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
+
+ store = createStore();
+ }
+
+ @AfterClass
+ public static void teardown() {
+ system.shutdown();
+ }
+
+ @Before
+ public void createMocks(){
+ mockStore = spy(store);
+ }
+
+ @After
+ public void resetMocks(){
+ reset(mockStore);
+ }
+
+ @Test
+ public void testReceiveUpdateBucket_WhenInputBucketShouldUpdateVersion(){
+ Bucket bucket = new BucketImpl();
+ Long expectedVersion = bucket.getVersion();
+
+ mockStore.receiveUpdateBucket(bucket);
+
+ Assert.assertEquals(bucket, mockStore.getLocalBucket());
+ Assert.assertEquals(expectedVersion, mockStore.getLocalBucket().getVersion());
+ }
+
+ /**
+ * Creates a BucketStore actor and returns the underlying instance of the BucketStore class.
+ *
+ * @return instance of BucketStore class
+ */
+ private static BucketStore createStore(){
+ TestProbe mockActor = new TestProbe(system);
+ ActorRef mockGossiper = mockActor.ref();
+ final Props props = Props.create(BucketStore.class, mockGossiper);
+ final TestActorRef<BucketStore> testRef = TestActorRef.create(system, props, "testStore");
+
+ return testRef.underlyingActor();
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.ActorSystem;
+import akka.actor.Address;
+import akka.actor.Props;
+import akka.testkit.TestActorRef;
+import com.typesafe.config.ConfigFactory;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.TerminationMonitor;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyMap;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
+
+
+/**
+ * Unit tests for {@link Gossiper} message handling: gossip ticks, gossip status
+ * messages, and gossip envelopes. A single {@code Gossiper} actor is created once
+ * per class via {@link TestActorRef}; each test wraps it in a Mockito spy so that
+ * interactions can be stubbed and verified without running a real cluster.
+ */
+public class GossiperTest {
+
+ // Shared actor system and Gossiper instance, created once in setup() and reused by all tests.
+ private static ActorSystem system;
+ private static Gossiper gossiper;
+
+ // Per-test Mockito spy around the shared gossiper; recreated before each test.
+ private Gossiper mockGossiper;
+
+ /**
+ * Creates the actor system (using the "unit-test" config section) and the
+ * Gossiper under test. Sleeps first so a previously-run test's system can
+ * release its Netty port before this one binds it.
+ */
+ @BeforeClass
+ public static void setup() throws InterruptedException {
+ Thread.sleep(1000);//give some time for previous test to stop the system. Netty port conflict arises otherwise.
+ system = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("unit-test"));
+ system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
+
+ gossiper = createGossiper();
+ }
+
+ /** Shuts the shared actor system down after all tests have run. */
+ @AfterClass
+ public static void teardown() {
+ if (system != null)
+ system.shutdown();
+ }
+
+ /** Wraps the shared gossiper in a fresh spy before each test. */
+ @Before
+ public void createMocks(){
+ mockGossiper = spy(gossiper);
+ }
+
+ /** Clears recorded interactions and stubbings on the spy after each test. */
+ @After
+ public void resetMocks(){
+ reset(mockGossiper);
+
+ }
+
+ /**
+ * With no remote cluster members configured, a gossip tick should not
+ * trigger sending local status anywhere.
+ */
+ @Test
+ public void testReceiveGossipTick_WhenNoRemoteMemberShouldIgnore(){
+
+ mockGossiper.setClusterMembers(Collections.EMPTY_LIST);
+ doNothing().when(mockGossiper).getLocalStatusAndSendTo(any(Address.class));
+ mockGossiper.receiveGossipTick();
+ verify(mockGossiper, times(0)).getLocalStatusAndSendTo(any(Address.class));
+ }
+
+ /**
+ * With one remote member present, a gossip tick should send local status
+ * exactly once.
+ */
+ @Test
+ public void testReceiveGossipTick_WhenRemoteMemberExistsShouldSendStatus(){
+ List<Address> members = new ArrayList<>();
+ Address remote = new Address("tcp", "member");
+ members.add(remote);
+
+ mockGossiper.setClusterMembers(members);
+ doNothing().when(mockGossiper).getLocalStatusAndSendTo(any(Address.class));
+ mockGossiper.receiveGossipTick();
+ verify(mockGossiper, times(1)).getLocalStatusAndSendTo(any(Address.class));
+ }
+
+ /**
+ * A gossip status originating from an address that is not a known cluster
+ * member should be ignored (the gossiper never consults the sender).
+ */
+ @Test
+ public void testReceiveGossipStatus_WhenSenderIsNonMemberShouldIgnore(){
+
+ Address nonMember = new Address("tcp", "non-member");
+ GossipStatus remoteStatus = new GossipStatus(nonMember, mock(Map.class));
+
+ //add a member
+ List<Address> members = new ArrayList<>();
+ members.add(new Address("tcp", "member"));
+
+ mockGossiper.setClusterMembers(members);
+ mockGossiper.receiveGossipStatus(remoteStatus);
+ verify(mockGossiper, times(0)).getSender();
+ }
+
+ /**
+ * A gossip envelope whose destination is not this node should be ignored:
+ * remote buckets must not be updated.
+ */
+ @Test
+ public void testReceiveGossip_WhenNotAddressedToSelfShouldIgnore(){
+ Address notSelf = new Address("tcp", "not-self");
+
+ GossipEnvelope envelope = new GossipEnvelope(notSelf, notSelf, mock(Map.class));
+ doNothing().when(mockGossiper).updateRemoteBuckets(anyMap());
+ mockGossiper.receiveGossip(envelope);
+ verify(mockGossiper, times(0)).updateRemoteBuckets(anyMap());
+ }
+
+ /**
+ * Create Gossiper actor and return the underlying instance of Gossiper class.
+ *
+ * @return instance of Gossiper class
+ */
+ private static Gossiper createGossiper(){
+
+ final Props props = Props.create(Gossiper.class, false);
+ final TestActorRef<Gossiper> testRef = TestActorRef.create(system, props, "testGossiper");
+
+ return testRef.underlyingActor();
+ }
+}
\ No newline at end of file
--- /dev/null
+odl-cluster{
+ akka {
+ loglevel = "INFO"
+ #log-config-on-start = on
+
+ actor {
+ provider = "akka.cluster.ClusterActorRefProvider"
+ debug{
+ #autoreceive = on
+ #lifecycle = on
+
+ }
+ }
+ remote {
+ log-received-messages = on
+ log-sent-messages = on
+
+ log-remote-lifecycle-events = off
+ netty.tcp {
+ hostname = "localhost"
+ port = 2551
+ }
+ }
+
+ cluster {
+ seed-nodes = ["akka.tcp://opendaylight-rpc@localhost:2551"]
+
+ auto-down-unreachable-after = 10s
+ }
+ }
+}
+unit-test{
+ akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+ actor {
+ provider = "akka.cluster.ClusterActorRefProvider"
+ }
+ }
+}
+
+memberA{
+ akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+ actor {
+ provider = "akka.cluster.ClusterActorRefProvider"
+ }
+ remote {
+ log-received-messages = off
+ log-sent-messages = off
+
+ log-remote-lifecycle-events = off
+ netty.tcp {
+ hostname = "localhost"
+ port = 2551
+ }
+ }
+
+ cluster {
+ seed-nodes = ["akka.tcp://opendaylight-rpc@localhost:2551"]
+
+ auto-down-unreachable-after = 10s
+ }
+ }
+}
+memberB{
+ akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+ actor {
+ provider = "akka.cluster.ClusterActorRefProvider"
+ }
+ remote {
+ log-received-messages = off
+ log-sent-messages = off
+
+ log-remote-lifecycle-events = off
+ netty.tcp {
+ hostname = "localhost"
+ port = 2552
+ }
+ }
+
+ cluster {
+ seed-nodes = ["akka.tcp://opendaylight-rpc@localhost:2551"]
+
+ auto-down-unreachable-after = 10s
+ }
+ }
+}
+memberC{
+ akka {
+ loglevel = "INFO"
+ loggers = ["akka.event.slf4j.Slf4jLogger"]
+ actor {
+ provider = "akka.cluster.ClusterActorRefProvider"
+ }
+ remote {
+ log-received-messages = off
+ log-sent-messages = off
+
+ log-remote-lifecycle-events = off
+ netty.tcp {
+ hostname = "localhost"
+ port = 2553
+ }
+ }
+
+ cluster {
+ seed-nodes = ["akka.tcp://opendaylight-rpc@localhost:2551"]
+
+ auto-down-unreachable-after = 10s
+ }
+ }
+}
\ No newline at end of file
<artifactId>sal-rest-connector-config</artifactId>
<description>Configuration files for sal-rest-connector</description>
<packaging>jar</packaging>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <phase>package</phase>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/initial/10-rest-connector.xml</file>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
</project>
<module>
<type xmlns:rest="urn:opendaylight:params:xml:ns:yang:controller:md:sal:rest:connector">rest:rest-connector-impl</type>
<name>rest-connector-default-impl</name>
- <websocket-port>8181</websocket-port>
+ <websocket-port>8185</websocket-port>
<dom-broker>
<type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
<name>dom-broker</name>
</services>
</data>
</configuration>
+ <required-capabilities>
+ <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:rest:connector?module=opendaylight-rest-connector&amp;revision=2014-07-24</capability>
+ </required-capabilities>
</snapshot>
*/
package org.opendaylight.controller.sal.restconf.impl;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import javax.ws.rs.core.Response.Status;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import javax.ws.rs.core.Response.Status;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
+
public class BrokerFacade {
private final static Logger LOG = LoggerFactory.getLogger(BrokerFacade.class);
currentArguments.add(currentArg);
YangInstanceIdentifier currentPath = YangInstanceIdentifier.create(currentArguments);
- final Optional<NormalizedNode<?, ?>> datastoreData;
+ final Boolean exists;
+
try {
- datastoreData = rwTx.read(store, currentPath).get();
- } catch (InterruptedException | ExecutionException e) {
+
+ CheckedFuture<Boolean, ReadFailedException> future =
+ rwTx.exists(store, currentPath);
+ exists = future.checkedGet();
+ } catch (ReadFailedException e) {
LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
throw new IllegalStateException("Failed to read pre-existing data", e);
}
- if (!datastoreData.isPresent() && iterator.hasNext()) {
+
+ if (!exists && iterator.hasNext()) {
rwTx.merge(store, currentPath, currentOp.createDefault(currentArg));
}
}
NormalizedNode<?, ?> data = null;
YangInstanceIdentifier normalizedII;
if (mountPoint != null) {
- normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+ normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+ .getInstanceIdentifier());
data = broker.readConfigurationData(mountPoint, normalizedII);
} else {
normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
NormalizedNode<?, ?> data = null;
YangInstanceIdentifier normalizedII;
if (mountPoint != null) {
- normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+ normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+ .getInstanceIdentifier());
data = broker.readOperationalData(mountPoint, normalizedII);
} else {
normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
try {
if (mountPoint != null) {
- normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+ normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+ .getInstanceIdentifier());
broker.commitConfigurationDataPut(mountPoint, normalizedII, datastoreNormalizedNode).get();
} else {
normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
try {
if (mountPoint != null) {
- normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+ normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+ .getInstanceIdentifier());
broker.commitConfigurationDataPost(mountPoint, normalizedII, datastoreNormalizedData);
} else {
normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
try {
if (mountPoint != null) {
- normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+ normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+ .getInstanceIdentifier());
broker.commitConfigurationDataPost(mountPoint, normalizedII, datastoreNormalizedData);
} else {
try {
if (mountPoint != null) {
- normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+ normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+ .getInstanceIdentifier());
broker.commitConfigurationDataDelete(mountPoint, normalizedII);
} else {
normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
iiBuilder = YangInstanceIdentifier.builder(iiOriginal);
}
- iiBuilder.node(schemaOfData.getQName());
+ if ((schemaOfData instanceof ListSchemaNode)) {
+ HashMap<QName, Object> keys = this.resolveKeysFromData(((ListSchemaNode) schemaOfData), data);
+ iiBuilder.nodeWithKey(schemaOfData.getQName(), keys);
+ } else {
+ iiBuilder.node(schemaOfData.getQName());
+ }
YangInstanceIdentifier instance = iiBuilder.toInstance();
DOMMountPoint mountPoint = null;
return new InstanceIdWithSchemaNode(instance, schemaOfData, mountPoint);
}
+ /**
+ * Resolves the key values of a list entry from the supplied data.
+ *
+ * For every key QName declared by {@code listNode}, the first simple child
+ * node with a matching local name is looked up in {@code dataNode} and its
+ * value is recorded.
+ *
+ * @param listNode schema of the list whose key definition drives the lookup
+ * @param dataNode composite node expected to carry one simple child per key
+ * @return map of key QName to the value found in the data
+ * @throws RestconfDocumentedException with PROTOCOL/INVALID_VALUE when the
+ *         data is missing a declared key (or its value is null)
+ */
+ private HashMap<QName, Object> resolveKeysFromData(final ListSchemaNode listNode, final CompositeNode dataNode) {
+ final HashMap<QName, Object> keyValues = new HashMap<QName, Object>();
+ List<QName> _keyDefinition = listNode.getKeyDefinition();
+ for (final QName key : _keyDefinition) {
+ SimpleNode<? extends Object> head = null;
+ String localName = key.getLocalName();
+ List<SimpleNode<? extends Object>> simpleNodesByName = dataNode.getSimpleNodesByName(localName);
+ if (simpleNodesByName != null) {
+ // only the first occurrence of the key leaf is considered
+ head = Iterables.getFirst(simpleNodesByName, null);
+ }
+
+ Object dataNodeKeyValueObject = null;
+ if (head != null) {
+ dataNodeKeyValueObject = head.getValue();
+ }
+
+ if (dataNodeKeyValueObject == null) {
+ throw new RestconfDocumentedException("Data contains list \"" + dataNode.getNodeType().getLocalName()
+ + "\" which does not contain key: \"" + key.getLocalName() + "\"", ErrorType.PROTOCOL,
+ ErrorTag.INVALID_VALUE);
+ }
+
+ keyValues.put(key, dataNodeKeyValueObject);
+ }
+
+ return keyValues;
+ }
+
private boolean endsWithMountPoint(final String identifier) {
return identifier.endsWith(ControllerContext.MOUNT) || identifier.endsWith(ControllerContext.MOUNT + "/");
}
"It wasn't possible to correctly interpret data."));
}
- private NormalizedNode<?, ?> compositeNodeToDatastoreNormalizedNode(final CompositeNode compNode, final DataSchemaNode schema) {
+ private NormalizedNode<?, ?> compositeNodeToDatastoreNormalizedNode(final CompositeNode compNode,
+ final DataSchemaNode schema) {
List<Node<?>> lst = new ArrayList<Node<?>>();
lst.add(compNode);
if (schema instanceof ContainerSchemaNode) {
"It wasn't possible to translate specified data to datastore readable form."));
}
- private InstanceIdWithSchemaNode normalizeInstanceIdentifierWithSchemaNode(final InstanceIdWithSchemaNode iiWithSchemaNode) {
+ private InstanceIdWithSchemaNode normalizeInstanceIdentifierWithSchemaNode(
+ final InstanceIdWithSchemaNode iiWithSchemaNode) {
return normalizeInstanceIdentifierWithSchemaNode(iiWithSchemaNode, false);
}
iiWithSchemaNode.getMountPoint());
}
- private YangInstanceIdentifier instanceIdentifierToReadableFormForNormalizeNode(final YangInstanceIdentifier instIdentifier,
- final boolean unwrapLastListNode) {
+ private YangInstanceIdentifier instanceIdentifierToReadableFormForNormalizeNode(
+ final YangInstanceIdentifier instIdentifier, final boolean unwrapLastListNode) {
Preconditions.checkNotNull(instIdentifier, "Instance identifier can't be null");
final List<PathArgument> result = new ArrayList<PathArgument>();
final Iterator<PathArgument> iter = instIdentifier.getPathArguments().iterator();
package org.opendaylight.controller.sal.restconf.impl.test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
-
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
-import java.util.concurrent.Future;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
/**
* Unit tests for BrokerFacade.
*
return Futures.immediateCheckedFuture(Optional.<NormalizedNode<?, ?>> of(dummyNode));
}
+ private CheckedFuture<Boolean,ReadFailedException> wrapExistence(Boolean exists) {
+ return Futures.immediateCheckedFuture(exists);
+ }
+
+
/**
* Value of this node shouldn't be important for testing purposes
*/
CheckedFuture<Void, TransactionCommitFailedException> expFuture = mock(CheckedFuture.class);
NormalizedNode<?, ?> dummyNode2 = createDummyNode("dummy:namespace2", "2014-07-01", "dummy local name2");
+
when(rwTransaction.read(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
wrapDummyNode(dummyNode2));
+
+ when(rwTransaction.exists(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
+ wrapExistence(true));
+
+
when(rwTransaction.submit()).thenReturn(expFuture);
CheckedFuture<Void, TransactionCommitFailedException> actualFuture = brokerFacade.commitConfigurationDataPost(
<artifactId>mockito-all</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-api</artifactId>
+ </dependency>
</dependencies>
<build>
*/
package org.opendaylight.controller.sal.rest.doc.impl;
+import com.google.common.base.Preconditions;
import javax.ws.rs.core.UriInfo;
-
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.controller.sal.rest.doc.swagger.ApiDeclaration;
import org.opendaylight.controller.sal.rest.doc.swagger.ResourceList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-
/**
- * This class gathers all yang defined {@link Module}s and generates Swagger
- * compliant documentation.
+ * This class gathers all yang defined {@link Module}s and generates Swagger compliant documentation.
*/
public class ApiDocGenerator extends BaseYangSwaggerGenerator {
*/
package org.opendaylight.controller.sal.rest.doc.impl;
+import static org.opendaylight.controller.sal.rest.doc.util.RestDocgenUtil.resolvePathArgumentsName;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.datatype.jsonorg.JsonOrgModule;
+import com.google.common.base.Preconditions;
import java.io.IOException;
import java.net.URI;
import java.text.DateFormat;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
-
import javax.ws.rs.core.UriInfo;
-
import org.json.JSONException;
import org.json.JSONObject;
import org.opendaylight.controller.sal.rest.doc.model.builder.OperationBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-import com.fasterxml.jackson.datatype.jsonorg.JsonOrgModule;
-import com.google.common.base.Preconditions;
-
public class BaseYangSwaggerGenerator {
private static Logger _logger = LoggerFactory.getLogger(BaseYangSwaggerGenerator.class);
* @param operType
* @return list of modules converted to swagger compliant resource list.
*/
- public ResourceList getResourceListing(UriInfo uriInfo, SchemaContext schemaContext,
- String context) {
+ public ResourceList getResourceListing(UriInfo uriInfo, SchemaContext schemaContext, String context) {
ResourceList resourceList = createResourceList();
for (Module module : modules) {
String revisionString = SIMPLE_DATE_FORMAT.format(module.getRevision());
-
Resource resource = new Resource();
_logger.debug("Working on [{},{}]...", module.getName(), revisionString);
- ApiDeclaration doc = getApiDeclaration(module.getName(), revisionString, uriInfo,
- schemaContext, context);
+ ApiDeclaration doc = getApiDeclaration(module.getName(), revisionString, uriInfo, schemaContext, context);
if (doc != null) {
resource.setPath(generatePath(uriInfo, module.getName(), revisionString));
return uri.toASCIIString();
}
- public ApiDeclaration getApiDeclaration(String module, String revision, UriInfo uriInfo,
- SchemaContext schemaContext, String context) {
+ public ApiDeclaration getApiDeclaration(String module, String revision, UriInfo uriInfo, SchemaContext schemaContext, String context) {
Date rev = null;
try {
rev = SIMPLE_DATE_FORMAT.parse(revision);
throw new IllegalArgumentException(e);
}
Module m = schemaContext.findModuleByName(module, rev);
- Preconditions.checkArgument(m != null, "Could not find module by name,revision: " + module
- + "," + revision);
+ Preconditions.checkArgument(m != null, "Could not find module by name,revision: " + module + "," + revision);
- return getApiDeclaration(m, rev, uriInfo, schemaContext, context);
+ return getApiDeclaration(m, rev, uriInfo, context, schemaContext);
}
- public ApiDeclaration getApiDeclaration(Module module, Date revision, UriInfo uriInfo,
- SchemaContext schemaContext, String context) {
+ public ApiDeclaration getApiDeclaration(Module module, Date revision, UriInfo uriInfo, String context, SchemaContext schemaContext) {
String basePath = createBasePathFromUriInfo(uriInfo);
- ApiDeclaration doc = getSwaggerDocSpec(module, basePath, context);
+ ApiDeclaration doc = getSwaggerDocSpec(module, basePath, context, schemaContext);
if (doc != null) {
return doc;
}
portPart = ":" + port;
}
String basePath = new StringBuilder(uriInfo.getBaseUri().getScheme()).append("://")
- .append(uriInfo.getBaseUri().getHost()).append(portPart).append("/")
- .append(RESTCONF_CONTEXT_ROOT).toString();
+ .append(uriInfo.getBaseUri().getHost()).append(portPart).append("/").append(RESTCONF_CONTEXT_ROOT)
+ .toString();
return basePath;
}
- public ApiDeclaration getSwaggerDocSpec(Module m, String basePath, String context) {
+ public ApiDeclaration getSwaggerDocSpec(Module m, String basePath, String context, SchemaContext schemaContext) {
ApiDeclaration doc = createApiDeclaration(basePath);
List<Api> apis = new ArrayList<Api>();
for (DataSchemaNode node : dataSchemaNodes) {
if ((node instanceof ListSchemaNode) || (node instanceof ContainerSchemaNode)) {
- _logger.debug("Is Configuration node [{}] [{}]", node.isConfiguration(), node
- .getQName().getLocalName());
+ _logger.debug("Is Configuration node [{}] [{}]", node.isConfiguration(), node.getQName().getLocalName());
List<Parameter> pathParams = new ArrayList<Parameter>();
- String resourcePath = getDataStorePath("/config/", context) + m.getName() + ":";
- addApis(node, apis, resourcePath, pathParams, true);
+ String resourcePath = getDataStorePath("/config/", context);
+ addApis(node, apis, resourcePath, pathParams, schemaContext, true);
pathParams = new ArrayList<Parameter>();
- resourcePath = getDataStorePath("/operational/", context) + m.getName() + ":";
- addApis(node, apis, resourcePath, pathParams, false);
+ resourcePath = getDataStorePath("/operational/", context);
+ addApis(node, apis, resourcePath, pathParams, schemaContext, false);
}
Set<RpcDefinition> rpcs = m.getRpcs();
for (RpcDefinition rpcDefinition : rpcs) {
- String resourcePath = getDataStorePath("/operations/", context) + m.getName() + ":";
- addRpcs(rpcDefinition, apis, resourcePath);
+ String resourcePath = getDataStorePath("/operations/", context);
+ addRpcs(rpcDefinition, apis, resourcePath, schemaContext);
}
}
JSONObject models = null;
try {
- models = jsonConverter.convertToJsonSchema(m);
+ models = jsonConverter.convertToJsonSchema(m, schemaContext);
doc.setModels(models);
if (_logger.isDebugEnabled()) {
_logger.debug(mapper.writeValueAsString(doc));
return module + "(" + revision + ")";
}
- private void addApis(DataSchemaNode node, List<Api> apis, String parentPath,
- List<Parameter> parentPathParams, boolean addConfigApi) {
+ private void addApis(DataSchemaNode node, List<Api> apis, String parentPath, List<Parameter> parentPathParams, SchemaContext schemaContext,
+ boolean addConfigApi) {
Api api = new Api();
List<Parameter> pathParams = new ArrayList<Parameter>(parentPathParams);
- String resourcePath = parentPath + createPath(node, pathParams) + "/";
+ String resourcePath = parentPath + createPath(node, pathParams, schemaContext) + "/";
_logger.debug("Adding path: [{}]", resourcePath);
api.setPath(resourcePath);
api.setOperations(operations(node, pathParams, addConfigApi));
if (childNode instanceof ListSchemaNode || childNode instanceof ContainerSchemaNode) {
// keep config and operation attributes separate.
if (childNode.isConfiguration() == addConfigApi) {
- addApis(childNode, apis, resourcePath, pathParams, addConfigApi);
+ addApis(childNode, apis, resourcePath, pathParams, schemaContext, addConfigApi);
}
}
}
* @param pathParams
* @return
*/
- private List<Operation> operations(DataSchemaNode node, List<Parameter> pathParams,
- boolean isConfig) {
+ private List<Operation> operations(DataSchemaNode node, List<Parameter> pathParams, boolean isConfig) {
List<Operation> operations = new ArrayList<>();
OperationBuilder.Get getBuilder = new OperationBuilder.Get(node, isConfig);
return operations;
}
- private String createPath(final DataSchemaNode schemaNode, List<Parameter> pathParams) {
+ private String createPath(final DataSchemaNode schemaNode, List<Parameter> pathParams, SchemaContext schemaContext) {
ArrayList<LeafSchemaNode> pathListParams = new ArrayList<LeafSchemaNode>();
StringBuilder path = new StringBuilder();
- QName _qName = schemaNode.getQName();
- String localName = _qName.getLocalName();
+ String localName = resolvePathArgumentsName(schemaNode, schemaContext);
path.append(localName);
if ((schemaNode instanceof ListSchemaNode)) {
final List<QName> listKeys = ((ListSchemaNode) schemaNode).getKeyDefinition();
for (final QName listKey : listKeys) {
- {
- DataSchemaNode _dataChildByName = ((DataNodeContainer) schemaNode)
- .getDataChildByName(listKey);
- pathListParams.add(((LeafSchemaNode) _dataChildByName));
-
- String pathParamIdentifier = new StringBuilder("/{")
- .append(listKey.getLocalName()).append("}").toString();
- path.append(pathParamIdentifier);
-
- Parameter pathParam = new Parameter();
- pathParam.setName(listKey.getLocalName());
- pathParam.setDescription(_dataChildByName.getDescription());
- pathParam.setType("string");
- pathParam.setParamType("path");
-
- pathParams.add(pathParam);
- }
+ DataSchemaNode _dataChildByName = ((DataNodeContainer) schemaNode).getDataChildByName(listKey);
+ pathListParams.add(((LeafSchemaNode) _dataChildByName));
+
+ String pathParamIdentifier = new StringBuilder("/{").append(listKey.getLocalName()).append("}")
+ .toString();
+ path.append(pathParamIdentifier);
+
+ Parameter pathParam = new Parameter();
+ pathParam.setName(listKey.getLocalName());
+ pathParam.setDescription(_dataChildByName.getDescription());
+ pathParam.setType("string");
+ pathParam.setParamType("path");
+
+ pathParams.add(pathParam);
}
}
return path.toString();
}
- protected void addRpcs(RpcDefinition rpcDefn, List<Api> apis, String parentPath) {
+ protected void addRpcs(RpcDefinition rpcDefn, List<Api> apis, String parentPath, SchemaContext schemaContext) {
Api rpc = new Api();
- String resourcePath = parentPath + rpcDefn.getQName().getLocalName();
+ String resourcePath = parentPath + resolvePathArgumentsName(rpcDefn, schemaContext);
rpc.setPath(resourcePath);
Operation operationSpec = new Operation();
}
return sortedModules;
}
+
}
*/
package org.opendaylight.controller.sal.rest.doc.impl;
+import static org.opendaylight.controller.sal.rest.doc.util.RestDocgenUtil.resolveNodesName;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import org.json.JSONException;
import org.json.JSONObject;
import org.opendaylight.controller.sal.rest.doc.model.builder.OperationBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.model.api.AnyXmlSchemaNode;
import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
import org.opendaylight.yangtools.yang.model.api.ChoiceNode;
import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
import org.opendaylight.yangtools.yang.model.api.type.BinaryTypeDefinition;
import org.opendaylight.yangtools.yang.model.api.type.BitsTypeDefinition;
private static final String NUMBER = "number";
private static final String BOOLEAN = "boolean";
private static final String STRING = "string";
- private static final String ID_KEY = "id";
- private static final String SUB_TYPES_KEY = "subTypes";
+ private static final String ID_KEY = "id";
+ private static final String SUB_TYPES_KEY = "subTypes";
private static final Map<Class<? extends TypeDefinition<?>>, String> YANG_TYPE_TO_JSON_TYPE_MAPPING;
YANG_TYPE_TO_JSON_TYPE_MAPPING = Collections.unmodifiableMap(tempMap1);
}
+ private Module topLevelModule;
+
public ModelGenerator() {
}
- public JSONObject convertToJsonSchema(Module module) throws IOException, JSONException {
+ public JSONObject convertToJsonSchema(Module module, SchemaContext schemaContext) throws IOException, JSONException {
JSONObject models = new JSONObject();
- processContainers(module, models);
- processRPCs(module, models);
- processIdentities(module, models);
+ topLevelModule = module;
+ processContainers(module, models, schemaContext);
+ processRPCs(module, models, schemaContext);
+ processIdentities(module, models);
return models;
}
- private void processContainers(Module module, JSONObject models) throws IOException,
+ private void processContainers(Module module, JSONObject models, SchemaContext schemaContext) throws IOException,
JSONException {
String moduleName = module.getName();
* For every container in the module
*/
if (childNode instanceof ContainerSchemaNode) {
- configModuleJSON = processContainer((ContainerSchemaNode) childNode, moduleName,
- true, models, true);
- operationalModuleJSON = processContainer((ContainerSchemaNode) childNode,
- moduleName, true, models, false);
+ configModuleJSON = processContainer((ContainerSchemaNode) childNode, moduleName, true, models, true,
+ schemaContext);
+ operationalModuleJSON = processContainer((ContainerSchemaNode) childNode, moduleName, true, models,
+ false, schemaContext);
}
if (configModuleJSON != null) {
}
/**
- * Process the RPCs for a Module Spits out a file each of the name
- * <rpcName>-input.json and <rpcName>-output.json for each RPC that contains
- * input & output elements
+ * Process the RPCs for a Module Spits out a file each of the name <rpcName>-input.json and <rpcName>-output.json
+ * for each RPC that contains input & output elements
*
* @param module
* @throws JSONException
* @throws IOException
*/
- private void processRPCs(Module module, JSONObject models) throws JSONException, IOException {
+ private void processRPCs(Module module, JSONObject models, SchemaContext schemaContext) throws JSONException,
+ IOException {
Set<RpcDefinition> rpcs = module.getRpcs();
String moduleName = module.getName();
ContainerSchemaNode input = rpc.getInput();
if (input != null) {
- JSONObject inputJSON = processContainer(input, moduleName, true, models);
+ JSONObject inputJSON = processContainer(input, moduleName, true, models, schemaContext);
String filename = "(" + rpc.getQName().getLocalName() + ")input";
inputJSON.put("id", filename);
// writeToFile(filename, inputJSON.toString(2), moduleName);
ContainerSchemaNode output = rpc.getOutput();
if (output != null) {
- JSONObject outputJSON = processContainer(output, moduleName, true, models);
+ JSONObject outputJSON = processContainer(output, moduleName, true, models, schemaContext);
String filename = "(" + rpc.getQName().getLocalName() + ")output";
outputJSON.put("id", filename);
models.put(filename, outputJSON);
}
}
- /**
- * Processes the 'identity' statement in a yang model
- * and maps it to a 'model' in the Swagger JSON spec.
- *
- * @param module The module from which the identity stmt will be processed
- * @param models The JSONObject in which the parsed identity will be put as a 'model' obj
- * @throws JSONException
- */
- private void processIdentities(Module module, JSONObject models) throws JSONException {
-
- String moduleName = module.getName();
- Set<IdentitySchemaNode> idNodes = module.getIdentities();
- _logger.debug("Processing Identities for module {} . Found {} identity statements", moduleName, idNodes.size());
-
- for(IdentitySchemaNode idNode : idNodes){
- JSONObject identityObj=new JSONObject();
- String identityName = idNode.getQName().getLocalName();
- _logger.debug("Processing Identity: {}", identityName);
-
- identityObj.put(ID_KEY, identityName);
- identityObj.put(DESCRIPTION_KEY, idNode.getDescription());
-
- JSONObject props = new JSONObject();
- IdentitySchemaNode baseId = idNode.getBaseIdentity();
+ /**
+ * Processes the 'identity' statement in a yang model and maps it to a 'model' in the Swagger JSON spec.
+ *
+ * @param module
+ * The module from which the identity stmt will be processed
+ * @param models
+ * The JSONObject in which the parsed identity will be put as a 'model' obj
+ * @throws JSONException
+ */
+ private void processIdentities(Module module, JSONObject models) throws JSONException {
+ String moduleName = module.getName();
+ Set<IdentitySchemaNode> idNodes = module.getIdentities();
+ _logger.debug("Processing Identities for module {} . Found {} identity statements", moduleName, idNodes.size());
+
+ for (IdentitySchemaNode idNode : idNodes) {
+ JSONObject identityObj = new JSONObject();
+ String identityName = idNode.getQName().getLocalName();
+ _logger.debug("Processing Identity: {}", identityName);
+
+ identityObj.put(ID_KEY, identityName);
+ identityObj.put(DESCRIPTION_KEY, idNode.getDescription());
+
+ JSONObject props = new JSONObject();
+ IdentitySchemaNode baseId = idNode.getBaseIdentity();
+
+ if (baseId == null) {
+ /**
+ * This is a base identity. So lets see if it has sub types. If it does, then add them to the model
+ * definition.
+ */
+ Set<IdentitySchemaNode> derivedIds = idNode.getDerivedIdentities();
+
+ if (derivedIds != null) {
+ JSONArray subTypes = new JSONArray();
+ for (IdentitySchemaNode derivedId : derivedIds) {
+ subTypes.put(derivedId.getQName().getLocalName());
+ }
+ identityObj.put(SUB_TYPES_KEY, subTypes);
+ }
+ } else {
+ /**
+ * This is a derived entity. Add it's base type & move on.
+ */
+ props.put(TYPE_KEY, baseId.getQName().getLocalName());
+ }
- if(baseId==null) {
- /**
- * This is a base identity. So lets see if
- * it has sub types. If it does, then add them to the model definition.
- */
- Set<IdentitySchemaNode> derivedIds = idNode.getDerivedIdentities();
-
- if(derivedIds != null) {
- JSONArray subTypes = new JSONArray();
- for(IdentitySchemaNode derivedId : derivedIds){
- subTypes.put(derivedId.getQName().getLocalName());
- }
- identityObj.put(SUB_TYPES_KEY, subTypes);
+ // Add the properties. For a base type, this will be an empty object as required by the Swagger spec.
+ identityObj.put(PROPERTIES_KEY, props);
+ models.put(identityName, identityObj);
}
- } else {
- /**
- * This is a derived entity. Add it's base type & move on.
- */
- props.put(TYPE_KEY, baseId.getQName().getLocalName());
- }
-
- //Add the properties. For a base type, this will be an empty object as required by the Swagger spec.
- identityObj.put(PROPERTIES_KEY, props);
- models.put(identityName, identityObj);
}
- }
+
/**
* Processes the container node and populates the moduleJSON
*
* @throws JSONException
* @throws IOException
*/
- private JSONObject processContainer(ContainerSchemaNode container, String moduleName,
- boolean addSchemaStmt, JSONObject models) throws JSONException, IOException {
- return processContainer(container, moduleName, addSchemaStmt, models, (Boolean) null);
+ private JSONObject processContainer(ContainerSchemaNode container, String moduleName, boolean addSchemaStmt,
+ JSONObject models, SchemaContext schemaContext) throws JSONException, IOException {
+ return processContainer(container, moduleName, addSchemaStmt, models, (Boolean) null, schemaContext);
}
- private JSONObject processContainer(ContainerSchemaNode container, String moduleName,
- boolean addSchemaStmt, JSONObject models, Boolean isConfig) throws JSONException,
- IOException {
+ private JSONObject processContainer(ContainerSchemaNode container, String moduleName, boolean addSchemaStmt,
+ JSONObject models, Boolean isConfig, SchemaContext schemaContext) throws JSONException, IOException {
JSONObject moduleJSON = getSchemaTemplate();
if (addSchemaStmt) {
moduleJSON = getSchemaTemplate();
String containerDescription = container.getDescription();
moduleJSON.put(DESCRIPTION_KEY, containerDescription);
- JSONObject properties = processChildren(container.getChildNodes(), moduleName, models, isConfig);
+ JSONObject properties = processChildren(container.getChildNodes(), container.getQName(), moduleName, models,
+ isConfig, schemaContext);
moduleJSON.put(PROPERTIES_KEY, properties);
return moduleJSON;
}
- private JSONObject processChildren(Iterable<DataSchemaNode> nodes, String moduleName,
- JSONObject models) throws JSONException, IOException {
- return processChildren(nodes, moduleName, models, null);
+ private JSONObject processChildren(Iterable<DataSchemaNode> nodes, QName parentQName, String moduleName,
+ JSONObject models, SchemaContext schemaContext) throws JSONException, IOException {
+ return processChildren(nodes, parentQName, moduleName, models, null, schemaContext);
}
/**
* Processes the nodes
*
* @param nodes
+ * @param parentQName
* @param moduleName
* @param isConfig
* @return
* @throws JSONException
* @throws IOException
*/
- private JSONObject processChildren(Iterable<DataSchemaNode> nodes, String moduleName,
- JSONObject models, Boolean isConfig) throws JSONException, IOException {
+ private JSONObject processChildren(Iterable<DataSchemaNode> nodes, QName parentQName, String moduleName,
+ JSONObject models, Boolean isConfig, SchemaContext schemaContext) throws JSONException, IOException {
JSONObject properties = new JSONObject();
for (DataSchemaNode node : nodes) {
if (isConfig == null || node.isConfiguration() == isConfig) {
- String name = node.getQName().getLocalName();
+ String name = resolveNodesName(node, topLevelModule, schemaContext);
JSONObject property = null;
if (node instanceof LeafSchemaNode) {
property = processLeafNode((LeafSchemaNode) node);
} else if (node instanceof ListSchemaNode) {
- property = processListSchemaNode((ListSchemaNode) node, moduleName, models, isConfig);
+ property = processListSchemaNode((ListSchemaNode) node, moduleName, models, isConfig, schemaContext);
} else if (node instanceof LeafListSchemaNode) {
property = processLeafListNode((LeafListSchemaNode) node);
} else if (node instanceof ChoiceNode) {
- property = processChoiceNode((ChoiceNode) node, moduleName, models);
+ property = processChoiceNode((ChoiceNode) node, moduleName, models, schemaContext);
} else if (node instanceof AnyXmlSchemaNode) {
property = processAnyXMLNode((AnyXmlSchemaNode) node);
} else if (node instanceof ContainerSchemaNode) {
- property = processContainer((ContainerSchemaNode) node, moduleName, false,
- models, isConfig);
+ property = processContainer((ContainerSchemaNode) node, moduleName, false, models, isConfig,
+ schemaContext);
} else {
- throw new IllegalArgumentException("Unknown DataSchemaNode type: "
- + node.getClass());
+ throw new IllegalArgumentException("Unknown DataSchemaNode type: " + node.getClass());
}
property.putOpt(DESCRIPTION_KEY, node.getDescription());
* @throws JSONException
* @throws IOException
*/
- private JSONObject processChoiceNode(ChoiceNode choiceNode, String moduleName, JSONObject models)
- throws JSONException, IOException {
+ private JSONObject processChoiceNode(ChoiceNode choiceNode, String moduleName, JSONObject models,
+ SchemaContext schemaContext) throws JSONException, IOException {
Set<ChoiceCaseNode> cases = choiceNode.getCases();
JSONArray choiceProps = new JSONArray();
for (ChoiceCaseNode choiceCase : cases) {
String choiceName = choiceCase.getQName().getLocalName();
- JSONObject choiceProp = processChildren(choiceCase.getChildNodes(), moduleName, models);
+ JSONObject choiceProp = processChildren(choiceCase.getChildNodes(), choiceCase.getQName(), moduleName,
+ models, schemaContext);
JSONObject choiceObj = new JSONObject();
choiceObj.put(choiceName, choiceProp);
choiceObj.put(TYPE_KEY, OBJECT_TYPE);
* @param props
* @throws JSONException
*/
- private void processConstraints(ConstraintDefinition constraints, JSONObject props)
- throws JSONException {
+ private void processConstraints(ConstraintDefinition constraints, JSONObject props) throws JSONException {
boolean isMandatory = constraints.isMandatory();
props.put(REQUIRED_KEY, isMandatory);
/**
* Parses a ListSchema node.
*
- * Due to a limitation of the RAML--->JAX-RS tool, sub-properties must be in
- * a separate JSON schema file. Hence, we have to write some properties to a
- * new file, while continuing to process the rest.
+ * Due to a limitation of the RAML--->JAX-RS tool, sub-properties must be in a separate JSON schema file. Hence, we
+ * have to write some properties to a new file, while continuing to process the rest.
*
* @param listNode
* @param moduleName
* @throws JSONException
* @throws IOException
*/
- private JSONObject processListSchemaNode(ListSchemaNode listNode, String moduleName,
- JSONObject models, Boolean isConfig) throws JSONException, IOException {
+ private JSONObject processListSchemaNode(ListSchemaNode listNode, String moduleName, JSONObject models,
+ Boolean isConfig, SchemaContext schemaContext) throws JSONException, IOException {
- String fileName = (BooleanUtils.isNotFalse(isConfig)?OperationBuilder.CONFIG:OperationBuilder.OPERATIONAL) +
- listNode.getQName().getLocalName();
+ String fileName = (BooleanUtils.isNotFalse(isConfig) ? OperationBuilder.CONFIG : OperationBuilder.OPERATIONAL)
+ + listNode.getQName().getLocalName();
- JSONObject childSchemaProperties = processChildren(listNode.getChildNodes(), moduleName, models);
+ JSONObject childSchemaProperties = processChildren(listNode.getChildNodes(), listNode.getQName(), moduleName,
+ models, schemaContext);
JSONObject childSchema = getSchemaTemplate();
childSchema.put(TYPE_KEY, OBJECT_TYPE);
childSchema.put(PROPERTIES_KEY, childSchemaProperties);
/*
- * Due to a limitation of the RAML--->JAX-RS tool, sub-properties must
- * be in a separate JSON schema file. Hence, we have to write some
- * properties to a new file, while continuing to process the rest.
+ * Due to a limitation of the RAML--->JAX-RS tool, sub-properties must be in a separate JSON schema file. Hence,
+ * we have to write some properties to a new file, while continuing to process the rest.
*/
// writeToFile(fileName, childSchema.toString(2), moduleName);
childSchema.put("id", fileName);
* @param property
* @throws JSONException
*/
- private void processTypeDef(TypeDefinition<?> leafTypeDef, JSONObject property)
- throws JSONException {
+ private void processTypeDef(TypeDefinition<?> leafTypeDef, JSONObject property) throws JSONException {
if (leafTypeDef instanceof ExtendedType) {
processExtendedType(leafTypeDef, property);
processUnionType((UnionTypeDefinition) leafTypeDef, property);
} else if (leafTypeDef instanceof IdentityrefTypeDefinition) {
- property.putOpt(TYPE_KEY, ((IdentityrefTypeDefinition) leafTypeDef).getIdentity().getQName().getLocalName());
+ property.putOpt(TYPE_KEY, ((IdentityrefTypeDefinition) leafTypeDef).getIdentity().getQName().getLocalName());
} else if (leafTypeDef instanceof BinaryTypeDefinition) {
processBinaryType((BinaryTypeDefinition) leafTypeDef, property);
} else {
* @param property
* @throws JSONException
*/
- private void processExtendedType(TypeDefinition<?> leafTypeDef, JSONObject property)
- throws JSONException {
+ private void processExtendedType(TypeDefinition<?> leafTypeDef, JSONObject property) throws JSONException {
Object leafBaseType = leafTypeDef.getBaseType();
if (leafBaseType instanceof ExtendedType) {
// recursively process an extended type until we hit a base type
processExtendedType((TypeDefinition<?>) leafBaseType, property);
} else {
- List<LengthConstraint> lengthConstraints = ((ExtendedType) leafTypeDef)
- .getLengthConstraints();
+ List<LengthConstraint> lengthConstraints = ((ExtendedType) leafTypeDef).getLengthConstraints();
for (LengthConstraint lengthConstraint : lengthConstraints) {
Number min = lengthConstraint.getMin();
Number max = lengthConstraint.getMax();
/*
*
*/
- private void processBinaryType(BinaryTypeDefinition binaryType, JSONObject property)
- throws JSONException {
+ private void processBinaryType(BinaryTypeDefinition binaryType, JSONObject property) throws JSONException {
property.put(TYPE_KEY, STRING);
JSONObject media = new JSONObject();
media.put(BINARY_ENCODING_KEY, BASE_64);
* @param property
* @throws JSONException
*/
- private void processEnumType(EnumerationType enumLeafType, JSONObject property)
- throws JSONException {
+ private void processEnumType(EnumerationType enumLeafType, JSONObject property) throws JSONException {
List<EnumPair> enumPairs = enumLeafType.getValues();
List<String> enumNames = new ArrayList<String>();
for (EnumPair enumPair : enumPairs) {
* @param property
* @throws JSONException
*/
- private void processBitsType(BitsTypeDefinition bitsType, JSONObject property)
- throws JSONException {
+ private void processBitsType(BitsTypeDefinition bitsType, JSONObject property) throws JSONException {
property.put(TYPE_KEY, ARRAY_TYPE);
property.put(MIN_ITEMS, 0);
property.put(UNIQUE_ITEMS_KEY, true);
* @param property
* @throws JSONException
*/
- private void processUnionType(UnionTypeDefinition unionType, JSONObject property)
- throws JSONException {
+ private void processUnionType(UnionTypeDefinition unionType, JSONObject property) throws JSONException {
StringBuilder type = new StringBuilder();
- for (TypeDefinition<?> typeDef : unionType.getTypes() ) {
- if( type.length() > 0 ){
- type.append( " or " );
+ for (TypeDefinition<?> typeDef : unionType.getTypes()) {
+ if (type.length() > 0) {
+ type.append(" or ");
}
type.append(YANG_TYPE_TO_JSON_TYPE_MAPPING.get(typeDef.getClass()));
}
- property.put(TYPE_KEY, type );
+ property.put(TYPE_KEY, type);
}
/**
return schemaJSON;
}
+
}
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
-
import javax.ws.rs.core.UriInfo;
-
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.rest.doc.util;
+
+import java.net.URI;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaNode;
+
+// Helper routines for rendering YANG node names in the generated REST API
+// documentation. A name is qualified with its owning module ("module:name")
+// whenever the node's namespace/revision differ from its parent or module;
+// otherwise the bare local name is used.
+public class RestDocgenUtil {
+
+ // Utility class: static methods only, no instances.
+ private RestDocgenUtil() {
+ }
+
+ // Cache of namespace -> (revision -> Module) so repeated lookups avoid
+ // re-querying the SchemaContext. Static and mutable; only mutated inside
+ // the synchronized resolveFullNameFromNode below.
+ private static Map<URI, Map<Date, Module>> namespaceAndRevisionToModule = new HashMap<URI, Map<Date, Module>>();
+
+ /**
+ * Resolve path argument name for {@code node}.
+ *
+ * The name can contain also prefix which consists of module name followed by colon. The module prefix is presented
+ * if namespace of {@code node} and its parent is different. In other cases only name of {@code node} is returned.
+ *
+ * @return name of {@code node}
+ */
+ public static String resolvePathArgumentsName(final SchemaNode node, final SchemaContext schemaContext) {
+ // Path towards root: first element is the node itself, second (if any)
+ // is its parent.
+ Iterable<QName> schemaPath = node.getPath().getPathTowardsRoot();
+ Iterator<QName> it = schemaPath.iterator();
+ QName nodeQName = it.next();
+
+ QName parentQName = null;
+ if (it.hasNext()) {
+ parentQName = it.next();
+ }
+ if (isEqualNamespaceAndRevision(parentQName, nodeQName)) {
+ return node.getQName().getLocalName();
+ } else {
+ return resolveFullNameFromNode(node, schemaContext);
+ }
+ }
+
+ // Returns "moduleName:localName" for the module owning node's namespace
+ // and revision, falling back to the bare local name when no module is
+ // found. Synchronized to guard the shared cache map above.
+ private synchronized static String resolveFullNameFromNode(final SchemaNode node, final SchemaContext schemaContext) {
+ final URI namespace = node.getQName().getNamespace();
+ final Date revision = node.getQName().getRevision();
+
+ Map<Date, Module> revisionToModule = namespaceAndRevisionToModule.get(namespace);
+ if (revisionToModule == null) {
+ revisionToModule = new HashMap<>();
+ namespaceAndRevisionToModule.put(namespace, revisionToModule);
+ }
+ Module module = revisionToModule.get(revision);
+ if (module == null) {
+ // NOTE(review): a null result is cached too, so a module added to a
+ // later SchemaContext under the same namespace/revision would not be
+ // picked up — confirm this staleness is acceptable for docgen use.
+ module = schemaContext.findModuleByNamespaceAndRevision(namespace, revision);
+ revisionToModule.put(revision, module);
+ }
+ if (module != null) {
+ return module.getName() + ":" + node.getQName().getLocalName();
+ }
+ return node.getQName().getLocalName();
+ }
+
+ // Returns the bare local name when node belongs to module (same namespace
+ // and revision), otherwise the module-qualified name.
+ // NOTE(review): getRevision() may be null for revision-less modules, which
+ // would NPE on .equals here — TODO confirm callers always have revisions.
+ public static String resolveNodesName(final SchemaNode node, final Module module, final SchemaContext schemaContext) {
+ if (node.getQName().getNamespace().equals(module.getQNameModule().getNamespace())
+ && node.getQName().getRevision().equals(module.getQNameModule().getRevision())) {
+ return node.getQName().getLocalName();
+ } else {
+ return resolveFullNameFromNode(node, schemaContext);
+ }
+ }
+
+ // Null-tolerant comparison: two nulls are equal, one null is not.
+ private static boolean isEqualNamespaceAndRevision(QName parentQName, QName nodeQName) {
+ if (parentQName == null) {
+ if (nodeQName == null) {
+ return true;
+ }
+ return false;
+ }
+ return parentQName.getNamespace().equals(nodeQName.getNamespace())
+ && parentQName.getRevision().equals(nodeQName.getRevision());
+ }
+}
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import com.google.common.base.Preconditions;
import java.io.File;
import java.util.Arrays;
+import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeSet;
-
import javax.ws.rs.core.UriInfo;
-
import junit.framework.Assert;
-
+import org.json.JSONException;
+import org.json.JSONObject;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.sal.rest.doc.swagger.Resource;
import org.opendaylight.controller.sal.rest.doc.swagger.ResourceList;
import org.opendaylight.yangtools.yang.model.api.Module;
-
-import com.google.common.base.Preconditions;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
/**
*
public static final String HTTP_HOST = "http://host";
private ApiDocGenerator generator;
private DocGenTestHelper helper;
+ private SchemaContext schemaContext;
@Before
public void setUp() throws Exception {
generator = new ApiDocGenerator();
helper = new DocGenTestHelper();
helper.setUp();
+ schemaContext = new YangParserImpl().resolveSchemaContext(new HashSet<Module>(helper.getModules().values()));
}
@After
for (Entry<File, Module> m : helper.getModules().entrySet()) {
if (m.getKey().getAbsolutePath().endsWith("toaster_short.yang")) {
ApiDeclaration doc = generator.getSwaggerDocSpec(m.getValue(),
- "http://localhost:8080/restconf", "");
+ "http://localhost:8080/restconf", "",schemaContext);
validateToaster(doc);
+ validateTosterDocContainsModulePrefixes(doc);
Assert.assertNotNull(doc);
}
}
for (Entry<File, Module> m : helper.getModules().entrySet()) {
if (m.getKey().getAbsolutePath().endsWith("toaster.yang")) {
ApiDeclaration doc = generator.getSwaggerDocSpec(m.getValue(),
- "http://localhost:8080/restconf", "");
+ "http://localhost:8080/restconf", "",schemaContext);
Assert.assertNotNull(doc);
//testing bugs.opendaylight.org bug 1290. UnionType model type.
}
}
+ /**
+ * Tests whether all required paths for the HTTP operations (GET, DELETE, PUT, POST) are generated from the
+ * yang files.
+ *
+ * If a container or list is augmented, the path must include the augmenting module's name followed by a colon
+ * (e.g. "/config/module1:element1/element2/module2:element3")
+ *
+ * @param doc
+ * @throws Exception
+ */
private void validateToaster(ApiDeclaration doc) throws Exception {
Set<String> expectedUrls = new TreeSet<>(Arrays.asList(new String[] {
"/config/toaster2:toaster/", "/operational/toaster2:toaster/",
"/operations/toaster2:cancel-toast", "/operations/toaster2:make-toast",
- "/operations/toaster2:restock-toaster" }));
+ "/operations/toaster2:restock-toaster",
+ "/config/toaster2:toaster/toasterSlot/{slotId}/toaster-augmented:slotInfo/" }));
Set<String> actualUrls = new TreeSet<>();
@Test
public void testGetResourceListing() throws Exception {
UriInfo info = helper.createMockUriInfo(HTTP_HOST);
- SchemaService mockSchemaService = helper.createMockSchemaService();
+ SchemaService mockSchemaService = helper.createMockSchemaService(schemaContext);
generator.setSchemaService(mockSchemaService);
assertEquals(HTTP_HOST + "/toaster2(2009-11-20)", toaster2.getPath());
}
+ /**
+ * Checks that generated models qualify names with a module prefix exactly
+ * when the node comes from a different module: "toasterSlot" (declared in
+ * toaster2 itself) must be unprefixed, while the augmenting
+ * "toaster-augmented:slotInfo" must carry its module prefix.
+ */
+ private void validateTosterDocContainsModulePrefixes(ApiDeclaration doc) {
+ JSONObject topLevelJson = doc.getModels();
+ try {
+ JSONObject configToaster = topLevelJson.getJSONObject("(config)toaster");
+ assertNotNull("(config)toaster JSON object missing", configToaster);
+ //without module prefix
+ containsProperties(configToaster, "toasterSlot");
+
+ JSONObject toasterSlot = topLevelJson.getJSONObject("(config)toasterSlot");
+ assertNotNull("(config)toasterSlot JSON object missing", toasterSlot);
+ //with module prefix
+ containsProperties(toasterSlot, "toaster-augmented:slotInfo");
+
+ } catch (JSONException e) {
+ // Any missing key surfaces here as a JSONException from getJSONObject.
+ fail("Json exception while reading JSON object. Original message "+e.getMessage());
+ }
+ }
+
+ /**
+ * Asserts that each named property exists under jsonObject's "properties"
+ * object.
+ *
+ * NOTE(review): JSONObject.getJSONObject throws JSONException when the key
+ * is absent, so the assertNotNull calls below are effectively unreachable;
+ * the missing-property case is reported by the caller's catch block instead.
+ */
+ private void containsProperties(final JSONObject jsonObject,final String...properties) throws JSONException {
+ for (String property : properties) {
+ JSONObject propertiesObject = jsonObject.getJSONObject("properties");
+ assertNotNull("Properties object missing in ", propertiesObject);
+ JSONObject concretePropertyObject = propertiesObject.getJSONObject(property);
+ assertNotNull(property + " is missing",concretePropertyObject);
+ }
+ }
}
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.datatype.jsonorg.JsonOrgModule;
import java.io.File;
import java.io.FileNotFoundException;
import java.net.URI;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.core.UriInfo;
-
import org.mockito.ArgumentCaptor;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.parser.api.YangModelParser;
+import org.opendaylight.yangtools.yang.model.parser.api.YangContextParser;
import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-import com.fasterxml.jackson.datatype.jsonorg.JsonOrgModule;
-
public class DocGenTestHelper {
private Map<File, Module> modules;
URISyntaxException {
URI resourceDirUri = getClass().getResource(resourceDirectory).toURI();
- final YangModelParser parser = new YangParserImpl();
+ final YangContextParser parser = new YangParserImpl();
final File testDir = new File(resourceDirUri);
final String[] fileList = testDir.list();
final List<File> testFiles = new ArrayList<>();
final ArgumentCaptor<String> moduleCapture = ArgumentCaptor.forClass(String.class);
final ArgumentCaptor<Date> dateCapture = ArgumentCaptor.forClass(Date.class);
+ final ArgumentCaptor<URI> namespaceCapture = ArgumentCaptor.forClass(URI.class);
when(mockContext.findModuleByName(moduleCapture.capture(), dateCapture.capture())).then(
new Answer<Module>() {
@Override
return null;
}
});
+ when(mockContext.findModuleByNamespaceAndRevision(namespaceCapture.capture(), dateCapture.capture())).then(
+ new Answer<Module>() {
+ @Override
+ public Module answer(InvocationOnMock invocation) throws Throwable {
+ URI namespace = namespaceCapture.getValue();
+ Date date = dateCapture.getValue();
+ for (Module m : modules.values()) {
+ if (m.getNamespace().equals(namespace) && m.getRevision().equals(date)) {
+ return m;
+ }
+ }
+ return null;
+ }
+ });
return mockContext;
}
import java.net.URISyntaxException;
import java.util.Arrays;
+import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
-
import javax.ws.rs.core.UriInfo;
-
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.opendaylight.controller.sal.rest.doc.swagger.ResourceList;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.Module;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
public class MountPointSwaggerTest {
private static final String INSTANCE_URL = "nodes/node/123/";
private MountPointSwagger swagger;
private DocGenTestHelper helper;
+ private SchemaContext schemaContext;
@Before
public void setUp() throws Exception {
swagger = new MountPointSwagger();
helper = new DocGenTestHelper();
helper.setUp();
+ schemaContext = new YangParserImpl().resolveSchemaContext(new HashSet<Module>(helper.getModules().values()));
}
@Test()
--- /dev/null
+// Test model augmenting toaster2: adds a slotInfo container under
+// /toaster/toasterSlot, so generated documentation must qualify it as
+// "toaster-augmented:slotInfo".
+module toaster-augmented {
+
+ yang-version 1;
+
+ namespace
+ "http://netconfcentral.org/ns/toaster/augmented";
+
+ prefix toast;
+ import toaster2 {prefix tst; revision-date 2009-11-20;}
+
+ // RFC 6020 requires revision dates in YYYY-MM-DD form; the previous
+ // "2014-7-14" (single-digit month/day) is not a valid date-arg and is
+ // rejected by strict parsers.
+ revision "2014-07-14" {
+ }
+
+ augment "/tst:toaster/tst:toasterSlot" {
+ container slotInfo {
+ leaf numberOfToastPrepared {
+ type uint32;
+ }
+ }
+ }
+}
\ No newline at end of file
Microsoft Toaster.";
}
+ list toasterSlot {
+ key "slotId";
+ leaf slotId {
+ type string;
+ }
+ }
+
leaf toasterModelNumber {
type DisplayString;
config false;
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>sal</artifactId>
+ <artifactId>sal-binding-api</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>sal-binding-api</artifactId>
+ <artifactId>liblldp</artifactId>
</dependency>
<dependency>
<groupId>org.opendaylight.controller.model</groupId>
import java.nio.charset.Charset;
-import org.opendaylight.controller.sal.packet.Ethernet;
-import org.opendaylight.controller.sal.packet.LLDP;
-import org.opendaylight.controller.sal.packet.LLDPTLV;
-import org.opendaylight.controller.sal.utils.NetUtils;
+import org.opendaylight.controller.liblldp.Ethernet;
+import org.opendaylight.controller.liblldp.LLDP;
+import org.opendaylight.controller.liblldp.LLDPTLV;
+import org.opendaylight.controller.liblldp.NetUtils;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import static com.google.common.base.Preconditions.checkNotNull;
-import com.google.common.base.Function;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.Collections2;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
+
import javax.annotation.concurrent.Immutable;
+import javax.management.MBeanServerConnection;
+
import org.opendaylight.controller.config.api.ConflictingVersionException;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.w3c.dom.Element;
import org.xml.sax.SAXException;
+import com.google.common.base.Function;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Collections2;
+
@Immutable
-public class ConfigPusher {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPusher.class);
+public class ConfigPusherImpl implements ConfigPusher {
+ private static final Logger logger = LoggerFactory.getLogger(ConfigPusherImpl.class);
private final long maxWaitForCapabilitiesMillis;
private final long conflictingVersionTimeoutMillis;
private final NetconfOperationServiceFactory configNetconfConnector;
+ private static final int QUEUE_SIZE = 100;
+ private BlockingQueue<List<? extends ConfigSnapshotHolder>> queue = new LinkedBlockingQueue<List<? extends ConfigSnapshotHolder>>(QUEUE_SIZE);
- public ConfigPusher(NetconfOperationServiceFactory configNetconfConnector, long maxWaitForCapabilitiesMillis,
+ public ConfigPusherImpl(NetconfOperationServiceFactory configNetconfConnector, long maxWaitForCapabilitiesMillis,
long conflictingVersionTimeoutMillis) {
this.configNetconfConnector = configNetconfConnector;
this.maxWaitForCapabilitiesMillis = maxWaitForCapabilitiesMillis;
this.conflictingVersionTimeoutMillis = conflictingVersionTimeoutMillis;
}
- public synchronized LinkedHashMap<ConfigSnapshotHolder, EditAndCommitResponse> pushConfigs(List<ConfigSnapshotHolder> configs) throws NetconfDocumentedException {
+ public void process(List<AutoCloseable> autoCloseables, MBeanServerConnection platformMBeanServer, Persister persisterAggregator) throws InterruptedException {
+ List<? extends ConfigSnapshotHolder> configs;
+ while(true) {
+ configs = queue.take();
+ try {
+ internalPushConfigs(configs);
+ ConfigPersisterNotificationHandler jmxNotificationHandler = new ConfigPersisterNotificationHandler(platformMBeanServer, persisterAggregator);
+ synchronized (autoCloseables) {
+ autoCloseables.add(jmxNotificationHandler);
+ }
+ /*
+ * We have completed initial configuration. At this point
+ * it is good idea to perform garbage collection to prune
+ * any garbage we have accumulated during startup.
+ */
+ logger.debug("Running post-initialization garbage collection...");
+ System.gc();
+ logger.debug("Post-initialization garbage collection completed.");
+ logger.debug("ConfigPusher has pushed configs {}, gc completed", configs);
+ }
+ catch (NetconfDocumentedException e) {
+ logger.error("Error pushing configs {}",configs);
+ throw new IllegalStateException(e);
+ }
+ }
+ }
+
+ public void pushConfigs(List<? extends ConfigSnapshotHolder> configs) throws InterruptedException {
+ logger.debug("Requested to push configs {}", configs);
+ this.queue.put(configs);
+ }
+
+ private LinkedHashMap<? extends ConfigSnapshotHolder, EditAndCommitResponse> internalPushConfigs(List<? extends ConfigSnapshotHolder> configs) throws NetconfDocumentedException {
logger.debug("Last config snapshots to be pushed to netconf: {}", configs);
LinkedHashMap<ConfigSnapshotHolder, EditAndCommitResponse> result = new LinkedHashMap<>();
// start pushing snapshots:
private static NetconfMessage getCommitMessage() {
String resource = "/netconfOp/commit.xml";
- try (InputStream stream = ConfigPusher.class.getResourceAsStream(resource)) {
+ try (InputStream stream = ConfigPusherImpl.class.getResourceAsStream(resource)) {
checkNotNull(stream, "Unable to load resource " + resource);
return new NetconfMessage(XmlUtil.readXmlToDocument(stream));
} catch (SAXException | IOException e) {
package org.opendaylight.controller.netconf.persist.impl.osgi;
-import com.google.common.annotations.VisibleForTesting;
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import javax.management.MBeanServer;
+
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
-import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
-import org.opendaylight.controller.netconf.persist.impl.ConfigPersisterNotificationHandler;
-import org.opendaylight.controller.netconf.persist.impl.ConfigPusher;
+import org.opendaylight.controller.netconf.persist.impl.ConfigPusherImpl;
import org.opendaylight.controller.netconf.persist.impl.PersisterAggregator;
import org.opendaylight.controller.netconf.util.CloseableUtil;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.Filter;
import org.osgi.framework.InvalidSyntaxException;
import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
import org.osgi.util.tracker.ServiceTracker;
import org.osgi.util.tracker.ServiceTrackerCustomizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.management.MBeanServer;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
+import com.google.common.annotations.VisibleForTesting;
public class ConfigPersisterActivator implements BundleActivator {
public static final String STORAGE_ADAPTER_CLASS_PROP_SUFFIX = "storageAdapterClass";
private List<AutoCloseable> autoCloseables;
+ private volatile BundleContext context;
+ ServiceRegistration<?> registration;
@Override
public void start(final BundleContext context) throws Exception {
logger.debug("ConfigPersister starting");
+ this.context = context;
+
autoCloseables = new ArrayList<>();
PropertiesProviderBaseImpl propertiesProvider = new PropertiesProviderBaseImpl(context);
}
@Override
- public synchronized void stop(BundleContext context) throws Exception {
- CloseableUtil.closeAll(autoCloseables);
+ public void stop(BundleContext context) throws Exception {
+ synchronized(autoCloseables) {
+ CloseableUtil.closeAll(autoCloseables);
+ if (registration != null) {
+ registration.unregister();
+ }
+ this.context = null;
+ }
}
logger.trace("Got InnerCustomizer.addingService {}", reference);
NetconfOperationServiceFactory service = reference.getBundle().getBundleContext().getService(reference);
- final ConfigPusher configPusher = new ConfigPusher(service, maxWaitForCapabilitiesMillis, conflictingVersionTimeoutMillis);
+ logger.debug("Creating new job queue");
+
+ final ConfigPusherImpl configPusher = new ConfigPusherImpl(service, maxWaitForCapabilitiesMillis, conflictingVersionTimeoutMillis);
logger.debug("Configuration Persister got {}", service);
+ logger.debug("Context was {}", context);
+ logger.debug("Registration was {}", registration);
+
final Thread pushingThread = new Thread(new Runnable() {
@Override
public void run() {
try {
- configPusher.pushConfigs(configs);
- } catch (NetconfDocumentedException e) {
- logger.error("Error pushing configs {}",configs);
- throw new IllegalStateException(e);
+ if(configs != null && !configs.isEmpty()) {
+ configPusher.pushConfigs(configs);
+ }
+ registration = context.registerService(ConfigPusher.class.getName(), configPusher, null);
+ configPusher.process(autoCloseables, platformMBeanServer, persisterAggregator);
+ } catch (InterruptedException e) {
+ logger.info("ConfigPusher thread stopped",e);
}
logger.info("Configuration Persister initialization completed.");
-
- /*
- * We have completed initial configuration. At this point
- * it is good idea to perform garbage collection to prune
- * any garbage we have accumulated during startup.
- */
- logger.debug("Running post-initialization garbage collection...");
- System.gc();
- logger.debug("Post-initialization garbage collection completed.");
-
- ConfigPersisterNotificationHandler jmxNotificationHandler = new ConfigPersisterNotificationHandler(platformMBeanServer, persisterAggregator);
- synchronized (ConfigPersisterActivator.this) {
- autoCloseables.add(jmxNotificationHandler);
- }
}
}, "config-pusher");
- synchronized (ConfigPersisterActivator.this) {
+ synchronized (autoCloseables) {
autoCloseables.add(new AutoCloseable() {
@Override
public void close() {
*/
package org.opendaylight.controller.netconf.persist.impl.osgi;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Dictionary;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
import org.opendaylight.controller.config.persist.api.Persister;
import org.opendaylight.controller.config.persist.api.PropertiesProvider;
import org.osgi.framework.Filter;
import org.osgi.framework.ServiceListener;
import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
final class MockedBundleContext {
@Mock
NetconfOperationServiceFactory serviceFactory;
@Mock
private NetconfOperationService service;
+ @Mock
+ private ServiceRegistration<?> registration;
MockedBundleContext(long maxWaitForCapabilitiesMillis, long conflictingVersionTimeoutMillis) throws Exception {
MockitoAnnotations.initMocks(this);
doReturn(Collections.emptySet()).when(service).getCapabilities();
doNothing().when(service).close();
doReturn("serviceFactoryMock").when(serviceFactory).toString();
+
+ doNothing().when(registration).unregister();
+ doReturn(registration).when(context).registerService(
+ eq(ConfigPusher.class.getName()), any(Closeable.class),
+ any(Dictionary.class));
}
public BundleContext getBundleContext() {
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
import org.opendaylight.controller.sal.core.api.RpcImplementation;
import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* Implementation of RemoteDeviceHandler. Integrates cli with
}
@Override
- public synchronized void onDeviceConnected(final SchemaContextProvider contextProvider,
+ public synchronized void onDeviceConnected(final SchemaContext context,
final NetconfSessionCapabilities capabilities, final RpcImplementation rpcImplementation) {
console.enterRootContext(new ConsoleContext() {
// possible
// TODO detect netconf base version
// TODO detect inet types version
- commandDispatcher.addRemoteCommands(rpcImplementation, contextProvider.getSchemaContext());
- schemaContextRegistry.setRemoteSchemaContext(contextProvider.getSchemaContext());
+ commandDispatcher.addRemoteCommands(rpcImplementation, context);
+ schemaContextRegistry.setRemoteSchemaContext(context);
up = true;
this.notify();
}
<modules>
<module>netconf-api</module>
- <module>netconf-cli</module>
+ <!--FIXME make compilable-->
+ <!--<module>netconf-cli</module>-->
<module>netconf-config</module>
<module>netconf-impl</module>
<module>config-netconf-connector</module>
import org.opendaylight.controller.networkconfig.neutron.INeutronFirewallPolicyCRUD;
import org.opendaylight.controller.networkconfig.neutron.INeutronFirewallRuleCRUD;
import org.opendaylight.controller.networkconfig.neutron.INeutronFloatingIPCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerHealthMonitorCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.INeutronNetworkCRUD;
import org.opendaylight.controller.networkconfig.neutron.INeutronPortCRUD;
import org.opendaylight.controller.networkconfig.neutron.INeutronRouterCRUD;
NeutronSecurityRuleInterface.class,
NeutronFirewallInterface.class,
NeutronFirewallPolicyInterface.class,
- NeutronFirewallRuleInterface.class};
+ NeutronFirewallRuleInterface.class,
+ NeutronLoadBalancerInterface.class,
+ NeutronLoadBalancerPoolInterface.class,
+ NeutronLoadBalancerListenerInterface.class,
+ NeutronLoadBalancerHealthMonitorInterface.class,
+ NeutronLoadBalancerPoolMemberInterface.class};
return res;
}
"setConfigurationContainerService",
"unsetConfigurationContainerService").setRequired(true));
}
+ if (imp.equals(NeutronLoadBalancerInterface.class)) {
+ // export the service
+ c.setInterface(
+ new String[] { INeutronLoadBalancerCRUD.class.getName(),
+ IConfigurationContainerAware.class.getName()}, null);
+ Dictionary<String, String> props = new Hashtable<String, String>();
+ props.put("salListenerName", "neutron");
+ c.add(createContainerServiceDependency(containerName)
+ .setService(IClusterContainerServices.class)
+ .setCallbacks("setClusterContainerService",
+ "unsetClusterContainerService").setRequired(true));
+ c.add(createContainerServiceDependency(containerName).setService(
+ IConfigurationContainerService.class).setCallbacks(
+ "setConfigurationContainerService",
+ "unsetConfigurationContainerService").setRequired(true));
+ }
+ if (imp.equals(NeutronLoadBalancerListenerInterface.class)) {
+ // export the service
+ c.setInterface(
+ new String[] { INeutronLoadBalancerListenerCRUD.class.getName(),
+ IConfigurationContainerAware.class.getName()}, null);
+ Dictionary<String, String> props = new Hashtable<String, String>();
+ props.put("salListenerName", "neutron");
+ c.add(createContainerServiceDependency(containerName)
+ .setService(IClusterContainerServices.class)
+ .setCallbacks("setClusterContainerService",
+ "unsetClusterContainerService").setRequired(true));
+ c.add(createContainerServiceDependency(containerName).setService(
+ IConfigurationContainerService.class).setCallbacks(
+ "setConfigurationContainerService",
+ "unsetConfigurationContainerService").setRequired(true));
+ }
+ if (imp.equals(NeutronLoadBalancerPoolInterface.class)) {
+ // export the service
+ c.setInterface(
+ new String[] { INeutronLoadBalancerPoolCRUD.class.getName(),
+ IConfigurationContainerAware.class.getName()}, null);
+ Dictionary<String, String> props = new Hashtable<String, String>();
+ props.put("salListenerName", "neutron");
+ c.add(createContainerServiceDependency(containerName)
+ .setService(IClusterContainerServices.class)
+ .setCallbacks("setClusterContainerService",
+ "unsetClusterContainerService").setRequired(true));
+ c.add(createContainerServiceDependency(containerName).setService(
+ IConfigurationContainerService.class).setCallbacks(
+ "setConfigurationContainerService",
+ "unsetConfigurationContainerService").setRequired(true));
+ }
+ if (imp.equals(NeutronLoadBalancerHealthMonitorInterface.class)) {
+ // export the service
+ c.setInterface(
+ new String[] { INeutronLoadBalancerHealthMonitorCRUD.class.getName(),
+ IConfigurationContainerAware.class.getName()}, null);
+ Dictionary<String, String> props = new Hashtable<String, String>();
+ props.put("salListenerName", "neutron");
+ c.add(createContainerServiceDependency(containerName)
+ .setService(IClusterContainerServices.class)
+ .setCallbacks("setClusterContainerService",
+ "unsetClusterContainerService").setRequired(true));
+ c.add(createContainerServiceDependency(containerName).setService(
+ IConfigurationContainerService.class).setCallbacks(
+ "setConfigurationContainerService",
+ "unsetConfigurationContainerService").setRequired(true));
+ }
+ if (imp.equals(NeutronLoadBalancerPoolMemberInterface.class)) {
+ // export the service
+ c.setInterface(
+ new String[] { INeutronLoadBalancerPoolMemberCRUD.class.getName(),
+ IConfigurationContainerAware.class.getName()}, null);
+ Dictionary<String, String> props = new Hashtable<String, String>();
+ props.put("salListenerName", "neutron");
+ c.add(createContainerServiceDependency(containerName)
+ .setService(IClusterContainerServices.class)
+ .setCallbacks("setClusterContainerService",
+ "unsetClusterContainerService").setRequired(true));
+ c.add(createContainerServiceDependency(containerName).setService(
+ IConfigurationContainerService.class).setCallbacks(
+ "setConfigurationContainerService",
+ "unsetConfigurationContainerService").setRequired(true));
+ }
}
}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerHealthMonitorCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerHealthMonitor;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * CRUD store for Neutron LoadBalancerHealthMonitor objects, keyed by UUID in the
+ * clustered cache "neutronLoadBalancerHealthMonitors" and persisted via the
+ * container configuration service to FILE_NAME. Lifecycle (init/start/stop/destroy)
+ * is driven by the Felix dependency manager.
+ */
+public class NeutronLoadBalancerHealthMonitorInterface implements INeutronLoadBalancerHealthMonitorCRUD, IConfigurationContainerAware,
+ IObjectReader {
+ private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerHealthMonitorInterface.class);
+ private static final String FILE_NAME = "neutron.loadBalancerHealthMonitor.conf";
+ private String containerName = null;
+
+ private IClusterContainerServices clusterContainerService = null;
+ private IConfigurationContainerService configurationService;
+ private ConcurrentMap<String, NeutronLoadBalancerHealthMonitor> loadBalancerHealthMonitorDB;
+
+ // methods needed for creating caches
+ void setClusterContainerService(IClusterContainerServices s) {
+ logger.debug("Cluster Service set");
+ clusterContainerService = s;
+ }
+
+ void unsetClusterContainerService(IClusterContainerServices s) {
+ if (clusterContainerService == s) {
+ logger.debug("Cluster Service removed!");
+ clusterContainerService = null;
+ }
+ }
+
+ public void setConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service set: {}", service);
+ configurationService = service;
+ }
+
+ public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service removed: {}", service);
+ configurationService = null;
+ }
+
+ // Creates the clustered cache; failures are logged and swallowed.
+ private void allocateCache() {
+ if (this.clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't create cache");
+ return;
+ }
+ logger.debug("Creating Cache for Neutron LoadBalancerHealthMonitor");
+ try {
+ // neutron caches
+ this.clusterContainerService.createCache("neutronLoadBalancerHealthMonitors",
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+ } catch (CacheConfigException cce) {
+ logger.error("Cache couldn't be created for Neutron LoadBalancerHealthMonitor - check cache mode");
+ } catch (CacheExistException cce) {
+ logger.error("Cache for Neutron LoadBalancerHealthMonitor already exists, destroy and recreate");
+ }
+ // NOTE(review): this success message also prints when a catch block above
+ // ran — consider moving it inside the try.
+ logger.debug("Cache successfully created for Neutron LoadBalancerHealthMonitor");
+ }
+
+ @SuppressWarnings ({"unchecked"})
+ private void retrieveCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't retrieve cache");
+ return;
+ }
+
+ logger.debug("Retrieving cache for Neutron LoadBalancerHealthMonitor");
+ loadBalancerHealthMonitorDB = (ConcurrentMap<String, NeutronLoadBalancerHealthMonitor>) clusterContainerService
+ .getCache("neutronLoadBalancerHealthMonitors");
+ if (loadBalancerHealthMonitorDB == null) {
+ logger.error("Cache couldn't be retrieved for Neutron LoadBalancerHealthMonitor");
+ }
+ logger.debug("Cache was successfully retrieved for Neutron LoadBalancerHealthMonitor");
+ }
+
+ private void destroyCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterMger, can't destroy cache");
+ return;
+ }
+ logger.debug("Destroying Cache for LoadBalancerHealthMonitor");
+ clusterContainerService.destroyCache("neutronLoadBalancerHealthMonitors");
+ }
+
+ // allocate + retrieve the cache, then load persisted configuration into it
+ private void startUp() {
+ allocateCache();
+ retrieveCache();
+ loadConfiguration();
+ }
+
+ /**
+ * Function called by the dependency manager when all the required
+ * dependencies are satisfied
+ */
+ void init(Component c) {
+ Dictionary<?, ?> props = c.getServiceProperties();
+ if (props != null) {
+ this.containerName = (String) props.get("containerName");
+ logger.debug("Running containerName: {}", this.containerName);
+ } else {
+ // In the Global instance case the containerName is empty
+ this.containerName = "";
+ }
+ startUp();
+ }
+
+ /**
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
+ */
+ void destroy() {
+ destroyCache();
+ }
+
+ /**
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
+ */
+ void start() {
+ }
+
+ /**
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
+ */
+ void stop() {
+ }
+
+ // this method uses reflection to update an object from its delta: every
+ // non-null getXxx() value on delta is applied via the matching setXxx() on target.
+
+ private boolean overwrite(Object target, Object delta) {
+ Method[] methods = target.getClass().getMethods();
+
+ for (Method toMethod : methods) {
+ if (toMethod.getDeclaringClass().equals(target.getClass())
+ && toMethod.getName().startsWith("set")) {
+
+ String toName = toMethod.getName();
+ String fromName = toName.replace("set", "get");
+
+ try {
+ Method fromMethod = delta.getClass().getMethod(fromName);
+ Object value = fromMethod.invoke(delta, (Object[]) null);
+ if (value != null) {
+ toMethod.invoke(target, value);
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean neutronLoadBalancerHealthMonitorExists(String uuid) {
+ return loadBalancerHealthMonitorDB.containsKey(uuid);
+ }
+
+ @Override
+ public NeutronLoadBalancerHealthMonitor getNeutronLoadBalancerHealthMonitor(String uuid) {
+ if (!neutronLoadBalancerHealthMonitorExists(uuid)) {
+ logger.debug("No LoadBalancerHealthMonitor has Been Defined");
+ return null;
+ }
+ return loadBalancerHealthMonitorDB.get(uuid);
+ }
+
+ @Override
+ public List<NeutronLoadBalancerHealthMonitor> getAllNeutronLoadBalancerHealthMonitors() {
+ Set<NeutronLoadBalancerHealthMonitor> allLoadBalancerHealthMonitors = new HashSet<NeutronLoadBalancerHealthMonitor>();
+ for (Entry<String, NeutronLoadBalancerHealthMonitor> entry : loadBalancerHealthMonitorDB.entrySet()) {
+ NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor = entry.getValue();
+ allLoadBalancerHealthMonitors.add(loadBalancerHealthMonitor);
+ }
+ logger.debug("Exiting getLoadBalancerHealthMonitors, Found {} OpenStackLoadBalancerHealthMonitor", allLoadBalancerHealthMonitors.size());
+ List<NeutronLoadBalancerHealthMonitor> ans = new ArrayList<NeutronLoadBalancerHealthMonitor>();
+ ans.addAll(allLoadBalancerHealthMonitors);
+ return ans;
+ }
+
+ @Override
+ public boolean addNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor input) {
+ if (neutronLoadBalancerHealthMonitorExists(input.getLoadBalancerHealthMonitorID())) {
+ return false;
+ }
+ loadBalancerHealthMonitorDB.putIfAbsent(input.getLoadBalancerHealthMonitorID(), input);
+ //TODO: add code to find INeutronLoadBalancerHealthMonitorAware services and call networkCreated on them
+ return true;
+ }
+
+ @Override
+ public boolean removeNeutronLoadBalancerHealthMonitor(String uuid) {
+ if (!neutronLoadBalancerHealthMonitorExists(uuid)) {
+ return false;
+ }
+ loadBalancerHealthMonitorDB.remove(uuid);
+ //TODO: add code to find INeutronLoadBalancerHealthMonitorAware services and call networkDeleted on them
+ return true;
+ }
+
+ @Override
+ public boolean updateNeutronLoadBalancerHealthMonitor(String uuid, NeutronLoadBalancerHealthMonitor delta) {
+ if (!neutronLoadBalancerHealthMonitorExists(uuid)) {
+ return false;
+ }
+ NeutronLoadBalancerHealthMonitor target = loadBalancerHealthMonitorDB.get(uuid);
+ return overwrite(target, delta);
+ }
+
+ @Override
+ public boolean neutronLoadBalancerHealthMonitorInUse(String loadBalancerHealthMonitorUUID) {
+ // NOTE(review): returns true when the monitor does NOT exist — verify this
+ // inverted "in use" semantic is intentional (likely a placeholder).
+ return !neutronLoadBalancerHealthMonitorExists(loadBalancerHealthMonitorUUID);
+ }
+
+ // Reads persisted objects back into the cache on startup.
+ private void loadConfiguration() {
+ for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+ NeutronLoadBalancerHealthMonitor nn = (NeutronLoadBalancerHealthMonitor) conf;
+ loadBalancerHealthMonitorDB.put(nn.getLoadBalancerHealthMonitorID(), nn);
+ }
+ }
+
+ @Override
+ public Status saveConfiguration() {
+ return configurationService.persistConfiguration(new ArrayList<ConfigurationObject>(loadBalancerHealthMonitorDB.values()),
+ FILE_NAME);
+ }
+
+ @Override
+ public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+ return ois.readObject();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+public class NeutronLoadBalancerInterface implements INeutronLoadBalancerCRUD, IConfigurationContainerAware,
+ IObjectReader {
+ private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerInterface.class);
+ private static final String FILE_NAME = "neutron.loadBalancer.conf";
+ private String containerName = null;
+
+ private IClusterContainerServices clusterContainerService = null;
+ private IConfigurationContainerService configurationService;
+ private ConcurrentMap<String, NeutronLoadBalancer> loadBalancerDB;
+
+ // methods needed for creating caches
+ void setClusterContainerService(IClusterContainerServices s) {
+ logger.debug("Cluster Service set");
+ clusterContainerService = s;
+ }
+
+ void unsetClusterContainerService(IClusterContainerServices s) {
+ if (clusterContainerService == s) {
+ logger.debug("Cluster Service removed!");
+ clusterContainerService = null;
+ }
+ }
+
+ public void setConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service set: {}", service);
+ configurationService = service;
+ }
+
+ public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service removed: {}", service);
+ configurationService = null;
+ }
+
+ private void allocateCache() {
+ if (this.clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't create cache");
+ return;
+ }
+ logger.debug("Creating Cache for Neutron LoadBalancer");
+ try {
+ // neutron caches
+ this.clusterContainerService.createCache("neutronLoadBalancers",
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+ } catch (CacheConfigException cce) {
+ logger.error("Cache couldn't be created for Neutron LoadBalancer - check cache mode");
+ } catch (CacheExistException cce) {
+ logger.error("Cache for Neutron LoadBalancer already exists, destroy and recreate");
+ }
+ logger.debug("Cache successfully created for Neutron LoadBalancer");
+ }
+
+ @SuppressWarnings ({"unchecked"})
+ private void retrieveCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't retrieve cache");
+ return;
+ }
+
+ logger.debug("Retrieving cache for Neutron LoadBalancer");
+ loadBalancerDB = (ConcurrentMap<String, NeutronLoadBalancer>) clusterContainerService
+ .getCache("neutronLoadBalancers");
+ if (loadBalancerDB == null) {
+ logger.error("Cache couldn't be retrieved for Neutron LoadBalancer");
+ }
+ logger.debug("Cache was successfully retrieved for Neutron LoadBalancer");
+ }
+
+ private void destroyCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterMger, can't destroy cache");
+ return;
+ }
+ logger.debug("Destroying Cache for LoadBalancer");
+ clusterContainerService.destroyCache("neutronLoadBalancers");
+ }
+
+ private void startUp() {
+ allocateCache();
+ retrieveCache();
+ loadConfiguration();
+ }
+
+ /**
+ * Function called by the dependency manager when all the required
+ * dependencies are satisfied
+ */
+ void init(Component c) {
+ Dictionary<?, ?> props = c.getServiceProperties();
+ if (props != null) {
+ this.containerName = (String) props.get("containerName");
+ logger.debug("Running containerName: {}", this.containerName);
+ } else {
+ // In the Global instance case the containerName is empty
+ this.containerName = "";
+ }
+ startUp();
+ }
+
+ /**
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
+ */
+ void destroy() {
+ destroyCache();
+ }
+
+ /**
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
+ */
+ void start() {
+ }
+
+ /**
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
+ */
+ void stop() {
+ }
+
+ // this method uses reflection to update an object from it's delta.
+
+ private boolean overwrite(Object target, Object delta) {
+ Method[] methods = target.getClass().getMethods();
+
+ for (Method toMethod : methods) {
+ if (toMethod.getDeclaringClass().equals(target.getClass())
+ && toMethod.getName().startsWith("set")) {
+
+ String toName = toMethod.getName();
+ String fromName = toName.replace("set", "get");
+
+ try {
+ Method fromMethod = delta.getClass().getMethod(fromName);
+ Object value = fromMethod.invoke(delta, (Object[]) null);
+ if (value != null) {
+ toMethod.invoke(target, value);
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean neutronLoadBalancerExists(String uuid) {
+ return loadBalancerDB.containsKey(uuid);
+ }
+
+ @Override
+ public NeutronLoadBalancer getNeutronLoadBalancer(String uuid) {
+ if (!neutronLoadBalancerExists(uuid)) {
+ logger.debug("No LoadBalancer Have Been Defined");
+ return null;
+ }
+ return loadBalancerDB.get(uuid);
+ }
+
+ @Override
+ public List<NeutronLoadBalancer> getAllNeutronLoadBalancers() {
+ Set<NeutronLoadBalancer> allLoadBalancers = new HashSet<NeutronLoadBalancer>();
+ for (Entry<String, NeutronLoadBalancer> entry : loadBalancerDB.entrySet()) {
+ NeutronLoadBalancer loadBalancer = entry.getValue();
+ allLoadBalancers.add(loadBalancer);
+ }
+ logger.debug("Exiting getLoadBalancers, Found {} OpenStackLoadBalancer", allLoadBalancers.size());
+ List<NeutronLoadBalancer> ans = new ArrayList<NeutronLoadBalancer>();
+ ans.addAll(allLoadBalancers);
+ return ans;
+ }
+
+ @Override
+ public boolean addNeutronLoadBalancer(NeutronLoadBalancer input) {
+ if (neutronLoadBalancerExists(input.getLoadBalancerID())) {
+ return false;
+ }
+ loadBalancerDB.putIfAbsent(input.getLoadBalancerID(), input);
+ //TODO: add code to find INeutronLoadBalancerAware services and call newtorkCreated on them
+ return true;
+ }
+
+ @Override
+ public boolean removeNeutronLoadBalancer(String uuid) {
+ if (!neutronLoadBalancerExists(uuid)) {
+ return false;
+ }
+ loadBalancerDB.remove(uuid);
+ //TODO: add code to find INeutronLoadBalancerAware services and call newtorkDeleted on them
+ return true;
+ }
+
+ @Override
+ public boolean updateNeutronLoadBalancer(String uuid, NeutronLoadBalancer delta) {
+ // Patch semantics: only non-null getters of delta are copied onto the
+ // stored object via the reflective overwrite() helper.
+ if (!neutronLoadBalancerExists(uuid)) {
+ return false;
+ }
+ NeutronLoadBalancer target = loadBalancerDB.get(uuid);
+ return overwrite(target, delta);
+ }
+
+ @Override
+ public boolean neutronLoadBalancerInUse(String loadBalancerUUID) {
+ // NOTE(review): returns true when the UUID does NOT exist, which reads
+ // inverted relative to the method name — confirm intended semantics
+ // against the INeutronLoadBalancerCRUD contract and its callers.
+ return !neutronLoadBalancerExists(loadBalancerUUID);
+ }
+
+ private void loadConfiguration() {
+ // Rehydrate the cache from the persisted configuration file at startup.
+ for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+ NeutronLoadBalancer nn = (NeutronLoadBalancer) conf;
+ loadBalancerDB.put(nn.getLoadBalancerID(), nn);
+ }
+ }
+
+ @Override
+ public Status saveConfiguration() {
+ // Persist the current cache contents to FILE_NAME via the container service.
+ return configurationService.persistConfiguration(new ArrayList<ConfigurationObject>(loadBalancerDB.values()),
+ FILE_NAME);
+ }
+
+ @Override
+ public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+ // IObjectReader hook used by the configuration service when deserializing
+ // the persisted file; plain Java deserialization, no post-processing.
+ return ois.readObject();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerListener;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Cluster-cache backed CRUD implementation for Neutron LoadBalancerListener
+ * objects, with persistence through the container configuration service.
+ */
+public class NeutronLoadBalancerListenerInterface implements INeutronLoadBalancerListenerCRUD, IConfigurationContainerAware,
+ IObjectReader {
+ private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerListenerInterface.class);
+ private static final String FILE_NAME = "neutron.loadBalancerListener.conf";
+ private String containerName = null;
+
+ private IClusterContainerServices clusterContainerService = null;
+ private IConfigurationContainerService configurationService;
+ private ConcurrentMap<String, NeutronLoadBalancerListener> loadBalancerListenerDB;
+
+ // methods needed for creating caches
+ void setClusterContainerService(IClusterContainerServices s) {
+ logger.debug("Cluster Service set");
+ clusterContainerService = s;
+ }
+
+ void unsetClusterContainerService(IClusterContainerServices s) {
+ if (clusterContainerService == s) {
+ logger.debug("Cluster Service removed!");
+ clusterContainerService = null;
+ }
+ }
+
+ public void setConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service set: {}", service);
+ configurationService = service;
+ }
+
+ public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service removed: {}", service);
+ configurationService = null;
+ }
+
+ private void allocateCache() {
+ if (this.clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't create cache");
+ return;
+ }
+ logger.debug("Creating Cache for Neutron LoadBalancerListener");
+ try {
+ // neutron caches
+ this.clusterContainerService.createCache("neutronLoadBalancerListeners",
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+ } catch (CacheConfigException cce) {
+ logger.error("Cache couldn't be created for Neutron LoadBalancerListener - check cache mode");
+ } catch (CacheExistException cce) {
+ logger.error("Cache for Neutron LoadBalancerListener already exists, destroy and recreate");
+ }
+ logger.debug("Cache successfully created for Neutron LoadBalancerListener");
+ }
+
+ @SuppressWarnings ({"unchecked"})
+ private void retrieveCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't retrieve cache");
+ return;
+ }
+
+ logger.debug("Retrieving cache for Neutron LoadBalancerListener");
+ loadBalancerListenerDB = (ConcurrentMap<String, NeutronLoadBalancerListener>) clusterContainerService
+ .getCache("neutronLoadBalancerListeners");
+ if (loadBalancerListenerDB == null) {
+ logger.error("Cache couldn't be retrieved for Neutron LoadBalancerListener");
+ }
+ logger.debug("Cache was successfully retrieved for Neutron LoadBalancerListener");
+ }
+
+ private void destroyCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterMger, can't destroy cache");
+ return;
+ }
+ logger.debug("Destroying Cache for LoadBalancerListener");
+ clusterContainerService.destroyCache("neutronLoadBalancerListeners");
+ }
+
+ private void startUp() {
+ allocateCache();
+ retrieveCache();
+ loadConfiguration();
+ }
+
+ /**
+ * Function called by the dependency manager when all the required
+ * dependencies are satisfied
+ */
+ void init(Component c) {
+ Dictionary<?, ?> props = c.getServiceProperties();
+ if (props != null) {
+ this.containerName = (String) props.get("containerName");
+ logger.debug("Running containerName: {}", this.containerName);
+ } else {
+ // In the Global instance case the containerName is empty
+ this.containerName = "";
+ }
+ startUp();
+ }
+
+ /**
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
+ */
+ void destroy() {
+ destroyCache();
+ }
+
+ /**
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
+ */
+ void start() {
+ }
+
+ /**
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
+ */
+ void stop() {
+ }
+
+ // this method uses reflection to update an object from it's delta:
+ // every setter on target whose matching getter on delta returns non-null
+ // has that value copied over (patch semantics).
+ private boolean overwrite(Object target, Object delta) {
+ Method[] methods = target.getClass().getMethods();
+
+ for (Method toMethod : methods) {
+ if (toMethod.getDeclaringClass().equals(target.getClass())
+ && toMethod.getName().startsWith("set")) {
+
+ String toName = toMethod.getName();
+ // Derive the getter name from the setter prefix only; a blanket
+ // replace("set", "get") would also corrupt any later occurrence
+ // of "set" in the name (e.g. setOffset -> getOffget).
+ String fromName = "get" + toName.substring(3);
+
+ try {
+ Method fromMethod = delta.getClass().getMethod(fromName);
+ Object value = fromMethod.invoke(delta, (Object[]) null);
+ if (value != null) {
+ toMethod.invoke(target, value);
+ }
+ } catch (Exception e) {
+ // Log through the class logger instead of printStackTrace so
+ // failures show up in the controller log.
+ logger.error("Reflective copy of {} failed", fromName, e);
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean neutronLoadBalancerListenerExists(String uuid) {
+ return loadBalancerListenerDB.containsKey(uuid);
+ }
+
+ @Override
+ public NeutronLoadBalancerListener getNeutronLoadBalancerListener(String uuid) {
+ if (!neutronLoadBalancerListenerExists(uuid)) {
+ logger.debug("No LoadBalancerListener Have Been Defined");
+ return null;
+ }
+ return loadBalancerListenerDB.get(uuid);
+ }
+
+ @Override
+ public List<NeutronLoadBalancerListener> getAllNeutronLoadBalancerListeners() {
+ // Snapshot copy so callers are isolated from concurrent cache changes.
+ Set<NeutronLoadBalancerListener> allLoadBalancerListeners = new HashSet<NeutronLoadBalancerListener>();
+ for (Entry<String, NeutronLoadBalancerListener> entry : loadBalancerListenerDB.entrySet()) {
+ NeutronLoadBalancerListener loadBalancerListener = entry.getValue();
+ allLoadBalancerListeners.add(loadBalancerListener);
+ }
+ logger.debug("Exiting getLoadBalancerListeners, Found {} OpenStackLoadBalancerListener", allLoadBalancerListeners.size());
+ List<NeutronLoadBalancerListener> ans = new ArrayList<NeutronLoadBalancerListener>();
+ ans.addAll(allLoadBalancerListeners);
+ return ans;
+ }
+
+ @Override
+ public boolean addNeutronLoadBalancerListener(NeutronLoadBalancerListener input) {
+ if (neutronLoadBalancerListenerExists(input.getLoadBalancerListenerID())) {
+ return false;
+ }
+ loadBalancerListenerDB.putIfAbsent(input.getLoadBalancerListenerID(), input);
+ //TODO: add code to find INeutronLoadBalancerListenerAware services and call networkCreated on them
+ return true;
+ }
+
+ @Override
+ public boolean removeNeutronLoadBalancerListener(String uuid) {
+ if (!neutronLoadBalancerListenerExists(uuid)) {
+ return false;
+ }
+ loadBalancerListenerDB.remove(uuid);
+ //TODO: add code to find INeutronLoadBalancerListenerAware services and call networkDeleted on them
+ return true;
+ }
+
+ @Override
+ public boolean updateNeutronLoadBalancerListener(String uuid, NeutronLoadBalancerListener delta) {
+ if (!neutronLoadBalancerListenerExists(uuid)) {
+ return false;
+ }
+ NeutronLoadBalancerListener target = loadBalancerListenerDB.get(uuid);
+ return overwrite(target, delta);
+ }
+
+ @Override
+ public boolean neutronLoadBalancerListenerInUse(String loadBalancerListenerUUID) {
+ // NOTE(review): true when the UUID does NOT exist — looks inverted
+ // relative to the method name; confirm against the CRUD contract.
+ return !neutronLoadBalancerListenerExists(loadBalancerListenerUUID);
+ }
+
+ private void loadConfiguration() {
+ for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+ NeutronLoadBalancerListener nn = (NeutronLoadBalancerListener) conf;
+ loadBalancerListenerDB.put(nn.getLoadBalancerListenerID(), nn);
+ }
+ }
+
+ @Override
+ public Status saveConfiguration() {
+ return configurationService.persistConfiguration(new ArrayList<ConfigurationObject>(loadBalancerListenerDB.values()),
+ FILE_NAME);
+ }
+
+ @Override
+ public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+ return ois.readObject();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Cluster-cache backed CRUD implementation for Neutron LoadBalancerPool
+ * objects, with persistence through the container configuration service.
+ */
+public class NeutronLoadBalancerPoolInterface implements INeutronLoadBalancerPoolCRUD, IConfigurationContainerAware,
+ IObjectReader {
+ private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerPoolInterface.class);
+ private static final String FILE_NAME = "neutron.loadBalancerPool.conf";
+ private String containerName = null;
+
+ private IClusterContainerServices clusterContainerService = null;
+ private IConfigurationContainerService configurationService;
+ private ConcurrentMap<String, NeutronLoadBalancerPool> loadBalancerPoolDB;
+
+ // methods needed for creating caches
+ void setClusterContainerService(IClusterContainerServices s) {
+ logger.debug("Cluster Service set");
+ clusterContainerService = s;
+ }
+
+ void unsetClusterContainerService(IClusterContainerServices s) {
+ if (clusterContainerService == s) {
+ logger.debug("Cluster Service removed!");
+ clusterContainerService = null;
+ }
+ }
+
+ public void setConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service set: {}", service);
+ configurationService = service;
+ }
+
+ public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service removed: {}", service);
+ configurationService = null;
+ }
+
+ private void allocateCache() {
+ if (this.clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't create cache");
+ return;
+ }
+ logger.debug("Creating Cache for Neutron LoadBalancerPool");
+ try {
+ // neutron caches
+ this.clusterContainerService.createCache("neutronLoadBalancerPools",
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+ } catch (CacheConfigException cce) {
+ logger.error("Cache couldn't be created for Neutron LoadBalancerPool - check cache mode");
+ } catch (CacheExistException cce) {
+ logger.error("Cache for Neutron LoadBalancerPool already exists, destroy and recreate");
+ }
+ logger.debug("Cache successfully created for Neutron LoadBalancerPool");
+ }
+
+ @SuppressWarnings ({"unchecked"})
+ private void retrieveCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't retrieve cache");
+ return;
+ }
+
+ logger.debug("Retrieving cache for Neutron LoadBalancerPool");
+ loadBalancerPoolDB = (ConcurrentMap<String, NeutronLoadBalancerPool>) clusterContainerService
+ .getCache("neutronLoadBalancerPools");
+ if (loadBalancerPoolDB == null) {
+ logger.error("Cache couldn't be retrieved for Neutron LoadBalancerPool");
+ }
+ logger.debug("Cache was successfully retrieved for Neutron LoadBalancerPool");
+ }
+
+ private void destroyCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterMger, can't destroy cache");
+ return;
+ }
+ logger.debug("Destroying Cache for LoadBalancerPool");
+ clusterContainerService.destroyCache("neutronLoadBalancerPools");
+ }
+
+ private void startUp() {
+ allocateCache();
+ retrieveCache();
+ loadConfiguration();
+ }
+
+ /**
+ * Function called by the dependency manager when all the required
+ * dependencies are satisfied
+ */
+ void init(Component c) {
+ Dictionary<?, ?> props = c.getServiceProperties();
+ if (props != null) {
+ this.containerName = (String) props.get("containerName");
+ logger.debug("Running containerName: {}", this.containerName);
+ } else {
+ // In the Global instance case the containerName is empty
+ this.containerName = "";
+ }
+ startUp();
+ }
+
+ /**
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
+ */
+ void destroy() {
+ destroyCache();
+ }
+
+ /**
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
+ */
+ void start() {
+ }
+
+ /**
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
+ */
+ void stop() {
+ }
+
+ // this method uses reflection to update an object from it's delta:
+ // every setter on target whose matching getter on delta returns non-null
+ // has that value copied over (patch semantics).
+ private boolean overwrite(Object target, Object delta) {
+ Method[] methods = target.getClass().getMethods();
+
+ for (Method toMethod : methods) {
+ if (toMethod.getDeclaringClass().equals(target.getClass())
+ && toMethod.getName().startsWith("set")) {
+
+ String toName = toMethod.getName();
+ // Derive the getter name from the setter prefix only; a blanket
+ // replace("set", "get") would also corrupt any later occurrence
+ // of "set" in the name (e.g. setOffset -> getOffget).
+ String fromName = "get" + toName.substring(3);
+
+ try {
+ Method fromMethod = delta.getClass().getMethod(fromName);
+ Object value = fromMethod.invoke(delta, (Object[]) null);
+ if (value != null) {
+ toMethod.invoke(target, value);
+ }
+ } catch (Exception e) {
+ // Log through the class logger instead of printStackTrace so
+ // failures show up in the controller log.
+ logger.error("Reflective copy of {} failed", fromName, e);
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean neutronLoadBalancerPoolExists(String uuid) {
+ return loadBalancerPoolDB.containsKey(uuid);
+ }
+
+ @Override
+ public NeutronLoadBalancerPool getNeutronLoadBalancerPool(String uuid) {
+ if (!neutronLoadBalancerPoolExists(uuid)) {
+ logger.debug("No LoadBalancerPool has Been Defined");
+ return null;
+ }
+ return loadBalancerPoolDB.get(uuid);
+ }
+
+ @Override
+ public List<NeutronLoadBalancerPool> getAllNeutronLoadBalancerPools() {
+ // Snapshot copy so callers are isolated from concurrent cache changes.
+ Set<NeutronLoadBalancerPool> allLoadBalancerPools = new HashSet<NeutronLoadBalancerPool>();
+ for (Entry<String, NeutronLoadBalancerPool> entry : loadBalancerPoolDB.entrySet()) {
+ NeutronLoadBalancerPool loadBalancerPool = entry.getValue();
+ allLoadBalancerPools.add(loadBalancerPool);
+ }
+ logger.debug("Exiting getLoadBalancerPools, Found {} OpenStackLoadBalancerPool", allLoadBalancerPools.size());
+ List<NeutronLoadBalancerPool> ans = new ArrayList<NeutronLoadBalancerPool>();
+ ans.addAll(allLoadBalancerPools);
+ return ans;
+ }
+
+ @Override
+ public boolean addNeutronLoadBalancerPool(NeutronLoadBalancerPool input) {
+ if (neutronLoadBalancerPoolExists(input.getLoadBalancerPoolID())) {
+ return false;
+ }
+ loadBalancerPoolDB.putIfAbsent(input.getLoadBalancerPoolID(), input);
+ //TODO: add code to find INeutronLoadBalancerPoolAware services and call networkCreated on them
+ return true;
+ }
+
+ @Override
+ public boolean removeNeutronLoadBalancerPool(String uuid) {
+ if (!neutronLoadBalancerPoolExists(uuid)) {
+ return false;
+ }
+ loadBalancerPoolDB.remove(uuid);
+ //TODO: add code to find INeutronLoadBalancerPoolAware services and call networkDeleted on them
+ return true;
+ }
+
+ @Override
+ public boolean updateNeutronLoadBalancerPool(String uuid, NeutronLoadBalancerPool delta) {
+ if (!neutronLoadBalancerPoolExists(uuid)) {
+ return false;
+ }
+ NeutronLoadBalancerPool target = loadBalancerPoolDB.get(uuid);
+ return overwrite(target, delta);
+ }
+
+ @Override
+ public boolean neutronLoadBalancerPoolInUse(String loadBalancerPoolUUID) {
+ // NOTE(review): true when the UUID does NOT exist — looks inverted
+ // relative to the method name; confirm against the CRUD contract.
+ return !neutronLoadBalancerPoolExists(loadBalancerPoolUUID);
+ }
+
+ private void loadConfiguration() {
+ for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+ NeutronLoadBalancerPool nn = (NeutronLoadBalancerPool) conf;
+ loadBalancerPoolDB.put(nn.getLoadBalancerPoolID(), nn);
+ }
+ }
+
+ @Override
+ public Status saveConfiguration() {
+ return configurationService.persistConfiguration(new ArrayList<ConfigurationObject>(loadBalancerPoolDB.values()),
+ FILE_NAME);
+ }
+
+ @Override
+ public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+ return ois.readObject();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * Cluster-cache backed CRUD implementation for Neutron LoadBalancerPoolMember
+ * objects, with persistence through the container configuration service.
+ */
+public class NeutronLoadBalancerPoolMemberInterface
+ implements INeutronLoadBalancerPoolMemberCRUD, IConfigurationContainerAware,
+ IObjectReader {
+ private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerPoolMemberInterface.class);
+ private static final String FILE_NAME = "neutron.loadBalancerPoolMember.conf";
+ private String containerName = null;
+
+ private IClusterContainerServices clusterContainerService = null;
+ private IConfigurationContainerService configurationService;
+ private ConcurrentMap<String, NeutronLoadBalancerPoolMember> loadBalancerPoolMemberDB;
+
+ // methods needed for creating caches
+ void setClusterContainerService(IClusterContainerServices s) {
+ logger.debug("Cluster Service set");
+ clusterContainerService = s;
+ }
+
+ void unsetClusterContainerService(IClusterContainerServices s) {
+ if (clusterContainerService == s) {
+ logger.debug("Cluster Service removed!");
+ clusterContainerService = null;
+ }
+ }
+
+ public void setConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service set: {}", service);
+ configurationService = service;
+ }
+
+ public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+ logger.trace("Configuration service removed: {}", service);
+ configurationService = null;
+ }
+
+ private void allocateCache() {
+ if (this.clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't create cache");
+ return;
+ }
+ logger.debug("Creating Cache for Neutron LoadBalancerPoolMember");
+ try {
+ // neutron caches
+ this.clusterContainerService.createCache("neutronLoadBalancerPoolMembers",
+ EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+ } catch (CacheConfigException cce) {
+ logger.error("Cache couldn't be created for Neutron LoadBalancerPoolMember - check cache mode");
+ } catch (CacheExistException cce) {
+ logger.error("Cache for Neutron LoadBalancerPoolMember already exists, destroy and recreate");
+ }
+ logger.debug("Cache successfully created for Neutron LoadBalancerPoolMember");
+ }
+
+ @SuppressWarnings({"unchecked"})
+ private void retrieveCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterContainerService, can't retrieve cache");
+ return;
+ }
+
+ logger.debug("Retrieving cache for Neutron LoadBalancerPoolMember");
+ loadBalancerPoolMemberDB = (ConcurrentMap<String, NeutronLoadBalancerPoolMember>) clusterContainerService
+ .getCache("neutronLoadBalancerPoolMembers");
+ if (loadBalancerPoolMemberDB == null) {
+ logger.error("Cache couldn't be retrieved for Neutron LoadBalancerPoolMember");
+ }
+ logger.debug("Cache was successfully retrieved for Neutron LoadBalancerPoolMember");
+ }
+
+ private void destroyCache() {
+ if (clusterContainerService == null) {
+ logger.error("un-initialized clusterMger, can't destroy cache");
+ return;
+ }
+ // Fixed copy-paste: the previous message referred to "HostTracker".
+ logger.debug("Destroying Cache for LoadBalancerPoolMember");
+ clusterContainerService.destroyCache("neutronLoadBalancerPoolMembers");
+ }
+
+ private void startUp() {
+ allocateCache();
+ retrieveCache();
+ loadConfiguration();
+ }
+
+ /**
+ * Function called by the dependency manager when all the required
+ * dependencies are satisfied
+ */
+ void init(Component c) {
+ Dictionary<?, ?> props = c.getServiceProperties();
+ if (props != null) {
+ this.containerName = (String) props.get("containerName");
+ logger.debug("Running containerName: {}", this.containerName);
+ } else {
+ // In the Global instance case the containerName is empty
+ this.containerName = "";
+ }
+ startUp();
+ }
+
+ /**
+ * Function called by the dependency manager when at least one dependency
+ * become unsatisfied or when the component is shutting down because for
+ * example bundle is being stopped.
+ */
+ void destroy() {
+ destroyCache();
+ }
+
+ /**
+ * Function called by dependency manager after "init ()" is called and after
+ * the services provided by the class are registered in the service registry
+ */
+ void start() {
+ }
+
+ /**
+ * Function called by the dependency manager before the services exported by
+ * the component are unregistered, this will be followed by a "destroy ()"
+ * calls
+ */
+ void stop() {
+ }
+
+ // this method uses reflection to update an object from it's delta:
+ // every setter on target whose matching getter on delta returns non-null
+ // has that value copied over (patch semantics).
+ private boolean overwrite(Object target, Object delta) {
+ Method[] methods = target.getClass().getMethods();
+
+ for (Method toMethod : methods) {
+ if (toMethod.getDeclaringClass().equals(target.getClass())
+ && toMethod.getName().startsWith("set")) {
+
+ String toName = toMethod.getName();
+ // Derive the getter name from the setter prefix only; a blanket
+ // replace("set", "get") would also corrupt any later occurrence
+ // of "set" in the name (e.g. setOffset -> getOffget).
+ String fromName = "get" + toName.substring(3);
+
+ try {
+ Method fromMethod = delta.getClass().getMethod(fromName);
+ Object value = fromMethod.invoke(delta, (Object[]) null);
+ if (value != null) {
+ toMethod.invoke(target, value);
+ }
+ } catch (Exception e) {
+ // Log through the class logger instead of printStackTrace so
+ // failures show up in the controller log.
+ logger.error("Reflective copy of {} failed", fromName, e);
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean neutronLoadBalancerPoolMemberExists(String uuid) {
+ return loadBalancerPoolMemberDB.containsKey(uuid);
+ }
+
+ @Override
+ public NeutronLoadBalancerPoolMember getNeutronLoadBalancerPoolMember(String uuid) {
+ if (!neutronLoadBalancerPoolMemberExists(uuid)) {
+ logger.debug("No LoadBalancerPoolMember Have Been Defined");
+ return null;
+ }
+ return loadBalancerPoolMemberDB.get(uuid);
+ }
+
+ @Override
+ public List<NeutronLoadBalancerPoolMember> getAllNeutronLoadBalancerPoolMembers() {
+ // Snapshot copy so callers are isolated from concurrent cache changes.
+ Set<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = new HashSet<NeutronLoadBalancerPoolMember>();
+ for (Map.Entry<String, NeutronLoadBalancerPoolMember> entry : loadBalancerPoolMemberDB.entrySet()) {
+ NeutronLoadBalancerPoolMember loadBalancerPoolMember = entry.getValue();
+ allLoadBalancerPoolMembers.add(loadBalancerPoolMember);
+ }
+ logger.debug("Exiting getLoadBalancerPoolMembers, Found {} OpenStackLoadBalancerPoolMember",
+ allLoadBalancerPoolMembers.size());
+ List<NeutronLoadBalancerPoolMember> ans = new ArrayList<NeutronLoadBalancerPoolMember>();
+ ans.addAll(allLoadBalancerPoolMembers);
+ return ans;
+ }
+
+ @Override
+ public boolean addNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember input) {
+ if (neutronLoadBalancerPoolMemberExists(input.getPoolMemberID())) {
+ return false;
+ }
+ loadBalancerPoolMemberDB.putIfAbsent(input.getPoolMemberID(), input);
+ return true;
+ }
+
+ @Override
+ public boolean removeNeutronLoadBalancerPoolMember(String uuid) {
+ if (!neutronLoadBalancerPoolMemberExists(uuid)) {
+ return false;
+ }
+ loadBalancerPoolMemberDB.remove(uuid);
+ return true;
+ }
+
+ @Override
+ public boolean updateNeutronLoadBalancerPoolMember(String uuid, NeutronLoadBalancerPoolMember delta) {
+ if (!neutronLoadBalancerPoolMemberExists(uuid)) {
+ return false;
+ }
+ NeutronLoadBalancerPoolMember target = loadBalancerPoolMemberDB.get(uuid);
+ return overwrite(target, delta);
+ }
+
+ @Override
+ public boolean neutronLoadBalancerPoolMemberInUse(String loadBalancerPoolMemberID) {
+ // NOTE(review): true when the ID does NOT exist — looks inverted
+ // relative to the method name; confirm against the CRUD contract.
+ return !neutronLoadBalancerPoolMemberExists(loadBalancerPoolMemberID);
+ }
+
+ private void loadConfiguration() {
+ for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+ NeutronLoadBalancerPoolMember nn = (NeutronLoadBalancerPoolMember) conf;
+ loadBalancerPoolMemberDB.put(nn.getPoolMemberID(), nn);
+ }
+ }
+
+ @Override
+ public Status saveConfiguration() {
+ return configurationService.persistConfiguration(
+ new ArrayList<ConfigurationObject>(loadBalancerPoolMemberDB.values()),
+ FILE_NAME);
+ }
+
+ @Override
+ public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+ return ois.readObject();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the methods a service that wishes to be aware of LoadBalancer Rules needs to implement
+ *
+ */
+
+public interface INeutronLoadBalancerAware {
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancer can be created
+ *
+ * @param loadBalancer
+ * instance of proposed new LoadBalancer object
+ * @return integer
+ * the return value is understood to be a HTTP status code. A return value outside of 200 through 299
+ * results in the create operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canCreateNeutronLoadBalancer(NeutronLoadBalancer loadBalancer);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancer has been created
+ *
+ * @param loadBalancer
+ * instance of new LoadBalancer object
+ */
+ public void neutronLoadBalancerCreated(NeutronLoadBalancer loadBalancer);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancer can be changed using the specified
+ * delta
+ *
+ * @param delta
+ * updates to the loadBalancer object using patch semantics
+ * @param original
+ * instance of the LoadBalancer object to be updated
+ * @return integer
+ * the return value is understood to be a HTTP status code. A return value outside of 200 through 299
+ * results in the update operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canUpdateNeutronLoadBalancer(NeutronLoadBalancer delta, NeutronLoadBalancer original);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancer has been updated
+ *
+ * @param loadBalancer
+ * instance of modified LoadBalancer object
+ */
+ public void neutronLoadBalancerUpdated(NeutronLoadBalancer loadBalancer);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancer can be deleted
+ *
+ * @param loadBalancer
+ * instance of the LoadBalancer object to be deleted
+ * @return integer
+ * the return value is understood to be a HTTP status code. A return value outside of 200 through 299
+ * results in the delete operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canDeleteNeutronLoadBalancer(NeutronLoadBalancer loadBalancer);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancer has been deleted
+ *
+ * @param loadBalancer
+ * instance of deleted LoadBalancer object
+ */
+ public void neutronLoadBalancerDeleted(NeutronLoadBalancer loadBalancer);
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD (create, read, update, delete) of
+ * NB (northbound) OpenStack LoadBalancer objects
+ *
+ */
+
+public interface INeutronLoadBalancerCRUD {
+ /**
+ * Applications call this interface method to determine if a particular
+ * LoadBalancer object exists
+ *
+ * @param uuid
+ * UUID of the LoadBalancer object
+ * @return boolean whether a LoadBalancer with that UUID exists
+ */
+
+ public boolean neutronLoadBalancerExists(String uuid);
+
+ /**
+ * Applications call this interface method to return the particular
+ * LoadBalancer object with the given UUID, if one exists
+ *
+ * @param uuid
+ * UUID of the LoadBalancer object
+ * @return {@link NeutronLoadBalancer}
+ * the matching LoadBalancer object (implementations presumably
+ * return null when none exists -- confirm against the implementation)
+ */
+
+ public NeutronLoadBalancer getNeutronLoadBalancer(String uuid);
+
+ /**
+ * Applications call this interface method to return all LoadBalancer objects
+ *
+ * @return List of NeutronLoadBalancer objects
+ */
+
+ public List<NeutronLoadBalancer> getAllNeutronLoadBalancers();
+
+ /**
+ * Applications call this interface method to add a LoadBalancer object to the
+ * concurrent map
+ *
+ * @param input
+ * NeutronLoadBalancer object to add
+ * @return boolean on whether the object was added or not
+ */
+
+ public boolean addNeutronLoadBalancer(NeutronLoadBalancer input);
+
+ /**
+ * Applications call this interface method to remove a Neutron LoadBalancer object from the
+ * concurrent map
+ *
+ * @param uuid
+ * identifier for the LoadBalancer object
+ * @return boolean on whether the object was removed or not
+ */
+
+ public boolean removeNeutronLoadBalancer(String uuid);
+
+ /**
+ * Applications call this interface method to edit a LoadBalancer object
+ *
+ * @param uuid
+ * identifier of the LoadBalancer object
+ * @param delta
+ * NeutronLoadBalancer object containing changes to apply
+ * @return boolean on whether the object was updated or not
+ */
+
+ public boolean updateNeutronLoadBalancer(String uuid, NeutronLoadBalancer delta);
+
+ /**
+ * Applications call this interface method to see if the LoadBalancer object is in use.
+ * (NOTE(review): the original doc mentioned MAC addresses -- a copy/paste artifact
+ * from the port CRUD interface; confirm exact semantics against implementations.)
+ *
+ * @param uuid
+ * identifier of the LoadBalancer object
+ * @return boolean on whether the LoadBalancer is in use or not
+ */
+
+ public boolean neutronLoadBalancerInUse(String uuid);
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the callback methods a service that wishes to be aware of
+ * LoadBalancerHealthMonitor rules needs to implement
+ *
+ */
+
+public interface INeutronLoadBalancerHealthMonitorAware {
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerHealthMonitor can be created
+ *
+ * @param loadBalancerHealthMonitor
+ * instance of proposed new LoadBalancerHealthMonitor object
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the create operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canCreateNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerHealthMonitor has been created
+ *
+ * @param loadBalancerHealthMonitor
+ * instance of new LoadBalancerHealthMonitor object
+ */
+ public void neutronLoadBalancerHealthMonitorCreated(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerHealthMonitor can be changed using the specified
+ * delta
+ *
+ * @param delta
+ * updates to the loadBalancerHealthMonitor object using patch semantics
+ * @param original
+ * instance of the LoadBalancerHealthMonitor object to be updated
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the update operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canUpdateNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor delta,
+ NeutronLoadBalancerHealthMonitor original);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerHealthMonitor has been updated
+ *
+ * @param loadBalancerHealthMonitor
+ * instance of modified LoadBalancerHealthMonitor object
+ */
+ public void neutronLoadBalancerHealthMonitorUpdated(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerHealthMonitor can be deleted
+ *
+ * @param loadBalancerHealthMonitor
+ * instance of the LoadBalancerHealthMonitor object to be deleted
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the delete operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canDeleteNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerHealthMonitor has been deleted
+ *
+ * @param loadBalancerHealthMonitor
+ * instance of deleted LoadBalancerHealthMonitor object
+ */
+ public void neutronLoadBalancerHealthMonitorDeleted(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD (create, read, update, delete) of
+ * NB (northbound) OpenStack LoadBalancerHealthMonitor objects
+ *
+ */
+
+public interface INeutronLoadBalancerHealthMonitorCRUD {
+ /**
+ * Applications call this interface method to determine if a particular
+ * LoadBalancerHealthMonitor object exists
+ *
+ * @param uuid
+ * UUID of the LoadBalancerHealthMonitor object
+ * @return boolean whether a LoadBalancerHealthMonitor with that UUID exists
+ */
+
+ public boolean neutronLoadBalancerHealthMonitorExists(String uuid);
+
+ /**
+ * Applications call this interface method to return the particular
+ * LoadBalancerHealthMonitor object with the given UUID, if one exists
+ *
+ * @param uuid
+ * UUID of the LoadBalancerHealthMonitor object
+ * @return {@link NeutronLoadBalancerHealthMonitor}
+ * the matching LoadBalancerHealthMonitor object (implementations presumably
+ * return null when none exists -- confirm against the implementation)
+ */
+
+ public NeutronLoadBalancerHealthMonitor getNeutronLoadBalancerHealthMonitor(String uuid);
+
+ /**
+ * Applications call this interface method to return all LoadBalancerHealthMonitor objects
+ *
+ * @return List of NeutronLoadBalancerHealthMonitor objects
+ */
+
+ public List<NeutronLoadBalancerHealthMonitor> getAllNeutronLoadBalancerHealthMonitors();
+
+ /**
+ * Applications call this interface method to add a LoadBalancerHealthMonitor object to the
+ * concurrent map
+ *
+ * @param input
+ * NeutronLoadBalancerHealthMonitor object to add
+ * @return boolean on whether the object was added or not
+ */
+
+ public boolean addNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor input);
+
+ /**
+ * Applications call this interface method to remove a Neutron LoadBalancerHealthMonitor object from the
+ * concurrent map
+ *
+ * @param uuid
+ * identifier for the LoadBalancerHealthMonitor object
+ * @return boolean on whether the object was removed or not
+ */
+
+ public boolean removeNeutronLoadBalancerHealthMonitor(String uuid);
+
+ /**
+ * Applications call this interface method to edit a LoadBalancerHealthMonitor object
+ *
+ * @param uuid
+ * identifier of the LoadBalancerHealthMonitor object
+ * @param delta
+ * NeutronLoadBalancerHealthMonitor object containing changes to apply
+ * @return boolean on whether the object was updated or not
+ */
+
+ public boolean updateNeutronLoadBalancerHealthMonitor(String uuid, NeutronLoadBalancerHealthMonitor delta);
+
+ /**
+ * Applications call this interface method to see if the LoadBalancerHealthMonitor object is in use.
+ * (NOTE(review): the original doc mentioned MAC addresses -- a copy/paste artifact
+ * from the port CRUD interface; confirm exact semantics against implementations.)
+ *
+ * @param uuid
+ * identifier of the LoadBalancerHealthMonitor object
+ * @return boolean on whether the LoadBalancerHealthMonitor is in use or not
+ */
+
+ public boolean neutronLoadBalancerHealthMonitorInUse(String uuid);
+
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the callback methods a service that wishes to be aware of
+ * LoadBalancerListener rules needs to implement
+ *
+ */
+
+public interface INeutronLoadBalancerListenerAware {
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerListener can be created
+ *
+ * @param loadBalancerListener
+ * instance of proposed new LoadBalancerListener object
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the create operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canCreateNeutronLoadBalancerListener(NeutronLoadBalancerListener loadBalancerListener);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerListener has been created
+ *
+ * @param loadBalancerListener
+ * instance of new LoadBalancerListener object
+ */
+ public void neutronLoadBalancerListenerCreated(NeutronLoadBalancerListener loadBalancerListener);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerListener can be changed using the specified
+ * delta
+ *
+ * @param delta
+ * updates to the loadBalancerListener object using patch semantics
+ * @param original
+ * instance of the LoadBalancerListener object to be updated
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the update operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canUpdateNeutronLoadBalancerListener(NeutronLoadBalancerListener delta,
+ NeutronLoadBalancerListener original);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerListener has been updated
+ *
+ * @param loadBalancerListener
+ * instance of modified LoadBalancerListener object
+ */
+ public void neutronLoadBalancerListenerUpdated(NeutronLoadBalancerListener loadBalancerListener);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerListener can be deleted
+ *
+ * @param loadBalancerListener
+ * instance of the LoadBalancerListener object to be deleted
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the delete operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canDeleteNeutronLoadBalancerListener(NeutronLoadBalancerListener loadBalancerListener);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerListener has been deleted
+ *
+ * @param loadBalancerListener
+ * instance of deleted LoadBalancerListener object
+ */
+ public void neutronLoadBalancerListenerDeleted(NeutronLoadBalancerListener loadBalancerListener);
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD (create, read, update, delete) of
+ * NB (northbound) OpenStack LoadBalancerListener objects
+ *
+ */
+
+public interface INeutronLoadBalancerListenerCRUD {
+ /**
+ * Applications call this interface method to determine if a particular
+ * LoadBalancerListener object exists
+ *
+ * @param uuid
+ * UUID of the LoadBalancerListener object
+ * @return boolean whether a LoadBalancerListener with that UUID exists
+ */
+
+ public boolean neutronLoadBalancerListenerExists(String uuid);
+
+ /**
+ * Applications call this interface method to return the particular
+ * LoadBalancerListener object with the given UUID, if one exists
+ *
+ * @param uuid
+ * UUID of the LoadBalancerListener object
+ * @return {@link NeutronLoadBalancerListener}
+ * the matching LoadBalancerListener object (implementations presumably
+ * return null when none exists -- confirm against the implementation)
+ */
+
+ public NeutronLoadBalancerListener getNeutronLoadBalancerListener(String uuid);
+
+ /**
+ * Applications call this interface method to return all LoadBalancerListener objects
+ *
+ * @return List of NeutronLoadBalancerListener objects
+ */
+
+ public List<NeutronLoadBalancerListener> getAllNeutronLoadBalancerListeners();
+
+ /**
+ * Applications call this interface method to add a LoadBalancerListener object to the
+ * concurrent map
+ *
+ * @param input
+ * NeutronLoadBalancerListener object to add
+ * @return boolean on whether the object was added or not
+ */
+
+ public boolean addNeutronLoadBalancerListener(NeutronLoadBalancerListener input);
+
+ /**
+ * Applications call this interface method to remove a Neutron LoadBalancerListener object from the
+ * concurrent map
+ *
+ * @param uuid
+ * identifier for the LoadBalancerListener object
+ * @return boolean on whether the object was removed or not
+ */
+
+ public boolean removeNeutronLoadBalancerListener(String uuid);
+
+ /**
+ * Applications call this interface method to edit a LoadBalancerListener object
+ *
+ * @param uuid
+ * identifier of the LoadBalancerListener object
+ * @param delta
+ * NeutronLoadBalancerListener object containing changes to apply
+ * @return boolean on whether the object was updated or not
+ */
+
+ public boolean updateNeutronLoadBalancerListener(String uuid, NeutronLoadBalancerListener delta);
+
+ /**
+ * Applications call this interface method to see if the LoadBalancerListener object is in use.
+ * (NOTE(review): the original doc mentioned MAC addresses -- a copy/paste artifact
+ * from the port CRUD interface; confirm exact semantics against implementations.)
+ *
+ * @param uuid
+ * identifier of the LoadBalancerListener object
+ * @return boolean on whether the LoadBalancerListener is in use or not
+ */
+
+ public boolean neutronLoadBalancerListenerInUse(String uuid);
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the callback methods a service that wishes to be aware of
+ * LoadBalancerPool rules needs to implement
+ *
+ */
+
+public interface INeutronLoadBalancerPoolAware {
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerPool can be created
+ *
+ * @param loadBalancerPool
+ * instance of proposed new LoadBalancerPool object
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the create operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canCreateNeutronLoadBalancerPool(NeutronLoadBalancerPool loadBalancerPool);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerPool has been created
+ *
+ * @param loadBalancerPool
+ * instance of new LoadBalancerPool object
+ */
+ public void neutronLoadBalancerPoolCreated(NeutronLoadBalancerPool loadBalancerPool);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerPool can be changed using the specified
+ * delta
+ *
+ * @param delta
+ * updates to the loadBalancerPool object using patch semantics
+ * @param original
+ * instance of the LoadBalancerPool object to be updated
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the update operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canUpdateNeutronLoadBalancerPool(NeutronLoadBalancerPool delta, NeutronLoadBalancerPool original);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerPool has been updated
+ *
+ * @param loadBalancerPool
+ * instance of modified LoadBalancerPool object
+ */
+ public void neutronLoadBalancerPoolUpdated(NeutronLoadBalancerPool loadBalancerPool);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerPool can be deleted
+ *
+ * @param loadBalancerPool
+ * instance of the LoadBalancerPool object to be deleted
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the delete operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canDeleteNeutronLoadBalancerPool(NeutronLoadBalancerPool loadBalancerPool);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerPool has been deleted
+ *
+ * @param loadBalancerPool
+ * instance of deleted LoadBalancerPool object
+ */
+ public void neutronLoadBalancerPoolDeleted(NeutronLoadBalancerPool loadBalancerPool);
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD (create, read, update, delete) of
+ * NB (northbound) OpenStack LoadBalancerPool objects
+ *
+ */
+
+public interface INeutronLoadBalancerPoolCRUD {
+ /**
+ * Applications call this interface method to determine if a particular
+ * LoadBalancerPool object exists
+ *
+ * @param uuid
+ * UUID of the LoadBalancerPool object
+ * @return boolean whether a LoadBalancerPool with that UUID exists
+ */
+
+ public boolean neutronLoadBalancerPoolExists(String uuid);
+
+ /**
+ * Applications call this interface method to return the particular
+ * LoadBalancerPool object with the given UUID, if one exists
+ *
+ * @param uuid
+ * UUID of the LoadBalancerPool object
+ * @return {@link NeutronLoadBalancerPool}
+ * the matching LoadBalancerPool object (implementations presumably
+ * return null when none exists -- confirm against the implementation)
+ */
+
+ public NeutronLoadBalancerPool getNeutronLoadBalancerPool(String uuid);
+
+ /**
+ * Applications call this interface method to return all LoadBalancerPool objects
+ *
+ * @return List of NeutronLoadBalancerPool objects
+ */
+
+ public List<NeutronLoadBalancerPool> getAllNeutronLoadBalancerPools();
+
+ /**
+ * Applications call this interface method to add a LoadBalancerPool object to the
+ * concurrent map
+ *
+ * @param input
+ * NeutronLoadBalancerPool object to add
+ * @return boolean on whether the object was added or not
+ */
+
+ public boolean addNeutronLoadBalancerPool(NeutronLoadBalancerPool input);
+
+ /**
+ * Applications call this interface method to remove a Neutron LoadBalancerPool object from the
+ * concurrent map
+ *
+ * @param uuid
+ * identifier for the LoadBalancerPool object
+ * @return boolean on whether the object was removed or not
+ */
+
+ public boolean removeNeutronLoadBalancerPool(String uuid);
+
+ /**
+ * Applications call this interface method to edit a LoadBalancerPool object
+ *
+ * @param uuid
+ * identifier of the LoadBalancerPool object
+ * @param delta
+ * NeutronLoadBalancerPool object containing changes to apply
+ * @return boolean on whether the object was updated or not
+ */
+
+ public boolean updateNeutronLoadBalancerPool(String uuid, NeutronLoadBalancerPool delta);
+
+ /**
+ * Applications call this interface method to see if the LoadBalancerPool object is in use.
+ * (NOTE(review): the original doc mentioned MAC addresses -- a copy/paste artifact
+ * from the port CRUD interface; confirm exact semantics against implementations.)
+ *
+ * @param uuid
+ * identifier of the LoadBalancerPool object
+ * @return boolean on whether the LoadBalancerPool is in use or not
+ */
+
+ public boolean neutronLoadBalancerPoolInUse(String uuid);
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the callback methods a service that wishes to be aware of
+ * LoadBalancerPoolMember rules needs to implement
+ */
+public interface INeutronLoadBalancerPoolMemberAware {
+
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerPoolMember can be created
+ *
+ * @param loadBalancerPoolMember
+ * instance of proposed new LoadBalancerPoolMember object
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the create operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canCreateNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerPoolMember has been created
+ *
+ * @param loadBalancerPoolMember
+ * instance of new LoadBalancerPoolMember object
+ */
+ public void neutronLoadBalancerPoolMemberCreated(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerPoolMember can be changed using the specified
+ * delta
+ *
+ * @param delta
+ * updates to the loadBalancerPoolMember object using patch semantics
+ * @param original
+ * instance of the LoadBalancerPoolMember object to be updated
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the update operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canUpdateNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember delta,
+ NeutronLoadBalancerPoolMember original);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerPoolMember has been updated
+ *
+ * @param loadBalancerPoolMember
+ * instance of modified LoadBalancerPoolMember object
+ */
+ public void neutronLoadBalancerPoolMemberUpdated(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+
+ /**
+ * Services provide this interface method to indicate if the specified loadBalancerPoolMember can be deleted
+ *
+ * @param loadBalancerPoolMember
+ * instance of the LoadBalancerPoolMember object to be deleted
+ * @return integer
+ * the return value is understood to be an HTTP status code. A return value outside of 200 through 299
+ * results in the delete operation being interrupted and the returned status value reflected in the
+ * HTTP response.
+ */
+ public int canDeleteNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+
+ /**
+ * Services provide this interface method for taking action after a loadBalancerPoolMember has been deleted
+ *
+ * @param loadBalancerPoolMember
+ * instance of deleted LoadBalancerPoolMember object
+ */
+ // NOTE(review): this method name starts with an uppercase letter, unlike the
+ // lowerCamelCase "...Deleted" callbacks in the sibling *Aware interfaces.
+ // Renaming would break every implementer, so the inconsistency is only flagged here.
+ public void NeutronLoadBalancerPoolMemberDeleted(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD (create, read, update, delete) of
+ * NB (northbound) OpenStack LoadBalancerPoolMember objects
+ */
+public interface INeutronLoadBalancerPoolMemberCRUD {
+
+ /**
+ * Applications call this interface method to determine if a particular
+ * NeutronLoadBalancerPoolMember object exists
+ *
+ * @param uuid
+ * UUID of the NeutronLoadBalancerPoolMember object
+ * @return boolean whether a NeutronLoadBalancerPoolMember with that UUID exists
+ */
+
+ public boolean neutronLoadBalancerPoolMemberExists(String uuid);
+
+ /**
+ * Applications call this interface method to return the particular
+ * NeutronLoadBalancerPoolMember object with the given UUID, if one exists
+ *
+ * @param uuid
+ * UUID of the NeutronLoadBalancerPoolMember object
+ * @return {@link org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember}
+ * the matching NeutronLoadBalancerPoolMember object (implementations presumably
+ * return null when none exists -- confirm against the implementation)
+ */
+
+ public NeutronLoadBalancerPoolMember getNeutronLoadBalancerPoolMember(String uuid);
+
+ /**
+ * Applications call this interface method to return all NeutronLoadBalancerPoolMember objects
+ *
+ * @return List of NeutronLoadBalancerPoolMember objects
+ */
+
+ public List<NeutronLoadBalancerPoolMember> getAllNeutronLoadBalancerPoolMembers();
+
+ /**
+ * Applications call this interface method to add a NeutronLoadBalancerPoolMember object to the
+ * concurrent map
+ *
+ * @param input
+ * NeutronLoadBalancerPoolMember object to add
+ * @return boolean on whether the object was added or not
+ */
+
+ public boolean addNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember input);
+
+ /**
+ * Applications call this interface method to remove a Neutron NeutronLoadBalancerPoolMember object from the
+ * concurrent map
+ *
+ * @param uuid
+ * identifier for the NeutronLoadBalancerPoolMember object
+ * @return boolean on whether the object was removed or not
+ */
+
+ public boolean removeNeutronLoadBalancerPoolMember(String uuid);
+
+ /**
+ * Applications call this interface method to edit a NeutronLoadBalancerPoolMember object
+ *
+ * @param uuid
+ * identifier of the NeutronLoadBalancerPoolMember object
+ * @param delta
+ * NeutronLoadBalancerPoolMember object containing changes to apply
+ * @return boolean on whether the object was updated or not
+ */
+
+ public boolean updateNeutronLoadBalancerPoolMember(String uuid, NeutronLoadBalancerPoolMember delta);
+
+ /**
+ * Applications call this interface method to see if the NeutronLoadBalancerPoolMember object is in use.
+ * (NOTE(review): the original doc mentioned MAC addresses -- a copy/paste artifact
+ * from the port CRUD interface; confirm exact semantics against implementations.)
+ *
+ * @param uuid
+ * identifier of the NeutronLoadBalancerPoolMember object
+ * @return boolean on whether the NeutronLoadBalancerPoolMember is in use or not
+ */
+
+ public boolean neutronLoadBalancerPoolMemberInUse(String uuid);
+
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import javax.xml.bind.annotation.XmlElement;
+import java.util.List;
+
+// NOTE(review): despite the "I" prefix this is a concrete JAXB request wrapper, not an
+// interface; renaming would break callers, so the misleading name is only flagged here.
+// It also lacks @XmlRootElement/@XmlAccessorType unlike typical request classes --
+// presumably it is only bound as a nested payload; TODO confirm against the NB REST layer.
+public class INeutronLoadBalancerPoolMemberRequest {
+
+ /**
+ * See OpenStack Network API v2.0 Reference for description of
+ * http://docs.openstack.org/api/openstack-network/2.0/content/
+ */
+
+ // Set for a single-member request (JSON/XML key "member"); null for bulk requests.
+ @XmlElement(name="member")
+ NeutronLoadBalancerPoolMember singletonLoadBalancerPoolMember;
+
+ // Set for a bulk request (JSON/XML key "members").
+ @XmlElement(name="members")
+ List<NeutronLoadBalancerPoolMember> bulkRequest;
+
+ // No-arg constructor for JAXB unmarshalling.
+ INeutronLoadBalancerPoolMemberRequest() {
+ }
+
+ // Builds a bulk request; explicitly clears the singleton field so isSingleton() is false.
+ public INeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
+ bulkRequest = bulk;
+ singletonLoadBalancerPoolMember = null;
+ }
+
+ // Builds a single-member request (bulkRequest stays null).
+ INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
+ singletonLoadBalancerPoolMember = group;
+ }
+
+ /** @return the bulk member list, or null for a single-member request */
+ public List<NeutronLoadBalancerPoolMember> getBulk() {
+ return bulkRequest;
+ }
+
+ /** @return the single member, or null for a bulk request */
+ public NeutronLoadBalancerPoolMember getSingleton() {
+ return singletonLoadBalancerPoolMember;
+ }
+
+ /** @return true when this request wraps exactly one member */
+ public boolean isSingleton() {
+ return (singletonLoadBalancerPoolMember != null);
+ }
+}
\ No newline at end of file
INeutronFirewallRuleCRUD answer = (INeutronFirewallRuleCRUD) ServiceHelper.getGlobalInstance(INeutronFirewallRuleCRUD.class, o);
return answer;
}
+
+ /** Looks up the globally registered INeutronLoadBalancerCRUD service via ServiceHelper. */
+ public static INeutronLoadBalancerCRUD getINeutronLoadBalancerCRUD(Object o) {
+ return (INeutronLoadBalancerCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerCRUD.class, o);
+ }
+
+ /** Looks up the globally registered INeutronLoadBalancerPoolCRUD service via ServiceHelper. */
+ public static INeutronLoadBalancerPoolCRUD getINeutronLoadBalancerPoolCRUD(Object o) {
+ return (INeutronLoadBalancerPoolCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerPoolCRUD.class, o);
+ }
+
+ /** Looks up the globally registered INeutronLoadBalancerListenerCRUD service via ServiceHelper. */
+ public static INeutronLoadBalancerListenerCRUD getINeutronLoadBalancerListenerCRUD(Object o) {
+ return (INeutronLoadBalancerListenerCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerListenerCRUD.class, o);
+ }
+
+ /** Looks up the globally registered INeutronLoadBalancerHealthMonitorCRUD service via ServiceHelper. */
+ public static INeutronLoadBalancerHealthMonitorCRUD getINeutronLoadBalancerHealthMonitorCRUD(Object o) {
+ return (INeutronLoadBalancerHealthMonitorCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerHealthMonitorCRUD.class, o);
+ }
+
+ /** Looks up the globally registered INeutronLoadBalancerPoolMemberCRUD service via ServiceHelper. */
+ public static INeutronLoadBalancerPoolMemberCRUD getINeutronLoadBalancerPoolMemberCRUD(Object o) {
+ return (INeutronLoadBalancerPoolMemberCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerPoolMemberCRUD.class, o);
+ }
}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * OpenStack Neutron v2.0 Load Balancer as a service
+ * (LBaaS) bindings. See OpenStack Network API
+ * v2.0 Reference for description of the fields:
+ * Implemented fields are as follows:
+ *
+ * id uuid-str
+ * tenant_id uuid-str
+ * name String
+ * description String
+ * status String
+ * vip_address IP address
+ * vip_subnet uuid-str
+ * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
+ */
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancer extends ConfigurationObject implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ @XmlElement(name="id")
+ String loadBalancerID;
+
+ @XmlElement (name="tenant_id")
+ String loadBalancerTenantID;
+
+ @XmlElement (name="name")
+ String loadBalancerName;
+
+ @XmlElement (name="description")
+ String loadBalancerDescription;
+
+ @XmlElement (name="status")
+ String loadBalancerStatus;
+
+ @XmlElement (name="vip_address")
+ String loadBalancerVipAddress;
+
+ @XmlElement (name="vip_subnet_id")
+ String loadBalancerVipSubnetID;
+
+ /** @return the UUID of this load balancer ("id" in the serialized payload) */
+ public String getLoadBalancerID() {
+ return loadBalancerID;
+ }
+
+ public void setLoadBalancerID(String loadBalancerID) {
+ this.loadBalancerID = loadBalancerID;
+ }
+
+ public String getLoadBalancerTenantID() {
+ return loadBalancerTenantID;
+ }
+
+ public void setLoadBalancerTenantID(String loadBalancerTenantID) {
+ this.loadBalancerTenantID = loadBalancerTenantID;
+ }
+
+ public String getLoadBalancerName() {
+ return loadBalancerName;
+ }
+
+ public void setLoadBalancerName(String loadBalancerName) {
+ this.loadBalancerName = loadBalancerName;
+ }
+
+ public String getLoadBalancerDescription() {
+ return loadBalancerDescription;
+ }
+
+ public void setLoadBalancerDescription(String loadBalancerDescription) {
+ this.loadBalancerDescription = loadBalancerDescription;
+ }
+
+ public String getLoadBalancerStatus() {
+ return loadBalancerStatus;
+ }
+
+ public void setLoadBalancerStatus(String loadBalancerStatus) {
+ this.loadBalancerStatus = loadBalancerStatus;
+ }
+
+ public String getLoadBalancerVipAddress() {
+ return loadBalancerVipAddress;
+ }
+
+ public void setLoadBalancerVipAddress(String loadBalancerVipAddress) {
+ this.loadBalancerVipAddress = loadBalancerVipAddress;
+ }
+
+ public String getLoadBalancerVipSubnetID() {
+ return loadBalancerVipSubnetID;
+ }
+
+ public void setLoadBalancerVipSubnetID(String loadBalancerVipSubnetID) {
+ this.loadBalancerVipSubnetID = loadBalancerVipSubnetID;
+ }
+
+ public NeutronLoadBalancer extractFields(List<String> fields) {
+ NeutronLoadBalancer ans = new NeutronLoadBalancer();
+ Iterator<String> i = fields.iterator();
+ while (i.hasNext()) {
+ String s = i.next();
+ if (s.equals("id")) {
+ ans.setLoadBalancerID(this.getLoadBalancerID());
+ }
+ if (s.equals("tenant_id")) {
+ ans.setLoadBalancerTenantID(this.getLoadBalancerTenantID());
+ }
+ if (s.equals("name")) {
+ ans.setLoadBalancerName(this.getLoadBalancerName());
+ }
+ if(s.equals("description")) {
+ ans.setLoadBalancerDescription(this.getLoadBalancerDescription());
+ }
+ if (s.equals("vip_address")) {
+ ans.setLoadBalancerVipAddress(this.getLoadBalancerVipAddress());
+ }
+ if (s.equals("vip_subnet_id")) {
+ ans.setLoadBalancerVipSubnetID(this.getLoadBalancerVipSubnetID());
+ }
+ if (s.equals("status")) {
+ ans.setLoadBalancerStatus(this.getLoadBalancerStatus());
+ }
+ }
+ return ans;
+ }
+
+ @Override public String toString() {
+ return "NeutronLoadBalancer{" +
+ "loadBalancerID='" + loadBalancerID + '\'' +
+ ", loadBalancerTenantID='" + loadBalancerTenantID + '\'' +
+ ", loadBalancerName='" + loadBalancerName + '\'' +
+ ", loadBalancerDescription='" + loadBalancerDescription + '\'' +
+ ", loadBalancerStatus='" + loadBalancerStatus + '\'' +
+ ", loadBalancerVipAddress='" + loadBalancerVipAddress + '\'' +
+ ", loadBalancerVipSubnetID='" + loadBalancerVipSubnetID + '\'' +
+ '}';
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * OpenStack Neutron v2.0 Load Balancer as a service
+ * (LBaaS) bindings. See OpenStack Network API
+ * v2.0 Reference for description of the fields:
+ * Implemented fields are as follows:
+ *
+ * id             uuid-str
+ * tenant_id      uuid-str
+ * type           String
+ * delay          Integer
+ * timeout        Integer
+ * max_retries    Integer
+ * http_method    String
+ * url_path       String
+ * expected_codes String
+ * admin_state_up Boolean
+ * status         String
+ * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
+ */
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerHealthMonitor extends ConfigurationObject implements Serializable {
+    private static final long serialVersionUID = 1L;
+    // Fixed: was getLogger(NeutronLoadBalancer.class), which would have
+    // attributed every log line from this class to the wrong logger name.
+    private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerHealthMonitor.class);
+
+    // Unique identifier of this health monitor (wire key "id").
+    @XmlElement(name="id")
+    String loadBalancerHealthMonitorID;
+
+    // Owning tenant (wire key "tenant_id").
+    @XmlElement (name="tenant_id")
+    String loadBalancerHealthMonitorTenantID;
+
+    // Probe type, e.g. HTTP-style checks (wire key "type").
+    @XmlElement (name="type")
+    String loadBalancerHealthMonitorType;
+
+    // Interval between probes (wire key "delay").
+    @XmlElement (name="delay")
+    Integer loadBalancerHealthMonitorDelay;
+
+    @XmlElement (name="timeout")
+    Integer loadBalancerHealthMonitorTimeout;
+
+    @XmlElement (name="max_retries")
+    Integer loadBalancerHealthMonitorMaxRetries;
+
+    @XmlElement (name="http_method")
+    String loadBalancerHealthMonitorHttpMethod;
+
+    @XmlElement (name="url_path")
+    String loadBalancerHealthMonitorUrlPath;
+
+    @XmlElement (name="expected_codes")
+    String loadBalancerHealthMonitorExpectedCodes;
+
+    // Administrative state; defaults to true when absent from the request.
+    @XmlElement (defaultValue="true", name="admin_state_up")
+    Boolean loadBalancerHealthMonitorAdminStateIsUp;
+
+    @XmlElement (name="status")
+    String loadBalancerHealthMonitorStatus;
+
+    public String getLoadBalancerHealthMonitorID() {
+        return loadBalancerHealthMonitorID;
+    }
+
+    public void setLoadBalancerHealthMonitorID(String loadBalancerHealthMonitorID) {
+        this.loadBalancerHealthMonitorID = loadBalancerHealthMonitorID;
+    }
+
+    public String getLoadBalancerHealthMonitorTenantID() {
+        return loadBalancerHealthMonitorTenantID;
+    }
+
+    public void setLoadBalancerHealthMonitorTenantID(String loadBalancerHealthMonitorTenantID) {
+        this.loadBalancerHealthMonitorTenantID = loadBalancerHealthMonitorTenantID;
+    }
+
+    public String getLoadBalancerHealthMonitorType() {
+        return loadBalancerHealthMonitorType;
+    }
+
+    public void setLoadBalancerHealthMonitorType(String loadBalancerHealthMonitorType) {
+        this.loadBalancerHealthMonitorType = loadBalancerHealthMonitorType;
+    }
+
+    public Integer getLoadBalancerHealthMonitorDelay() {
+        return loadBalancerHealthMonitorDelay;
+    }
+
+    public void setLoadBalancerHealthMonitorDelay(Integer loadBalancerHealthMonitorDelay) {
+        this.loadBalancerHealthMonitorDelay = loadBalancerHealthMonitorDelay;
+    }
+
+    public Integer getLoadBalancerHealthMonitorTimeout() {
+        return loadBalancerHealthMonitorTimeout;
+    }
+
+    public void setLoadBalancerHealthMonitorTimeout(Integer loadBalancerHealthMonitorTimeout) {
+        this.loadBalancerHealthMonitorTimeout = loadBalancerHealthMonitorTimeout;
+    }
+
+    public Integer getLoadBalancerHealthMonitorMaxRetries() {
+        return loadBalancerHealthMonitorMaxRetries;
+    }
+
+    public void setLoadBalancerHealthMonitorMaxRetries(Integer loadBalancerHealthMonitorMaxRetries) {
+        this.loadBalancerHealthMonitorMaxRetries = loadBalancerHealthMonitorMaxRetries;
+    }
+
+    public String getLoadBalancerHealthMonitorHttpMethod() {
+        return loadBalancerHealthMonitorHttpMethod;
+    }
+
+    public void setLoadBalancerHealthMonitorHttpMethod(String loadBalancerHealthMonitorHttpMethod) {
+        this.loadBalancerHealthMonitorHttpMethod = loadBalancerHealthMonitorHttpMethod;
+    }
+
+    public String getLoadBalancerHealthMonitorUrlPath() {
+        return loadBalancerHealthMonitorUrlPath;
+    }
+
+    public void setLoadBalancerHealthMonitorUrlPath(String loadBalancerHealthMonitorUrlPath) {
+        this.loadBalancerHealthMonitorUrlPath = loadBalancerHealthMonitorUrlPath;
+    }
+
+    public String getLoadBalancerHealthMonitorExpectedCodes() {
+        return loadBalancerHealthMonitorExpectedCodes;
+    }
+
+    public void setLoadBalancerHealthMonitorExpectedCodes(String loadBalancerHealthMonitorExpectedCodes) {
+        this.loadBalancerHealthMonitorExpectedCodes = loadBalancerHealthMonitorExpectedCodes;
+    }
+
+    public Boolean getLoadBalancerHealthMonitorAdminStateIsUp() {
+        return loadBalancerHealthMonitorAdminStateIsUp;
+    }
+
+    public void setLoadBalancerHealthMonitorAdminStateIsUp(Boolean loadBalancerHealthMonitorAdminStateIsUp) {
+        this.loadBalancerHealthMonitorAdminStateIsUp = loadBalancerHealthMonitorAdminStateIsUp;
+    }
+
+    public String getLoadBalancerHealthMonitorStatus() {
+        return loadBalancerHealthMonitorStatus;
+    }
+
+    public void setLoadBalancerHealthMonitorStatus(String loadBalancerHealthMonitorStatus) {
+        this.loadBalancerHealthMonitorStatus = loadBalancerHealthMonitorStatus;
+    }
+
+    /**
+     * Builds a copy of this object containing only the requested attributes.
+     *
+     * @param fields wire-format attribute names selecting which values to
+     *               copy; unknown names are ignored
+     * @return a new NeutronLoadBalancerHealthMonitor holding only the
+     *         selected fields
+     */
+    public NeutronLoadBalancerHealthMonitor extractFields(List<String> fields) {
+        NeutronLoadBalancerHealthMonitor ans = new NeutronLoadBalancerHealthMonitor();
+        Iterator<String> i = fields.iterator();
+        while (i.hasNext()) {
+            String s = i.next();
+            if (s.equals("id")) {
+                ans.setLoadBalancerHealthMonitorID(this.getLoadBalancerHealthMonitorID());
+            }
+            if (s.equals("tenant_id")) {
+                ans.setLoadBalancerHealthMonitorTenantID(this.getLoadBalancerHealthMonitorTenantID());
+            }
+            if (s.equals("type")) {
+                ans.setLoadBalancerHealthMonitorType(this.getLoadBalancerHealthMonitorType());
+            }
+            if (s.equals("delay")) {
+                ans.setLoadBalancerHealthMonitorDelay(this.getLoadBalancerHealthMonitorDelay());
+            }
+            if (s.equals("timeout")) {
+                ans.setLoadBalancerHealthMonitorTimeout(this.getLoadBalancerHealthMonitorTimeout());
+            }
+            if (s.equals("max_retries")) {
+                ans.setLoadBalancerHealthMonitorMaxRetries(this.getLoadBalancerHealthMonitorMaxRetries());
+            }
+            if (s.equals("http_method")) {
+                ans.setLoadBalancerHealthMonitorHttpMethod(this.getLoadBalancerHealthMonitorHttpMethod());
+            }
+            if (s.equals("url_path")) {
+                ans.setLoadBalancerHealthMonitorUrlPath(this.getLoadBalancerHealthMonitorUrlPath());
+            }
+            if (s.equals("expected_codes")) {
+                ans.setLoadBalancerHealthMonitorExpectedCodes(this.getLoadBalancerHealthMonitorExpectedCodes());
+            }
+            if (s.equals("admin_state_up")) {
+                // Use the accessor for consistency with every other branch.
+                ans.setLoadBalancerHealthMonitorAdminStateIsUp(this.getLoadBalancerHealthMonitorAdminStateIsUp());
+            }
+            if (s.equals("status")) {
+                ans.setLoadBalancerHealthMonitorStatus(this.getLoadBalancerHealthMonitorStatus());
+            }
+        }
+        return ans;
+    }
+
+    @Override public String toString() {
+        return "NeutronLoadBalancerHealthMonitor{" +
+                "loadBalancerHealthMonitorID='" + loadBalancerHealthMonitorID + '\'' +
+                ", loadBalancerHealthMonitorTenantID='" + loadBalancerHealthMonitorTenantID + '\'' +
+                ", loadBalancerHealthMonitorType='" + loadBalancerHealthMonitorType + '\'' +
+                ", loadBalancerHealthMonitorDelay=" + loadBalancerHealthMonitorDelay +
+                ", loadBalancerHealthMonitorTimeout=" + loadBalancerHealthMonitorTimeout +
+                ", loadBalancerHealthMonitorMaxRetries=" + loadBalancerHealthMonitorMaxRetries +
+                ", loadBalancerHealthMonitorHttpMethod='" + loadBalancerHealthMonitorHttpMethod + '\'' +
+                ", loadBalancerHealthMonitorUrlPath='" + loadBalancerHealthMonitorUrlPath + '\'' +
+                ", loadBalancerHealthMonitorExpectedCodes='" + loadBalancerHealthMonitorExpectedCodes + '\'' +
+                ", loadBalancerHealthMonitorAdminStateIsUp=" + loadBalancerHealthMonitorAdminStateIsUp +
+                ", loadBalancerHealthMonitorStatus='" + loadBalancerHealthMonitorStatus + '\'' +
+                '}';
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * OpenStack Neutron v2.0 Load Balancer as a service
+ * (LBaaS) bindings. See OpenStack Network API
+ * v2.0 Reference for description of the fields:
+ * Implemented fields are as follows:
+ *
+ * id               uuid-str
+ * default_pool_id  String
+ * tenant_id        uuid-str
+ * name             String
+ * description      String
+ * shared           Bool
+ * protocol         String
+ * protocol_port    String
+ * load_balancer_id String
+ * admin_state_up   Boolean
+ * status           String
+ *
+ * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
+ */
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerListener extends ConfigurationObject implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    // Unique identifier of this listener (wire key "id").
+    @XmlElement(name="id")
+    String loadBalancerListenerID;
+
+    // Pool new connections are dispatched to (wire key "default_pool_id").
+    @XmlElement (name="default_pool_id")
+    String neutronLoadBalancerListenerDefaultPoolID;
+
+    @XmlElement (name="tenant_id")
+    String loadBalancerListenerTenantID;
+
+    @XmlElement (name="name")
+    String loadBalancerListenerName;
+
+    @XmlElement (name="description")
+    String loadBalancerListenerDescription;
+
+    // Administrative state; defaults to true when absent from the request.
+    @XmlElement (defaultValue="true", name="admin_state_up")
+    Boolean loadBalancerListenerAdminStateIsUp;
+
+    @XmlElement (name="status")
+    String loadBalancerListenerStatus;
+
+    // Cross-tenant visibility flag; defaults to false when absent.
+    @XmlElement (defaultValue="false", name="shared")
+    Boolean loadBalancerListenerIsShared;
+
+    @XmlElement (name="protocol")
+    String neutronLoadBalancerListenerProtocol;
+
+    @XmlElement (name="protocol_port")
+    String neutronLoadBalancerListenerProtocolPort;
+
+    @XmlElement (name="load_balancer_id")
+    String neutronLoadBalancerListenerLoadBalancerID;
+
+
+    public String getLoadBalancerListenerID() {
+        return loadBalancerListenerID;
+    }
+
+    public void setLoadBalancerListenerID(String loadBalancerListenerID) {
+        this.loadBalancerListenerID = loadBalancerListenerID;
+    }
+
+    public String getLoadBalancerListenerTenantID() {
+        return loadBalancerListenerTenantID;
+    }
+
+    public void setLoadBalancerListenerTenantID(String loadBalancerListenerTenantID) {
+        this.loadBalancerListenerTenantID = loadBalancerListenerTenantID;
+    }
+
+    public String getLoadBalancerListenerName() {
+        return loadBalancerListenerName;
+    }
+
+    public void setLoadBalancerListenerName(String loadBalancerListenerName) {
+        this.loadBalancerListenerName = loadBalancerListenerName;
+    }
+
+    public String getLoadBalancerListenerDescription() {
+        return loadBalancerListenerDescription;
+    }
+
+    public void setLoadBalancerListenerDescription(String loadBalancerListenerDescription) {
+        this.loadBalancerListenerDescription = loadBalancerListenerDescription;
+    }
+
+    public Boolean getLoadBalancerListenerAdminStateIsUp() {
+        return loadBalancerListenerAdminStateIsUp;
+    }
+
+    public void setLoadBalancerListenerAdminStateIsUp(Boolean loadBalancerListenerAdminStateIsUp) {
+        this.loadBalancerListenerAdminStateIsUp = loadBalancerListenerAdminStateIsUp;
+    }
+
+    public String getLoadBalancerListenerStatus() {
+        return loadBalancerListenerStatus;
+    }
+
+    public void setLoadBalancerListenerStatus(String loadBalancerListenerStatus) {
+        this.loadBalancerListenerStatus = loadBalancerListenerStatus;
+    }
+
+    public Boolean getLoadBalancerListenerIsShared() {
+        return loadBalancerListenerIsShared;
+    }
+
+    public void setLoadBalancerListenerIsShared(Boolean loadBalancerListenerIsShared) {
+        this.loadBalancerListenerIsShared = loadBalancerListenerIsShared;
+    }
+
+    public String getNeutronLoadBalancerListenerProtocol() {
+        return neutronLoadBalancerListenerProtocol;
+    }
+
+    public void setNeutronLoadBalancerListenerProtocol(String neutronLoadBalancerListenerProtocol) {
+        this.neutronLoadBalancerListenerProtocol = neutronLoadBalancerListenerProtocol;
+    }
+
+    public String getNeutronLoadBalancerListenerProtocolPort() {
+        return neutronLoadBalancerListenerProtocolPort;
+    }
+
+    public void setNeutronLoadBalancerListenerProtocolPort(String neutronLoadBalancerListenerProtocolPort) {
+        this.neutronLoadBalancerListenerProtocolPort = neutronLoadBalancerListenerProtocolPort;
+    }
+
+    public String getNeutronLoadBalancerListenerDefaultPoolID() {
+        return neutronLoadBalancerListenerDefaultPoolID;
+    }
+
+    public void setNeutronLoadBalancerListenerDefaultPoolID(String neutronLoadBalancerListenerDefaultPoolID) {
+        this.neutronLoadBalancerListenerDefaultPoolID = neutronLoadBalancerListenerDefaultPoolID;
+    }
+
+    public String getNeutronLoadBalancerListenerLoadBalancerID() {
+        return neutronLoadBalancerListenerLoadBalancerID;
+    }
+
+    public void setNeutronLoadBalancerListenerLoadBalancerID(String neutronLoadBalancerListenerLoadBalancerID) {
+        this.neutronLoadBalancerListenerLoadBalancerID = neutronLoadBalancerListenerLoadBalancerID;
+    }
+
+    /**
+     * Builds a copy of this object containing only the requested attributes.
+     * Each recognized name in {@code fields} copies the matching value into
+     * the result; unrecognized names are silently skipped.
+     *
+     * @param fields wire-format attribute names selecting which values to copy
+     * @return a new NeutronLoadBalancerListener holding only the selected fields
+     */
+    public NeutronLoadBalancerListener extractFields(List<String> fields) {
+        NeutronLoadBalancerListener selection = new NeutronLoadBalancerListener();
+        for (String field : fields) {
+            // The attribute names are mutually exclusive, so an else-if
+            // chain dispatches each entry exactly once.
+            if (field.equals("id")) {
+                selection.setLoadBalancerListenerID(this.getLoadBalancerListenerID());
+            } else if (field.equals("default_pool_id")) {
+                selection.setNeutronLoadBalancerListenerDefaultPoolID(this.getNeutronLoadBalancerListenerDefaultPoolID());
+            } else if (field.equals("tenant_id")) {
+                selection.setLoadBalancerListenerTenantID(this.getLoadBalancerListenerTenantID());
+            } else if (field.equals("name")) {
+                selection.setLoadBalancerListenerName(this.getLoadBalancerListenerName());
+            } else if (field.equals("description")) {
+                selection.setLoadBalancerListenerDescription(this.getLoadBalancerListenerDescription());
+            } else if (field.equals("shared")) {
+                selection.setLoadBalancerListenerIsShared(loadBalancerListenerIsShared);
+            } else if (field.equals("protocol")) {
+                selection.setNeutronLoadBalancerListenerProtocol(this.getNeutronLoadBalancerListenerProtocol());
+            } else if (field.equals("protocol_port")) {
+                selection.setNeutronLoadBalancerListenerProtocolPort(this.getNeutronLoadBalancerListenerProtocolPort());
+            } else if (field.equals("load_balancer_id")) {
+                selection.setNeutronLoadBalancerListenerLoadBalancerID(this.getNeutronLoadBalancerListenerLoadBalancerID());
+            } else if (field.equals("admin_state_up")) {
+                selection.setLoadBalancerListenerAdminStateIsUp(loadBalancerListenerAdminStateIsUp);
+            } else if (field.equals("status")) {
+                selection.setLoadBalancerListenerStatus(this.getLoadBalancerListenerStatus());
+            }
+        }
+        return selection;
+    }
+
+    @Override public String toString() {
+        StringBuilder text = new StringBuilder("NeutronLoadBalancerListener{");
+        text.append("loadBalancerListenerID='").append(loadBalancerListenerID).append('\'');
+        text.append(", neutronLoadBalancerListenerDefaultPoolID='").append(neutronLoadBalancerListenerDefaultPoolID).append('\'');
+        text.append(", loadBalancerListenerTenantID='").append(loadBalancerListenerTenantID).append('\'');
+        text.append(", loadBalancerListenerName='").append(loadBalancerListenerName).append('\'');
+        text.append(", loadBalancerListenerDescription='").append(loadBalancerListenerDescription).append('\'');
+        text.append(", loadBalancerListenerAdminStateIsUp=").append(loadBalancerListenerAdminStateIsUp);
+        text.append(", loadBalancerListenerStatus='").append(loadBalancerListenerStatus).append('\'');
+        text.append(", loadBalancerListenerIsShared=").append(loadBalancerListenerIsShared);
+        text.append(", neutronLoadBalancerListenerProtocol='").append(neutronLoadBalancerListenerProtocol).append('\'');
+        text.append(", neutronLoadBalancerListenerProtocolPort='").append(neutronLoadBalancerListenerProtocolPort).append('\'');
+        text.append(", neutronLoadBalancerListenerLoadBalancerID='").append(neutronLoadBalancerListenerLoadBalancerID).append('\'');
+        text.append('}');
+        return text.toString();
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * OpenStack Neutron v2.0 Load Balancer as a service
+ * (LBaaS) bindings. See OpenStack Network API
+ * v2.0 Reference for description of the fields:
+ * Implemented fields are as follows:
+ *
+ * id               uuid-str
+ * tenant_id        uuid-str
+ * name             String
+ * description      String
+ * protocol         String
+ * lb_algorithm     String
+ * healthmonitor_id String
+ * admin_state_up   Bool
+ * status           String
+ * members          List <String>
+ * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
+ */
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerPool extends ConfigurationObject implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    @XmlElement(name="id")
+    String loadBalancerPoolID;
+
+    @XmlElement (name="tenant_id")
+    String loadBalancerPoolTenantID;
+
+    @XmlElement (name="name")
+    String loadBalancerPoolName;
+
+    @XmlElement (name="description")
+    String loadBalancerPoolDescription;
+
+    @XmlElement (name="protocol")
+    String loadBalancerPoolProtocol;
+
+    @XmlElement (name="lb_algorithm")
+    String loadBalancerPoolLbAlgorithm;
+
+    @XmlElement (name="healthmonitor_id")
+    String neutronLoadBalancerPoolHealthMonitorID;
+
+    // Administrative state; defaults to true when absent from the request.
+    @XmlElement (defaultValue="true", name="admin_state_up")
+    Boolean loadBalancerPoolAdminStateIsUp;
+
+    @XmlElement (name="status")
+    String loadBalancerPoolStatus;
+
+    // NOTE(review): raw List — the element type is not pinned down anywhere
+    // in this file (javadoc says List<String>); parameterize once the member
+    // representation is confirmed. Kept raw here to avoid breaking callers.
+    @XmlElement (name="members")
+    List loadBalancerPoolMembers;
+
+    // Members keyed by id; populated elsewhere (only initialized here).
+    HashMap<String, NeutronLoadBalancerPoolMember> member;
+
+    public NeutronLoadBalancerPool() {
+        member = new HashMap<String, NeutronLoadBalancerPoolMember>();
+    }
+
+    public String getLoadBalancerPoolID() {
+        return loadBalancerPoolID;
+    }
+
+    public void setLoadBalancerPoolID(String loadBalancerPoolID) {
+        this.loadBalancerPoolID = loadBalancerPoolID;
+    }
+
+    public String getLoadBalancerPoolTenantID() {
+        return loadBalancerPoolTenantID;
+    }
+
+    public void setLoadBalancerPoolTenantID(String loadBalancerPoolTenantID) {
+        this.loadBalancerPoolTenantID = loadBalancerPoolTenantID;
+    }
+
+    public String getLoadBalancerPoolName() {
+        return loadBalancerPoolName;
+    }
+
+    public void setLoadBalancerPoolName(String loadBalancerPoolName) {
+        this.loadBalancerPoolName = loadBalancerPoolName;
+    }
+
+    public String getLoadBalancerPoolDescription() {
+        return loadBalancerPoolDescription;
+    }
+
+    public void setLoadBalancerPoolDescription(String loadBalancerPoolDescription) {
+        this.loadBalancerPoolDescription = loadBalancerPoolDescription;
+    }
+
+    public String getLoadBalancerPoolProtocol() {
+        return loadBalancerPoolProtocol;
+    }
+
+    public void setLoadBalancerPoolProtocol(String loadBalancerPoolProtocol) {
+        this.loadBalancerPoolProtocol = loadBalancerPoolProtocol;
+    }
+
+    public String getLoadBalancerPoolLbAlgorithm() {
+        return loadBalancerPoolLbAlgorithm;
+    }
+
+    public void setLoadBalancerPoolLbAlgorithm(String loadBalancerPoolLbAlgorithm) {
+        this.loadBalancerPoolLbAlgorithm = loadBalancerPoolLbAlgorithm;
+    }
+
+    public String getNeutronLoadBalancerPoolHealthMonitorID() {
+        return neutronLoadBalancerPoolHealthMonitorID;
+    }
+
+    public void setNeutronLoadBalancerPoolHealthMonitorID(String neutronLoadBalancerPoolHealthMonitorID) {
+        this.neutronLoadBalancerPoolHealthMonitorID = neutronLoadBalancerPoolHealthMonitorID;
+    }
+
+    // NOTE(review): method name has a typo ("AdminIsStateIsUp"); kept as-is
+    // because renaming a public accessor would break existing callers.
+    public Boolean getLoadBalancerPoolAdminIsStateIsUp() {
+        return loadBalancerPoolAdminStateIsUp;
+    }
+
+    public void setLoadBalancerPoolAdminStateIsUp(Boolean loadBalancerPoolAdminStateIsUp) {
+        this.loadBalancerPoolAdminStateIsUp = loadBalancerPoolAdminStateIsUp;
+    }
+
+    public String getLoadBalancerPoolStatus() {
+        return loadBalancerPoolStatus;
+    }
+
+    public void setLoadBalancerPoolStatus(String loadBalancerPoolStatus) {
+        this.loadBalancerPoolStatus = loadBalancerPoolStatus;
+    }
+
+    public List getLoadBalancerPoolMembers() {
+        return loadBalancerPoolMembers;
+    }
+
+    public void setLoadBalancerPoolMembers(List loadBalancerPoolMembers) {
+        this.loadBalancerPoolMembers = loadBalancerPoolMembers;
+    }
+
+    /**
+     * Builds a copy of this object containing only the requested attributes.
+     *
+     * @param fields wire-format attribute names selecting which values to
+     *               copy; unknown names are ignored
+     * @return a new NeutronLoadBalancerPool holding only the selected fields
+     */
+    public NeutronLoadBalancerPool extractFields(List<String> fields) {
+        NeutronLoadBalancerPool ans = new NeutronLoadBalancerPool();
+        Iterator<String> i = fields.iterator();
+        while (i.hasNext()) {
+            String s = i.next();
+            if (s.equals("id")) {
+                ans.setLoadBalancerPoolID(this.getLoadBalancerPoolID());
+            }
+            if (s.equals("tenant_id")) {
+                ans.setLoadBalancerPoolTenantID(this.getLoadBalancerPoolTenantID());
+            }
+            if (s.equals("name")) {
+                ans.setLoadBalancerPoolName(this.getLoadBalancerPoolName());
+            }
+            if (s.equals("description")) {
+                ans.setLoadBalancerPoolDescription(this.getLoadBalancerPoolDescription());
+            }
+            if (s.equals("protocol")) {
+                ans.setLoadBalancerPoolProtocol(this.getLoadBalancerPoolProtocol());
+            }
+            if (s.equals("lb_algorithm")) {
+                ans.setLoadBalancerPoolLbAlgorithm(this.getLoadBalancerPoolLbAlgorithm());
+            }
+            if (s.equals("healthmonitor_id")) {
+                ans.setNeutronLoadBalancerPoolHealthMonitorID(this.getNeutronLoadBalancerPoolHealthMonitorID());
+            }
+            if (s.equals("admin_state_up")) {
+                ans.setLoadBalancerPoolAdminStateIsUp(loadBalancerPoolAdminStateIsUp);
+            }
+            if (s.equals("status")) {
+                ans.setLoadBalancerPoolStatus(this.getLoadBalancerPoolStatus());
+            }
+            if (s.equals("members")) {
+                ans.setLoadBalancerPoolMembers(this.getLoadBalancerPoolMembers());
+            }
+        }
+        return ans;
+    }
+
+    // Added for consistency: every sibling LBaaS binding class in this
+    // package overrides toString(); this one was missing it.
+    @Override public String toString() {
+        return "NeutronLoadBalancerPool{" +
+                "loadBalancerPoolID='" + loadBalancerPoolID + '\'' +
+                ", loadBalancerPoolTenantID='" + loadBalancerPoolTenantID + '\'' +
+                ", loadBalancerPoolName='" + loadBalancerPoolName + '\'' +
+                ", loadBalancerPoolDescription='" + loadBalancerPoolDescription + '\'' +
+                ", loadBalancerPoolProtocol='" + loadBalancerPoolProtocol + '\'' +
+                ", loadBalancerPoolLbAlgorithm='" + loadBalancerPoolLbAlgorithm + '\'' +
+                ", neutronLoadBalancerPoolHealthMonitorID='" + neutronLoadBalancerPoolHealthMonitorID + '\'' +
+                ", loadBalancerPoolAdminStateIsUp=" + loadBalancerPoolAdminStateIsUp +
+                ", loadBalancerPoolStatus='" + loadBalancerPoolStatus + '\'' +
+                '}';
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+
+import javax.xml.bind.annotation.XmlElement;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+
+public class NeutronLoadBalancerPoolMember extends ConfigurationObject implements Serializable {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * TODO: Plumb into LBaaS Pool. Members are nested underneath Pool CRUD.
+     */
+    @XmlElement (name="id")
+    String poolMemberID;
+
+    @XmlElement (name="tenant_id")
+    String poolMemberTenantID;
+
+    @XmlElement (name="address")
+    String poolMemberAddress;
+
+    @XmlElement (name="protocol_port")
+    Integer poolMemberProtoPort;
+
+    @XmlElement (name="admin_state_up")
+    Boolean poolMemberAdminStateIsUp;
+
+    @XmlElement (name="weight")
+    Integer poolMemberWeight;
+
+    @XmlElement (name="subnet_id")
+    String poolMemberSubnetID;
+
+    @XmlElement (name="status")
+    String poolMemberStatus;
+
+    public NeutronLoadBalancerPoolMember() {
+    }
+
+    public String getPoolMemberID() {
+        return poolMemberID;
+    }
+
+    public void setPoolMemberID(String poolMemberID) {
+        this.poolMemberID = poolMemberID;
+    }
+
+    public String getPoolMemberTenantID() {
+        return poolMemberTenantID;
+    }
+
+    public void setPoolMemberTenantID(String poolMemberTenantID) {
+        this.poolMemberTenantID = poolMemberTenantID;
+    }
+
+    public String getPoolMemberAddress() {
+        return poolMemberAddress;
+    }
+
+    public void setPoolMemberAddress(String poolMemberAddress) {
+        this.poolMemberAddress = poolMemberAddress;
+    }
+
+    public Integer getPoolMemberProtoPort() {
+        return poolMemberProtoPort;
+    }
+
+    public void setPoolMemberProtoPort(Integer poolMemberProtoPort) {
+        this.poolMemberProtoPort = poolMemberProtoPort;
+    }
+
+    public Boolean getPoolMemberAdminStateIsUp() {
+        return poolMemberAdminStateIsUp;
+    }
+
+    public void setPoolMemberAdminStateIsUp(Boolean poolMemberAdminStateIsUp) {
+        this.poolMemberAdminStateIsUp = poolMemberAdminStateIsUp;
+    }
+
+    public Integer getPoolMemberWeight() {
+        return poolMemberWeight;
+    }
+
+    public void setPoolMemberWeight(Integer poolMemberWeight) {
+        this.poolMemberWeight = poolMemberWeight;
+    }
+
+    public String getPoolMemberSubnetID() {
+        return poolMemberSubnetID;
+    }
+
+    public void setPoolMemberSubnetID(String poolMemberSubnetID) {
+        this.poolMemberSubnetID = poolMemberSubnetID;
+    }
+
+    public String getPoolMemberStatus() {
+        return poolMemberStatus;
+    }
+
+    public void setPoolMemberStatus(String poolMemberStatus) {
+        this.poolMemberStatus = poolMemberStatus;
+    }
+
+    /**
+     * Builds a copy of this object containing only the requested attributes.
+     * Recognized names copy the matching value into the result; anything
+     * else in {@code fields} is skipped.
+     *
+     * @param fields wire-format attribute names selecting which values to copy
+     * @return a new NeutronLoadBalancerPoolMember holding only the selected fields
+     */
+    public NeutronLoadBalancerPoolMember extractFields(List<String> fields) {
+        NeutronLoadBalancerPoolMember selection = new NeutronLoadBalancerPoolMember();
+        for (String field : fields) {
+            // Names are mutually exclusive, so an else-if chain suffices.
+            if (field.equals("id")) {
+                selection.setPoolMemberID(this.getPoolMemberID());
+            } else if (field.equals("tenant_id")) {
+                selection.setPoolMemberTenantID(this.getPoolMemberTenantID());
+            } else if (field.equals("address")) {
+                selection.setPoolMemberAddress(this.getPoolMemberAddress());
+            } else if (field.equals("protocol_port")) {
+                selection.setPoolMemberProtoPort(this.getPoolMemberProtoPort());
+            } else if (field.equals("admin_state_up")) {
+                selection.setPoolMemberAdminStateIsUp(poolMemberAdminStateIsUp);
+            } else if (field.equals("weight")) {
+                selection.setPoolMemberWeight(this.getPoolMemberWeight());
+            } else if (field.equals("subnet_id")) {
+                selection.setPoolMemberSubnetID(this.getPoolMemberSubnetID());
+            } else if (field.equals("status")) {
+                selection.setPoolMemberStatus(this.getPoolMemberStatus());
+            }
+        }
+        return selection;
+    }
+
+    @Override public String toString() {
+        StringBuilder text = new StringBuilder("NeutronLoadBalancerPoolMember{");
+        text.append("poolMemberID='").append(poolMemberID).append('\'');
+        text.append(", poolMemberTenantID='").append(poolMemberTenantID).append('\'');
+        text.append(", poolMemberAddress='").append(poolMemberAddress).append('\'');
+        text.append(", poolMemberProtoPort=").append(poolMemberProtoPort);
+        text.append(", poolMemberAdminStateIsUp=").append(poolMemberAdminStateIsUp);
+        text.append(", poolMemberWeight=").append(poolMemberWeight);
+        text.append(", poolMemberSubnetID='").append(poolMemberSubnetID).append('\'');
+        text.append(", poolMemberStatus='").append(poolMemberStatus).append('\'');
+        text.append('}');
+        return text.toString();
+    }
+}
if (s.equals("tenant_id")) {
ans.setTenantID(this.getTenantID());
}
+ if (s.equals("security_groups")) {
+ List<NeutronSecurityGroup> securityGroups = new ArrayList<NeutronSecurityGroup>();
+ securityGroups.addAll(this.getSecurityGroups());
+ ans.setSecurityGroups(securityGroups);
+ }
}
return ans;
}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+/**
+ * JAXB request/response wrapper carrying either a single pool member
+ * ("member") or a bulk list ("members").
+ *
+ * Fixed: added @XmlRootElement so the wrapper can serve as a document root,
+ * and @XmlAccessorType(NONE) so JAXB maps only the two annotated fields —
+ * without it, the default PUBLIC_MEMBER access type would also map the
+ * public getBulk()/getSingleton() accessors as extra properties.
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+public class INeutronLoadBalancerPoolMemberRequest {
+
+    /**
+     * See OpenStack Network API v2.0 Reference for description of
+     * http://docs.openstack.org/api/openstack-network/2.0/content/
+     */
+
+    // Payload for single-member requests (wire key "member").
+    @XmlElement(name="member")
+    NeutronLoadBalancerPoolMember singletonLoadBalancerPoolMember;
+
+    // Payload for bulk requests (wire key "members").
+    @XmlElement(name="members")
+    List<NeutronLoadBalancerPoolMember> bulkRequest;
+
+    // No-arg constructor required by JAXB.
+    INeutronLoadBalancerPoolMemberRequest() {
+    }
+
+    INeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
+        bulkRequest = bulk;
+        singletonLoadBalancerPoolMember = null;
+    }
+
+    INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
+        singletonLoadBalancerPoolMember = group;
+    }
+
+    public List<NeutronLoadBalancerPoolMember> getBulk() {
+        return bulkRequest;
+    }
+
+    public NeutronLoadBalancerPoolMember getSingleton() {
+        return singletonLoadBalancerPoolMember;
+    }
+
+    /** @return true when this wrapper carries a single member rather than a bulk list */
+    public boolean isSingleton() {
+        return (singletonLoadBalancerPoolMember != null);
+    }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerHealthMonitorAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerHealthMonitorCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerHealthMonitor;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Neutron Northbound REST APIs for Load Balancer HealthMonitor.<br>
+ * This class provides REST APIs for managing neutron LoadBalancerHealthMonitor
+ *
+ * <br>
+ * <br>
+ * Authentication scheme : <b>HTTP Basic</b><br>
+ * Authentication realm : <b>opendaylight</b><br>
+ * Transport : <b>HTTP and HTTPS</b><br>
+ * <br>
+ * HTTPS Authentication is disabled by default. Administrator can enable it in
+ * tomcat-server.xml after adding a proper keystore / SSL certificate from a
+ * trusted authority.<br>
+ * More info :
+ * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+ *
+ */
+@Path("/healthmonitors")
+public class NeutronLoadBalancerHealthMonitorNorthbound {
+ private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancer.class);
+
+ private NeutronLoadBalancerHealthMonitor extractFields(NeutronLoadBalancerHealthMonitor o, List<String> fields) {
+ return o.extractFields(fields);
+ }
+
+ /**
+ * Returns a list of all LoadBalancerHealthMonitor */
+ @GET
+ @Produces({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+
+ public Response listGroups(
+ // return fields
+ @QueryParam("fields") List<String> fields,
+ // OpenStack LoadBalancerHealthMonitor attributes
+ @QueryParam("id") String queryLoadBalancerHealthMonitorID,
+ @QueryParam("tenant_id") String queryLoadBalancerHealthMonitorTenantID,
+ // TODO "type" is being a property by the JSON parser.
+ @QueryParam("type") String queryLoadBalancerHealthMonitorType,
+ @QueryParam("delay") Integer queryLoadBalancerHealthMonitorDelay,
+ @QueryParam("timeout") Integer queryLoadBalancerHealthMonitorTimeout,
+ @QueryParam("max_retries") Integer queryLoadBalancerHealthMonitorMaxRetries,
+ @QueryParam("http_method") String queryLoadBalancerHealthMonitorHttpMethod,
+ @QueryParam("url_path") String queryLoadBalancerHealthMonitorUrlPath,
+ @QueryParam("expected_codes") String queryLoadBalancerHealthMonitorExpectedCodes,
+ @QueryParam("admin_state_up") Boolean queryLoadBalancerHealthMonitorIsAdminStateUp,
+ @QueryParam("status") String queryLoadBalancerHealthMonitorStatus,
+ // pagination
+ @QueryParam("limit") String limit,
+ @QueryParam("marker") String marker,
+ @QueryParam("page_reverse") String pageReverse
+ // sorting not supported
+ ) {
+ INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerHealthMonitorCRUD(this);
+ if (loadBalancerHealthMonitorInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ List<NeutronLoadBalancerHealthMonitor> allLoadBalancerHealthMonitors = loadBalancerHealthMonitorInterface.getAllNeutronLoadBalancerHealthMonitors();
+ List<NeutronLoadBalancerHealthMonitor> ans = new ArrayList<NeutronLoadBalancerHealthMonitor>();
+ Iterator<NeutronLoadBalancerHealthMonitor> i = allLoadBalancerHealthMonitors.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerHealthMonitor nsg = i.next();
+ if ((queryLoadBalancerHealthMonitorID == null ||
+ queryLoadBalancerHealthMonitorID.equals(nsg.getLoadBalancerHealthMonitorID())) &&
+ (queryLoadBalancerHealthMonitorTenantID == null ||
+ queryLoadBalancerHealthMonitorTenantID.equals
+ (nsg.getLoadBalancerHealthMonitorTenantID())) &&
+ (queryLoadBalancerHealthMonitorType == null ||
+ queryLoadBalancerHealthMonitorType.equals
+ (nsg.getLoadBalancerHealthMonitorType())) &&
+ (queryLoadBalancerHealthMonitorDelay == null ||
+ queryLoadBalancerHealthMonitorDelay.equals
+ (nsg.getLoadBalancerHealthMonitorDelay())) &&
+ (queryLoadBalancerHealthMonitorTimeout == null ||
+ queryLoadBalancerHealthMonitorTimeout.equals
+ (nsg.getLoadBalancerHealthMonitorTimeout())) &&
+ (queryLoadBalancerHealthMonitorMaxRetries == null ||
+ queryLoadBalancerHealthMonitorMaxRetries.equals
+ (nsg.getLoadBalancerHealthMonitorMaxRetries())) &&
+ (queryLoadBalancerHealthMonitorHttpMethod == null ||
+ queryLoadBalancerHealthMonitorHttpMethod.equals
+ (nsg.getLoadBalancerHealthMonitorHttpMethod())) &&
+ (queryLoadBalancerHealthMonitorUrlPath == null ||
+ queryLoadBalancerHealthMonitorUrlPath.equals
+ (nsg.getLoadBalancerHealthMonitorUrlPath())) &&
+ (queryLoadBalancerHealthMonitorExpectedCodes == null ||
+ queryLoadBalancerHealthMonitorExpectedCodes.equals
+ (nsg.getLoadBalancerHealthMonitorExpectedCodes())) &&
+ (queryLoadBalancerHealthMonitorIsAdminStateUp == null ||
+ queryLoadBalancerHealthMonitorIsAdminStateUp.equals
+ (nsg.getLoadBalancerHealthMonitorAdminStateIsUp())) &&
+ (queryLoadBalancerHealthMonitorStatus == null ||
+ queryLoadBalancerHealthMonitorStatus.equals
+ (nsg.getLoadBalancerHealthMonitorStatus()))) {
+ if (fields.size() > 0) {
+ ans.add(extractFields(nsg,fields));
+ } else {
+ ans.add(nsg);
+ }
+ }
+ }
+ return Response.status(200).entity(
+ new NeutronLoadBalancerHealthMonitorRequest(ans)).build();
+ }
+
+ /**
+ * Returns a specific LoadBalancerHealthMonitor */
+
+ @Path("{loadBalancerHealthMonitorID}")
+ @GET
+ @Produces({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response showLoadBalancerHealthMonitor(@PathParam("loadBalancerHealthMonitorID") String loadBalancerHealthMonitorID,
+ // return fields
+ @QueryParam("fields") List<String> fields) {
+ INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerHealthMonitorCRUD(this);
+ if (loadBalancerHealthMonitorInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (!loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorExists(loadBalancerHealthMonitorID)) {
+ throw new ResourceNotFoundException("LoadBalancerHealthMonitor UUID does not exist.");
+ }
+ if (fields.size() > 0) {
+ NeutronLoadBalancerHealthMonitor ans = loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+ return Response.status(200).entity(
+ new NeutronLoadBalancerHealthMonitorRequest(extractFields(ans, fields))).build();
+ } else {
+ return Response.status(200).entity(new NeutronLoadBalancerHealthMonitorRequest(loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID))).build();
+ }
+ }
+
+ /**
+ * Creates new LoadBalancerHealthMonitor */
+
+ @POST
+ @Produces({ MediaType.APPLICATION_JSON })
+ @Consumes({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 201, condition = "Created"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response createLoadBalancerHealthMonitors(final NeutronLoadBalancerHealthMonitorRequest input) {
+ INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerHealthMonitorCRUD(this);
+ if (loadBalancerHealthMonitorInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (input.isSingleton()) {
+ NeutronLoadBalancerHealthMonitor singleton = input.getSingleton();
+
+ /*
+ * Verify that the LoadBalancerHealthMonitor doesn't already exist.
+ */
+ if (loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorExists(singleton.getLoadBalancerHealthMonitorID())) {
+ throw new BadRequestException("LoadBalancerHealthMonitor UUID already exists");
+ }
+ loadBalancerHealthMonitorInterface.addNeutronLoadBalancerHealthMonitor(singleton);
+
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerHealthMonitorAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ int status = service.canCreateNeutronLoadBalancerHealthMonitor(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ loadBalancerHealthMonitorInterface.addNeutronLoadBalancerHealthMonitor(singleton);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ service.neutronLoadBalancerHealthMonitorCreated(singleton);
+ }
+ }
+ } else {
+ List<NeutronLoadBalancerHealthMonitor> bulk = input.getBulk();
+ Iterator<NeutronLoadBalancerHealthMonitor> i = bulk.iterator();
+ HashMap<String, NeutronLoadBalancerHealthMonitor> testMap = new HashMap<String, NeutronLoadBalancerHealthMonitor>();
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerHealthMonitorAware.class, this, null);
+ while (i.hasNext()) {
+ NeutronLoadBalancerHealthMonitor test = i.next();
+
+ /*
+ * Verify that the firewall policy doesn't already exist
+ */
+
+ if (loadBalancerHealthMonitorInterface
+ .neutronLoadBalancerHealthMonitorExists(test.getLoadBalancerHealthMonitorID())) {
+ throw new BadRequestException("LoadBalancerHealthMonitor UUID already is already created");
+ }
+ if (testMap.containsKey(test.getLoadBalancerHealthMonitorID())) {
+ throw new BadRequestException("LoadBalancerHealthMonitor UUID already exists");
+ }
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ int status = service.canCreateNeutronLoadBalancerHealthMonitor(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ }
+ /*
+ * now, each element of the bulk request can be added to the cache
+ */
+ i = bulk.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerHealthMonitor test = i.next();
+ loadBalancerHealthMonitorInterface.addNeutronLoadBalancerHealthMonitor(test);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ service.neutronLoadBalancerHealthMonitorCreated(test);
+ }
+ }
+ }
+ }
+ return Response.status(201).entity(input).build();
+ }
+
+ /**
+ * Updates a LoadBalancerHealthMonitor Policy
+ */
+ @Path("{loadBalancerHealthMonitorID}")
+ @PUT
+ @Produces({ MediaType.APPLICATION_JSON })
+ @Consumes({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response updateLoadBalancerHealthMonitor(
+ @PathParam("loadBalancerHealthMonitorID") String loadBalancerHealthMonitorID,
+ final NeutronLoadBalancerHealthMonitorRequest input) {
+ INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerHealthMonitorCRUD(this);
+ if (loadBalancerHealthMonitorInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ /*
+ * verify the LoadBalancerHealthMonitor exists and there is only one delta provided
+ */
+ if (!loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorExists(loadBalancerHealthMonitorID)) {
+ throw new ResourceNotFoundException("LoadBalancerHealthMonitor UUID does not exist.");
+ }
+ if (!input.isSingleton()) {
+ throw new BadRequestException("Only singleton edit supported");
+ }
+ NeutronLoadBalancerHealthMonitor delta = input.getSingleton();
+ NeutronLoadBalancerHealthMonitor original = loadBalancerHealthMonitorInterface
+ .getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+
+ /*
+ * updates restricted by Neutron
+ */
+ if (delta.getLoadBalancerHealthMonitorID() != null ||
+ delta.getLoadBalancerHealthMonitorTenantID() != null ||
+ delta.getLoadBalancerHealthMonitorType() != null ||
+ delta.getLoadBalancerHealthMonitorDelay() != null ||
+ delta.getLoadBalancerHealthMonitorTimeout() != null ||
+ delta.getLoadBalancerHealthMonitorMaxRetries() != null ||
+ delta.getLoadBalancerHealthMonitorHttpMethod() != null ||
+ delta.getLoadBalancerHealthMonitorUrlPath() != null ||
+ delta.getLoadBalancerHealthMonitorExpectedCodes() != null ||
+ delta.getLoadBalancerHealthMonitorAdminStateIsUp() != null ||
+ delta.getLoadBalancerHealthMonitorStatus() != null) {
+ throw new BadRequestException("Attribute edit blocked by Neutron");
+ }
+
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerHealthMonitorAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ int status = service.canUpdateNeutronLoadBalancerHealthMonitor(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ /*
+ * update the object and return it
+ */
+ loadBalancerHealthMonitorInterface.updateNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID, delta);
+ NeutronLoadBalancerHealthMonitor updatedLoadBalancerHealthMonitor = loadBalancerHealthMonitorInterface
+ .getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ service.neutronLoadBalancerHealthMonitorUpdated(updatedLoadBalancerHealthMonitor);
+ }
+ }
+ return Response.status(200).entity(new NeutronLoadBalancerHealthMonitorRequest
+ (loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor
+ (loadBalancerHealthMonitorID))).build();
+ }
+
+
+
+ /**
+ * Deletes a LoadBalancerHealthMonitor
+ * */
+ @Path("{loadBalancerHealthMonitorID}")
+ @DELETE
+ @StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response deleteLoadBalancerHealthMonitor(
+ @PathParam("loadBalancerHealthMonitorID") String loadBalancerHealthMonitorID) {
+ INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerHealthMonitorCRUD(this);
+ if (loadBalancerHealthMonitorInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ /*
+ * verify the LoadBalancerHealthMonitor exists and it isn't currently in use
+ */
+ if (!loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorExists(loadBalancerHealthMonitorID)) {
+ throw new ResourceNotFoundException("LoadBalancerHealthMonitor UUID does not exist.");
+ }
+ if (loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorInUse(loadBalancerHealthMonitorID)) {
+ return Response.status(409).build();
+ }
+ NeutronLoadBalancerHealthMonitor singleton = loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerHealthMonitorAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ int status = service.canDeleteNeutronLoadBalancerHealthMonitor(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ loadBalancerHealthMonitorInterface.removeNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+ service.neutronLoadBalancerHealthMonitorDeleted(singleton);
+ }
+ }
+ return Response.status(204).build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerHealthMonitor;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerHealthMonitorRequest {
+ /**
+ * See OpenStack Network API v2.0 Reference for description of
+ * http://docs.openstack.org/api/openstack-network/2.0/content/
+ */
+
+ @XmlElement(name="healthmonitor")
+ NeutronLoadBalancerHealthMonitor singletonLoadBalancerHealthMonitor;
+
+ @XmlElement(name="healthmonitors")
+ List<NeutronLoadBalancerHealthMonitor> bulkRequest;
+
+ NeutronLoadBalancerHealthMonitorRequest() {
+ }
+
+ NeutronLoadBalancerHealthMonitorRequest(List<NeutronLoadBalancerHealthMonitor> bulk) {
+ bulkRequest = bulk;
+ singletonLoadBalancerHealthMonitor = null;
+ }
+
+ NeutronLoadBalancerHealthMonitorRequest(NeutronLoadBalancerHealthMonitor group) {
+ singletonLoadBalancerHealthMonitor = group;
+ }
+
+ public List<NeutronLoadBalancerHealthMonitor> getBulk() {
+ return bulkRequest;
+ }
+
+ public NeutronLoadBalancerHealthMonitor getSingleton() {
+ return singletonLoadBalancerHealthMonitor;
+ }
+
+ public boolean isSingleton() {
+ return (singletonLoadBalancerHealthMonitor != null);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerListener;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Neutron Northbound REST APIs for LoadBalancerListener Policies.<br>
+ * This class provides REST APIs for managing neutron LoadBalancerListener Policies
+ *
+ * <br>
+ * <br>
+ * Authentication scheme : <b>HTTP Basic</b><br>
+ * Authentication realm : <b>opendaylight</b><br>
+ * Transport : <b>HTTP and HTTPS</b><br>
+ * <br>
+ * HTTPS Authentication is disabled by default. Administrator can enable it in
+ * tomcat-server.xml after adding a proper keystore / SSL certificate from a
+ * trusted authority.<br>
+ * More info :
+ * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+ *
+ */
+@Path("/listeners")
+public class NeutronLoadBalancerListenerNorthbound {
+
+ private NeutronLoadBalancerListener extractFields(NeutronLoadBalancerListener o, List<String> fields) {
+ return o.extractFields(fields);
+ }
+
+ /**
+ * Returns a list of all LoadBalancerListener */
+ @GET
+ @Produces({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+
+ public Response listGroups(
+ // return fields
+ @QueryParam("fields") List<String> fields,
+ // OpenStack LoadBalancerListener attributes
+ @QueryParam("id") String queryLoadBalancerListenerID,
+ @QueryParam("default_pool_id") String queryLoadBalancerListenerDefaultPoolID,
+ @QueryParam("tenant_id") String queryLoadBalancerListenerTenantID,
+ @QueryParam("name") String queryLoadBalancerListenerName,
+ @QueryParam("description") String queryLoadBalancerListenerDescription,
+ @QueryParam("shared") String queryLoadBalancerListenerIsShared,
+ @QueryParam("protocol") String queryLoadBalancerListenerProtocol,
+ @QueryParam("protocol_port") String queryLoadBalancerListenerProtocolPort,
+ @QueryParam("load_balancer_id") String queryLoadBalancerListenerLoadBalancerID,
+ @QueryParam("admin_state_up") String queryLoadBalancerListenerAdminIsUp,
+ @QueryParam("status") String queryLoadBalancerListenerStatus,
+ // pagination
+ @QueryParam("limit") String limit,
+ @QueryParam("marker") String marker,
+ @QueryParam("page_reverse") String pageReverse
+ // sorting not supported
+ ) {
+ INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+ // INeutronLoadBalancerListenerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerRuleCRUD(this);
+
+ if (loadBalancerListenerInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ List<NeutronLoadBalancerListener> allLoadBalancerListeners = loadBalancerListenerInterface.getAllNeutronLoadBalancerListeners();
+ // List<NeutronLoadBalancerListenerRule> allLoadBalancerListenerRules = firewallRuleInterface.getAllNeutronLoadBalancerListenerRules();
+ List<NeutronLoadBalancerListener> ans = new ArrayList<NeutronLoadBalancerListener>();
+ // List<NeutronLoadBalancerListenerRule> rules = new ArrayList<NeutronLoadBalancerListenerRule>();
+ Iterator<NeutronLoadBalancerListener> i = allLoadBalancerListeners.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerListener nsg = i.next();
+ if ((queryLoadBalancerListenerID == null ||
+ queryLoadBalancerListenerID.equals(nsg.getLoadBalancerListenerID())) &&
+ (queryLoadBalancerListenerDefaultPoolID == null ||
+ queryLoadBalancerListenerDefaultPoolID.equals(nsg.getNeutronLoadBalancerListenerDefaultPoolID())) &&
+ (queryLoadBalancerListenerTenantID == null ||
+ queryLoadBalancerListenerTenantID.equals(nsg.getLoadBalancerListenerTenantID())) &&
+ (queryLoadBalancerListenerName == null ||
+ queryLoadBalancerListenerName.equals(nsg.getLoadBalancerListenerName())) &&
+ (queryLoadBalancerListenerDescription == null ||
+ queryLoadBalancerListenerDescription.equals(nsg.getLoadBalancerListenerDescription())) &&
+ (queryLoadBalancerListenerIsShared == null ||
+ queryLoadBalancerListenerIsShared.equals(nsg.getLoadBalancerListenerIsShared())) &&
+ (queryLoadBalancerListenerProtocol == null ||
+ queryLoadBalancerListenerProtocol.equals(nsg.getNeutronLoadBalancerListenerProtocol())) &&
+ (queryLoadBalancerListenerProtocolPort == null ||
+ queryLoadBalancerListenerProtocolPort.equals(nsg.getNeutronLoadBalancerListenerProtocolPort())) &&
+ (queryLoadBalancerListenerLoadBalancerID == null ||
+ queryLoadBalancerListenerLoadBalancerID.equals(nsg.getNeutronLoadBalancerListenerLoadBalancerID())) &&
+ (queryLoadBalancerListenerAdminIsUp == null ||
+ queryLoadBalancerListenerAdminIsUp.equals(nsg.getLoadBalancerListenerAdminStateIsUp())) &&
+ (queryLoadBalancerListenerStatus == null ||
+ queryLoadBalancerListenerStatus.equals(nsg.getLoadBalancerListenerStatus()))) {
+ if (fields.size() > 0) {
+ ans.add(extractFields(nsg,fields));
+ } else {
+ ans.add(nsg);
+ }
+ }
+ }
+ return Response.status(200).entity(
+ new NeutronLoadBalancerListenerRequest(ans)).build();
+ }
+
+ /**
+ * Returns a specific LoadBalancerListener */
+
+ @Path("{loadBalancerListenerID}")
+ @GET
+ @Produces({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response showLoadBalancerListener(@PathParam("loadBalancerListenerID") String loadBalancerListenerID,
+ // return fields
+ @QueryParam("fields") List<String> fields) {
+ INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+ if (loadBalancerListenerInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (!loadBalancerListenerInterface.neutronLoadBalancerListenerExists(loadBalancerListenerID)) {
+ throw new ResourceNotFoundException("LoadBalancerListener UUID does not exist.");
+ }
+ if (fields.size() > 0) {
+ NeutronLoadBalancerListener ans = loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID);
+ return Response.status(200).entity(
+ new NeutronLoadBalancerListenerRequest(extractFields(ans, fields))).build();
+ } else {
+ return Response.status(200).entity(new NeutronLoadBalancerListenerRequest(loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID))).build();
+ }
+ }
+
+ /**
+ * Creates new LoadBalancerListener */
+
+ @POST
+ @Produces({ MediaType.APPLICATION_JSON })
+ @Consumes({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 201, condition = "Created"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response createLoadBalancerListeners(final NeutronLoadBalancerListenerRequest input) {
+ INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+ if (loadBalancerListenerInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (input.isSingleton()) {
+ NeutronLoadBalancerListener singleton = input.getSingleton();
+
+ /*
+ * Verify that the LoadBalancerListener doesn't already exist.
+ */
+ if (loadBalancerListenerInterface.neutronLoadBalancerListenerExists(singleton.getLoadBalancerListenerID())) {
+ throw new BadRequestException("LoadBalancerListener UUID already exists");
+ }
+ loadBalancerListenerInterface.addNeutronLoadBalancerListener(singleton);
+
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerListenerAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ int status = service.canCreateNeutronLoadBalancerListener(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ loadBalancerListenerInterface.addNeutronLoadBalancerListener(singleton);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ service.neutronLoadBalancerListenerCreated(singleton);
+ }
+ }
+ } else {
+ List<NeutronLoadBalancerListener> bulk = input.getBulk();
+ Iterator<NeutronLoadBalancerListener> i = bulk.iterator();
+ HashMap<String, NeutronLoadBalancerListener> testMap = new HashMap<String, NeutronLoadBalancerListener>();
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerListenerAware.class, this, null);
+ while (i.hasNext()) {
+ NeutronLoadBalancerListener test = i.next();
+
+ /*
+ * Verify that the firewall policy doesn't already exist
+ */
+
+ if (loadBalancerListenerInterface.neutronLoadBalancerListenerExists(test.getLoadBalancerListenerID())) {
+ throw new BadRequestException("LoadBalancerListener UUID already is already created");
+ }
+ if (testMap.containsKey(test.getLoadBalancerListenerID())) {
+ throw new BadRequestException("LoadBalancerListener UUID already exists");
+ }
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ int status = service.canCreateNeutronLoadBalancerListener(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ }
+ /*
+ * now, each element of the bulk request can be added to the cache
+ */
+ i = bulk.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerListener test = i.next();
+ loadBalancerListenerInterface.addNeutronLoadBalancerListener(test);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ service.neutronLoadBalancerListenerCreated(test);
+ }
+ }
+ }
+ }
+ return Response.status(201).entity(input).build();
+ }
+
+ /**
+ * Updates a LoadBalancerListener Policy.
+ *
+ * Accepts only a singleton request body: verifies the target listener
+ * exists, rejects edits to Neutron-restricted attributes, lets every
+ * registered INeutronLoadBalancerListenerAware service veto the change,
+ * then applies the delta and notifies those services.
+ *
+ * NOTE(review): the restricted-attribute check below rejects a delta in
+ * which ANY listener attribute is non-null, so it appears no field can
+ * actually be edited through this endpoint — confirm this is intended.
+ */
+ @Path("{loadBalancerListenerID}")
+ @PUT
+ @Produces({ MediaType.APPLICATION_JSON })
+ @Consumes({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response updateLoadBalancerListener(
+ @PathParam("loadBalancerListenerID") String loadBalancerListenerID, final NeutronLoadBalancerListenerRequest input) {
+ INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+ if (loadBalancerListenerInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ /*
+ * verify the LoadBalancerListener exists and there is only one delta provided
+ */
+ if (!loadBalancerListenerInterface.neutronLoadBalancerListenerExists(loadBalancerListenerID)) {
+ throw new ResourceNotFoundException("LoadBalancerListener UUID does not exist.");
+ }
+ if (!input.isSingleton()) {
+ throw new BadRequestException("Only singleton edit supported");
+ }
+ NeutronLoadBalancerListener delta = input.getSingleton();
+ NeutronLoadBalancerListener original = loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID);
+
+ /*
+ * updates restricted by Neutron
+ */
+ if (delta.getLoadBalancerListenerID() != null ||
+ delta.getNeutronLoadBalancerListenerDefaultPoolID() != null ||
+ delta.getLoadBalancerListenerTenantID() != null ||
+ delta.getLoadBalancerListenerName() != null ||
+ delta.getLoadBalancerListenerDescription() != null ||
+ delta.getLoadBalancerListenerIsShared() != null ||
+ delta.getNeutronLoadBalancerListenerProtocol() != null ||
+ delta.getNeutronLoadBalancerListenerProtocolPort() != null ||
+ delta.getNeutronLoadBalancerListenerLoadBalancerID() != null ||
+ delta.getLoadBalancerListenerAdminStateIsUp() != null ||
+ delta.getLoadBalancerListenerStatus() != null) {
+ throw new BadRequestException("Attribute edit blocked by Neutron");
+ }
+
+ // Registered services may veto the update; any non-2xx answer aborts it.
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerListenerAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ int status = service.canUpdateNeutronLoadBalancerListener(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ /*
+ * update the object and return it
+ */
+ loadBalancerListenerInterface.updateNeutronLoadBalancerListener(loadBalancerListenerID, delta);
+ // Re-read the stored object so listeners see the post-update state.
+ NeutronLoadBalancerListener updatedLoadBalancerListener = loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+ service.neutronLoadBalancerListenerUpdated(updatedLoadBalancerListener);
+ }
+ }
+ return Response.status(200).entity(new NeutronLoadBalancerListenerRequest(loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID))).build();
+ }
+
+ /**
+ * Deletes a LoadBalancerListener.
+ *
+ * The listener must exist and must not be in use; registered
+ * INeutronLoadBalancerListenerAware services may veto the delete and are
+ * notified once the object has been removed.
+ */
+ @Path("{loadBalancerListenerID}")
+ @DELETE
+ @StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response deleteLoadBalancerListener(
+ @PathParam("loadBalancerListenerID") String loadBalancerListenerID) {
+ INeutronLoadBalancerListenerCRUD listenerCrud = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+ if (listenerCrud == null) {
+ throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ // The listener must exist and must not currently be referenced.
+ if (!listenerCrud.neutronLoadBalancerListenerExists(loadBalancerListenerID)) {
+ throw new ResourceNotFoundException("LoadBalancerListener UUID does not exist.");
+ }
+ if (listenerCrud.neutronLoadBalancerListenerInUse(loadBalancerListenerID)) {
+ return Response.status(409).build();
+ }
+
+ NeutronLoadBalancerListener target = listenerCrud.getNeutronLoadBalancerListener(loadBalancerListenerID);
+ Object[] services = ServiceHelper.getGlobalInstances(INeutronLoadBalancerListenerAware.class, this, null);
+
+ // Any registered service may veto the delete with a non-2xx status.
+ if (services != null) {
+ for (Object svc : services) {
+ int code = ((INeutronLoadBalancerListenerAware) svc).canDeleteNeutronLoadBalancerListener(target);
+ if (code < 200 || code > 299) {
+ return Response.status(code).build();
+ }
+ }
+ }
+
+ listenerCrud.removeNeutronLoadBalancerListener(loadBalancerListenerID);
+ if (services != null) {
+ for (Object svc : services) {
+ ((INeutronLoadBalancerListenerAware) svc).neutronLoadBalancerListenerDeleted(target);
+ }
+ }
+ return Response.status(204).build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerListener;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerListenerRequest {
+ /**
+ * JAXB payload for LoadBalancerListener REST requests/responses.
+ *
+ * Carries either a single listener ("listener") or a bulk list
+ * ("listeners"); exactly one of the two fields is expected to be
+ * non-null. See OpenStack Network API v2.0 Reference for description of
+ * http://docs.openstack.org/api/openstack-network/2.0/content/
+ */
+
+ @XmlElement(name="listener")
+ NeutronLoadBalancerListener singletonLoadBalancerListener;
+
+ @XmlElement(name="listeners")
+ List<NeutronLoadBalancerListener> bulkRequest;
+
+ // No-arg constructor required by JAXB for unmarshalling.
+ NeutronLoadBalancerListenerRequest() {
+ }
+
+ NeutronLoadBalancerListenerRequest(List<NeutronLoadBalancerListener> bulk) {
+ bulkRequest = bulk;
+ singletonLoadBalancerListener = null;
+ }
+
+ NeutronLoadBalancerListenerRequest(NeutronLoadBalancerListener group) {
+ singletonLoadBalancerListener = group;
+ // Mirror the bulk constructor: explicitly clear the other field so
+ // isSingleton()/getBulk() cannot observe a stale value.
+ bulkRequest = null;
+ }
+
+ // Returns the bulk list, or null for a singleton request.
+ public List<NeutronLoadBalancerListener> getBulk() {
+ return bulkRequest;
+ }
+
+ // Returns the single listener, or null for a bulk request.
+ public NeutronLoadBalancerListener getSingleton() {
+ return singletonLoadBalancerListener;
+ }
+
+ public boolean isSingleton() {
+ return (singletonLoadBalancerListener != null);
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Neutron Northbound REST APIs for LoadBalancer Policies.<br>
+ * This class provides REST APIs for managing neutron LoadBalancer Policies
+ *
+ * <br>
+ * <br>
+ * Authentication scheme : <b>HTTP Basic</b><br>
+ * Authentication realm : <b>opendaylight</b><br>
+ * Transport : <b>HTTP and HTTPS</b><br>
+ * <br>
+ * HTTPS Authentication is disabled by default. Administrator can enable it in
+ * tomcat-server.xml after adding a proper keystore / SSL certificate from a
+ * trusted authority.<br>
+ * More info :
+ * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+ *
+ */
+@Path("/loadbalancers")
+public class NeutronLoadBalancerNorthbound {
+
+ private NeutronLoadBalancer extractFields(NeutronLoadBalancer o, List<String> fields) {
+ return o.extractFields(fields);
+ }
+
+ /**
+ * Returns a list of all LoadBalancer */
+ @GET
+ @Produces({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+
+ public Response listGroups(
+ // return fields
+ @QueryParam("fields") List<String> fields,
+ // OpenStack LoadBalancer attributes
+ @QueryParam("id") String queryLoadBalancerID,
+ @QueryParam("tenant_id") String queryLoadBalancerTenantID,
+ @QueryParam("name") String queryLoadBalancerName,
+ @QueryParam("description") String queryLoadBalancerDescription,
+ @QueryParam("status") String queryLoadBalancerStatus,
+ @QueryParam("vip_address") String queryLoadBalancerVipAddress,
+ @QueryParam("vip_subnet") String queryLoadBalancerVipSubnet,
+ // pagination
+ @QueryParam("limit") String limit,
+ @QueryParam("marker") String marker,
+ @QueryParam("page_reverse") String pageReverse
+ // sorting not supported
+ ) {
+ INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ this);
+ // INeutronLoadBalancerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerRuleCRUD(this);
+
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ List<NeutronLoadBalancer> allLoadBalancers = loadBalancerPoolInterface.getAllNeutronLoadBalancers();
+ // List<NeutronLoadBalancerRule> allLoadBalancerRules = firewallRuleInterface.getAllNeutronLoadBalancerRules();
+ List<NeutronLoadBalancer> ans = new ArrayList<NeutronLoadBalancer>();
+ // List<NeutronLoadBalancerRule> rules = new ArrayList<NeutronLoadBalancerRule>();
+ Iterator<NeutronLoadBalancer> i = allLoadBalancers.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancer nsg = i.next();
+ if ((queryLoadBalancerID == null ||
+ queryLoadBalancerID.equals(nsg.getLoadBalancerID())) &&
+ (queryLoadBalancerTenantID == null ||
+ queryLoadBalancerTenantID.equals(nsg.getLoadBalancerTenantID())) &&
+ (queryLoadBalancerName == null ||
+ queryLoadBalancerName.equals(nsg.getLoadBalancerName())) &&
+ (queryLoadBalancerDescription == null ||
+ queryLoadBalancerDescription.equals(nsg.getLoadBalancerDescription())) &&
+ (queryLoadBalancerVipAddress == null ||
+ queryLoadBalancerVipAddress.equals(nsg.getLoadBalancerVipAddress())) &&
+ (queryLoadBalancerVipSubnet == null ||
+ queryLoadBalancerVipSubnet.equals(nsg.getLoadBalancerVipSubnetID()))) {
+ if (fields.size() > 0) {
+ ans.add(extractFields(nsg,fields));
+ } else {
+ ans.add(nsg);
+ }
+ }
+ }
+ return Response.status(200).entity(
+ new NeutronLoadBalancerRequest(ans)).build();
+ }
+
+ /**
+ * Returns a specific LoadBalancer */
+
+ @Path("{loadBalancerPoolID}")
+ @GET
+ @Produces({ MediaType.APPLICATION_JSON })
+
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response showLoadBalancer(@PathParam("loadBalancerPoolID") String loadBalancerPoolID,
+ // return fields
+ @QueryParam("fields") List<String> fields) {
+ INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
+ }
+ if (fields.size() > 0) {
+ NeutronLoadBalancer ans = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ return Response.status(200).entity(
+ new NeutronLoadBalancerRequest(extractFields(ans, fields))).build();
+ } else {
+ return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
+ loadBalancerPoolID))).build();
+ }
+ }
+
+ /**
+ * Creates new LoadBalancer */
+
+ @POST
+ @Produces({ MediaType.APPLICATION_JSON })
+ @Consumes({ MediaType.APPLICATION_JSON })
+
+ @StatusCodes({
+ @ResponseCode(code = 201, condition = "Created"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response createLoadBalancers(final NeutronLoadBalancerRequest input) {
+ INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (input.isSingleton()) {
+ NeutronLoadBalancer singleton = input.getSingleton();
+
+ /*
+ * Verify that the LoadBalancer doesn't already exist.
+ */
+ if (loadBalancerPoolInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
+ throw new BadRequestException("LoadBalancer UUID already exists");
+ }
+ loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
+
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ int status = service.canCreateNeutronLoadBalancer(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ service.neutronLoadBalancerCreated(singleton);
+ }
+ }
+ } else {
+ List<NeutronLoadBalancer> bulk = input.getBulk();
+ Iterator<NeutronLoadBalancer> i = bulk.iterator();
+ HashMap<String, NeutronLoadBalancer> testMap = new HashMap<String, NeutronLoadBalancer>();
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
+ while (i.hasNext()) {
+ NeutronLoadBalancer test = i.next();
+
+ /*
+ * Verify that the firewall policy doesn't already exist
+ */
+
+ if (loadBalancerPoolInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
+ throw new BadRequestException("Load Balancer Pool UUID already is already created");
+ }
+ if (testMap.containsKey(test.getLoadBalancerID())) {
+ throw new BadRequestException("Load Balancer Pool UUID already exists");
+ }
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ int status = service.canCreateNeutronLoadBalancer(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ }
+ /*
+ * now, each element of the bulk request can be added to the cache
+ */
+ i = bulk.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancer test = i.next();
+ loadBalancerPoolInterface.addNeutronLoadBalancer(test);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ service.neutronLoadBalancerCreated(test);
+ }
+ }
+ }
+ }
+ return Response.status(201).entity(input).build();
+ }
+
+ /**
+ * Updates a LoadBalancer Policy
+ */
+ @Path("{loadBalancerPoolID}")
+ @PUT
+ @Produces({ MediaType.APPLICATION_JSON })
+ @Consumes({ MediaType.APPLICATION_JSON })
+
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response updateLoadBalancer(
+ @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerRequest input) {
+ INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ /*
+ * verify the LoadBalancer exists and there is only one delta provided
+ */
+ if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
+ }
+ if (!input.isSingleton()) {
+ throw new BadRequestException("Only singleton edit supported");
+ }
+ NeutronLoadBalancer delta = input.getSingleton();
+ NeutronLoadBalancer original = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+
+ /*
+ * updates restricted by Neutron
+ */
+ if (delta.getLoadBalancerID() != null ||
+ delta.getLoadBalancerTenantID() != null ||
+ delta.getLoadBalancerName() != null ||
+ delta.getLoadBalancerDescription() != null ||
+ delta.getLoadBalancerStatus() != null ||
+ delta.getLoadBalancerVipAddress() != null ||
+ delta.getLoadBalancerVipSubnetID() != null) {
+ throw new BadRequestException("Attribute edit blocked by Neutron");
+ }
+
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ int status = service.canUpdateNeutronLoadBalancer(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ /*
+ * update the object and return it
+ */
+ loadBalancerPoolInterface.updateNeutronLoadBalancer(loadBalancerPoolID, delta);
+ NeutronLoadBalancer updatedLoadBalancer = loadBalancerPoolInterface.getNeutronLoadBalancer(
+ loadBalancerPoolID);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ service.neutronLoadBalancerUpdated(updatedLoadBalancer);
+ }
+ }
+ return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
+ loadBalancerPoolID))).build();
+ }
+
+ /**
+ * Deletes a LoadBalancer */
+
+ @Path("{loadBalancerPoolID}")
+ @DELETE
+ @StatusCodes({
+ @ResponseCode(code = 204, condition = "No Content"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response deleteLoadBalancer(
+ @PathParam("loadBalancerPoolID") String loadBalancerPoolID) {
+ INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+ this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ /*
+ * verify the LoadBalancer exists and it isn't currently in use
+ */
+ if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+ throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
+ }
+ if (loadBalancerPoolInterface.neutronLoadBalancerInUse(loadBalancerPoolID)) {
+ return Response.status(409).build();
+ }
+ NeutronLoadBalancer singleton = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ int status = service.canDeleteNeutronLoadBalancer(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ loadBalancerPoolInterface.removeNeutronLoadBalancer(loadBalancerPoolID);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+ service.neutronLoadBalancerDeleted(singleton);
+ }
+ }
+ return Response.status(204).build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+
+@Path("/pools/{loadBalancerPoolID}/members")
+public class NeutronLoadBalancerPoolMembersNorthbound {
+
+ // Delegates field projection to the model object itself.
+ private NeutronLoadBalancerPoolMember extractFields(NeutronLoadBalancerPoolMember o, List<String> fields) {
+ return o.extractFields(fields);
+ }
+
+ /**
+ * Returns a list of all LoadBalancerPool members, filtered by the
+ * supplied query parameters.
+ *
+ * NOTE(review): the "protocol_port" query parameter is accepted but not
+ * applied in the filter below — the member model's accessor for it is
+ * not visible here, so the filter was left unchanged; confirm and add it.
+ */
+ @GET
+ @Produces({MediaType.APPLICATION_JSON})
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 501, condition = "Not Implemented")})
+
+ public Response listMembers(
+ // return fields
+ @QueryParam("fields") List<String> fields,
+ // OpenStack LoadBalancerPool attributes
+ @QueryParam("id") String queryLoadBalancerPoolMemberID,
+ @QueryParam("tenant_id") String queryLoadBalancerPoolMemberTenantID,
+ @QueryParam("address") String queryLoadBalancerPoolMemberAddress,
+ @QueryParam("protocol_port") String queryLoadBalancerPoolMemberProtoPort,
+ @QueryParam("admin_state_up") String queryLoadBalancerPoolMemberAdminStateUp,
+ @QueryParam("weight") String queryLoadBalancerPoolMemberWeight,
+ @QueryParam("subnet_id") String queryLoadBalancerPoolMemberSubnetID,
+ @QueryParam("status") String queryLoadBalancerPoolMemberStatus,
+
+ // pagination
+ @QueryParam("limit") String limit,
+ @QueryParam("marker") String marker,
+ @QueryParam("page_reverse") String pageReverse
+ // sorting not supported
+ ) {
+ INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces
+ .getINeutronLoadBalancerPoolMemberCRUD(this);
+ if (loadBalancerPoolMemberInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = loadBalancerPoolMemberInterface
+ .getAllNeutronLoadBalancerPoolMembers();
+ List<NeutronLoadBalancerPoolMember> ans = new ArrayList<NeutronLoadBalancerPoolMember>();
+ Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerPoolMember nsg = i.next();
+ if ((queryLoadBalancerPoolMemberID == null ||
+ queryLoadBalancerPoolMemberID.equals(nsg.getPoolMemberID())) &&
+ (queryLoadBalancerPoolMemberTenantID == null ||
+ queryLoadBalancerPoolMemberTenantID.equals(nsg.getPoolMemberTenantID())) &&
+ (queryLoadBalancerPoolMemberAddress == null ||
+ queryLoadBalancerPoolMemberAddress.equals(nsg.getPoolMemberAddress())) &&
+ (queryLoadBalancerPoolMemberAdminStateUp == null ||
+ queryLoadBalancerPoolMemberAdminStateUp.equals(nsg.getPoolMemberAdminStateIsUp())) &&
+ (queryLoadBalancerPoolMemberWeight == null ||
+ queryLoadBalancerPoolMemberWeight.equals(nsg.getPoolMemberWeight())) &&
+ (queryLoadBalancerPoolMemberSubnetID == null ||
+ queryLoadBalancerPoolMemberSubnetID.equals(nsg.getPoolMemberSubnetID())) &&
+ (queryLoadBalancerPoolMemberStatus == null ||
+ queryLoadBalancerPoolMemberStatus.equals(nsg.getPoolMemberStatus()))) {
+ if (fields.size() > 0) {
+ ans.add(extractFields(nsg, fields));
+ } else {
+ ans.add(nsg);
+ }
+ }
+ }
+ return Response.status(200).entity(
+ new INeutronLoadBalancerPoolMemberRequest(ans)).build();
+ }
+
+ /**
+ * Adds a Member to an LBaaS Pool. Accepts a singleton or bulk request;
+ * each candidate is validated (no duplicate UUID, no veto from registered
+ * INeutronLoadBalancerPoolMemberAware services) before being added, and
+ * services are notified after each add.
+ *
+ * FIX: the method previously repeated
+ * @Path("/pools/{loadBalancerPoolID}/members"), which JAX-RS appends to
+ * the identical class-level @Path, doubling the effective URI. The
+ * class-level mapping alone is the intended route.
+ */
+ @PUT
+ @Produces({MediaType.APPLICATION_JSON})
+ @Consumes({MediaType.APPLICATION_JSON})
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented")})
+ public Response createLoadBalancerPoolMember( INeutronLoadBalancerPoolMemberRequest input) {
+
+ INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(
+ this);
+ if (loadBalancerPoolMemberInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPoolMember CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (input.isSingleton()) {
+ NeutronLoadBalancerPoolMember singleton = input.getSingleton();
+
+ /*
+ * Verify that the LoadBalancerPoolMember doesn't already exist.
+ */
+ if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
+ singleton.getPoolMemberID())) {
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
+ }
+
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ int status = service.canCreateNeutronLoadBalancerPoolMember(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ // FIX: the member was previously also added BEFORE the canCreate
+ // checks above, adding it twice and persisting it even when a
+ // service vetoed the create. Add it exactly once, after validation.
+ loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ service.neutronLoadBalancerPoolMemberCreated(singleton);
+ }
+ }
+ } else {
+ List<NeutronLoadBalancerPoolMember> bulk = input.getBulk();
+ Iterator<NeutronLoadBalancerPoolMember> i = bulk.iterator();
+ HashMap<String, NeutronLoadBalancerPoolMember> testMap = new HashMap<String, NeutronLoadBalancerPoolMember>();
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
+ while (i.hasNext()) {
+ NeutronLoadBalancerPoolMember test = i.next();
+
+ /*
+ * Verify that the pool member doesn't already exist, either in
+ * the cache or earlier in this bulk request.
+ */
+ if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
+ test.getPoolMemberID())) {
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
+ }
+ if (testMap.containsKey(test.getPoolMemberID())) {
+ throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
+ }
+ // FIX: testMap was never populated, so duplicate UUIDs within a
+ // single bulk request were not detected.
+ testMap.put(test.getPoolMemberID(), test);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ int status = service.canCreateNeutronLoadBalancerPoolMember(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ }
+ /*
+ * now, each element of the bulk request can be added to the cache
+ */
+ i = bulk.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerPoolMember test = i.next();
+ loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(test);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+ service.neutronLoadBalancerPoolMemberCreated(test);
+ }
+ }
+ }
+ }
+ return Response.status(201).entity(input).build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Neutron Northbound REST APIs for LoadBalancerPool Policies.<br>
+ * This class provides REST APIs for managing neutron LoadBalancerPool Policies
+ *
+ * <br>
+ * <br>
+ * Authentication scheme : <b>HTTP Basic</b><br>
+ * Authentication realm : <b>opendaylight</b><br>
+ * Transport : <b>HTTP and HTTPS</b><br>
+ * <br>
+ * HTTPS Authentication is disabled by default. Administrator can enable it in
+ * tomcat-server.xml after adding a proper keystore / SSL certificate from a
+ * trusted authority.<br>
+ * More info :
+ * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+ *
+ */
+@Path("/pools")
+public class NeutronLoadBalancerPoolNorthbound {
+
+ // Delegates field projection to the entity itself: returns a view of o
+ // containing only the attributes named in fields (see
+ // NeutronLoadBalancerPool.extractFields).
+ private NeutronLoadBalancerPool extractFields(NeutronLoadBalancerPool o, List<String> fields) {
+ return o.extractFields(fields);
+ }
+
+ /**
+ * Returns a list of all LoadBalancerPools matching the supplied query
+ * parameters. A pool is included only when every non-null query parameter
+ * equals the corresponding pool attribute; when {@code fields} is
+ * non-empty each returned pool carries only the requested attributes.
+ *
+ * The limit/marker/page_reverse parameters are accepted but pagination is
+ * not implemented here; sorting is not supported.
+ * */
+ @GET
+ @Produces({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+
+ public Response listGroups(
+ // return fields
+ @QueryParam("fields") List<String> fields,
+ // OpenStack LoadBalancerPool attributes
+ @QueryParam("id") String queryLoadBalancerPoolID,
+ @QueryParam("tenant_id") String queryLoadBalancerPoolTenantID,
+ @QueryParam("name") String queryLoadBalancerPoolName,
+ @QueryParam("description") String queryLoadBalancerDescription,
+ @QueryParam("protocol") String queryLoadBalancerProtocol,
+ @QueryParam("lb_algorithm") String queryLoadBalancerPoolLbAlgorithm,
+ @QueryParam("healthmonitor_id") String queryLoadBalancerPoolHealthMonitorID,
+ @QueryParam("admin_state_up") String queryLoadBalancerIsAdminStateUp,
+ @QueryParam("status") String queryLoadBalancerPoolStatus,
+ @QueryParam("members") List queryLoadBalancerPoolMembers,
+ // pagination
+ @QueryParam("limit") String limit,
+ @QueryParam("marker") String marker,
+ @QueryParam("page_reverse") String pageReverse
+ // sorting not supported
+ ) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ List<NeutronLoadBalancerPool> allLoadBalancerPools = loadBalancerPoolInterface.getAllNeutronLoadBalancerPools();
+ List<NeutronLoadBalancerPool> ans = new ArrayList<NeutronLoadBalancerPool>();
+ Iterator<NeutronLoadBalancerPool> i = allLoadBalancerPools.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerPool nsg = i.next();
+ if ((queryLoadBalancerPoolID == null ||
+ queryLoadBalancerPoolID.equals(nsg.getLoadBalancerPoolID())) &&
+ (queryLoadBalancerPoolTenantID == null ||
+ queryLoadBalancerPoolTenantID.equals(nsg.getLoadBalancerPoolTenantID())) &&
+ (queryLoadBalancerPoolName == null ||
+ queryLoadBalancerPoolName.equals(nsg.getLoadBalancerPoolName())) &&
+ (queryLoadBalancerDescription == null ||
+ queryLoadBalancerDescription.equals(nsg.getLoadBalancerPoolDescription())) &&
+ // bug fix: the "protocol" query parameter was accepted but was
+ // never applied to the result set
+ (queryLoadBalancerProtocol == null ||
+ queryLoadBalancerProtocol.equals(nsg.getLoadBalancerPoolProtocol())) &&
+ (queryLoadBalancerPoolLbAlgorithm == null ||
+ queryLoadBalancerPoolLbAlgorithm.equals(nsg.getLoadBalancerPoolLbAlgorithm())) &&
+ (queryLoadBalancerPoolHealthMonitorID == null ||
+ queryLoadBalancerPoolHealthMonitorID.equals(nsg.getNeutronLoadBalancerPoolHealthMonitorID())) &&
+ (queryLoadBalancerIsAdminStateUp == null ||
+ queryLoadBalancerIsAdminStateUp.equals(nsg.getLoadBalancerPoolAdminIsStateIsUp())) &&
+ (queryLoadBalancerPoolStatus == null ||
+ queryLoadBalancerPoolStatus.equals(nsg.getLoadBalancerPoolStatus())) &&
+ (queryLoadBalancerPoolMembers.size() == 0 ||
+ queryLoadBalancerPoolMembers.equals(nsg.getLoadBalancerPoolMembers()))) {
+ if (fields.size() > 0) {
+ ans.add(extractFields(nsg,fields));
+ } else {
+ ans.add(nsg);
+ }
+ }
+ }
+ return Response.status(200).entity(
+ new NeutronLoadBalancerPoolRequest(ans)).build();
+ }
+
+ /**
+ * Returns the LoadBalancerPool with the given UUID, optionally reduced to
+ * the requested fields. 404s when no pool with that UUID exists.
+ */
+
+ @Path("{loadBalancerPoolID}")
+ @GET
+ @Produces({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response showLoadBalancerPool(@PathParam("loadBalancerPoolID") String loadBalancerPoolID,
+ // return fields
+ @QueryParam("fields") List<String> fields) {
+ INeutronLoadBalancerPoolCRUD poolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (poolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (!poolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolID)) {
+ throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist.");
+ }
+ // Look the pool up once, then project to the requested fields if any.
+ NeutronLoadBalancerPool pool = poolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID);
+ NeutronLoadBalancerPool body = fields.size() > 0 ? extractFields(pool, fields) : pool;
+ return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(body)).build();
+ }
+
+ /**
+ * Creates one LoadBalancerPool (singleton request) or several (bulk
+ * request). Each pool is validated -- its UUID must not already exist in
+ * the cache, and every registered INeutronLoadBalancerPoolAware provider
+ * must approve via canCreateNeutronLoadBalancerPool -- before anything is
+ * added; providers are then notified of each creation.
+ */
+
+ @POST
+ @Produces({ MediaType.APPLICATION_JSON })
+ @Consumes({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 201, condition = "Created"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 409, condition = "Conflict"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response createLoadBalancerPools(final NeutronLoadBalancerPoolRequest input) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+ if (input.isSingleton()) {
+ NeutronLoadBalancerPool singleton = input.getSingleton();
+
+ /*
+ * Verify that the LoadBalancerPool doesn't already exist.
+ */
+ if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(singleton.getLoadBalancerPoolID())) {
+ throw new BadRequestException("LoadBalancerPool UUID already exists");
+ }
+
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canCreateNeutronLoadBalancerPool(singleton);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ // bug fix: the pool was previously added once BEFORE the canCreate
+ // checks above and then a second time here, so a vetoed pool was
+ // still cached and an approved one was added twice. It is now added
+ // exactly once, only after every provider has approved.
+ loadBalancerPoolInterface.addNeutronLoadBalancerPool(singleton);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ service.neutronLoadBalancerPoolCreated(singleton);
+ }
+ }
+ } else {
+ List<NeutronLoadBalancerPool> bulk = input.getBulk();
+ Iterator<NeutronLoadBalancerPool> i = bulk.iterator();
+ HashMap<String, NeutronLoadBalancerPool> testMap = new HashMap<String, NeutronLoadBalancerPool>();
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+ while (i.hasNext()) {
+ NeutronLoadBalancerPool test = i.next();
+
+ /*
+ * Verify that the pool doesn't already exist in the cache or
+ * earlier within this same bulk request.
+ */
+
+ if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(test.getLoadBalancerPoolID())) {
+ throw new BadRequestException("Load Balancer Pool UUID is already created");
+ }
+ if (testMap.containsKey(test.getLoadBalancerPoolID())) {
+ throw new BadRequestException("Load Balancer Pool UUID already exists");
+ }
+ // bug fix: testMap was never populated, so duplicate UUIDs within
+ // a single bulk request were never detected
+ testMap.put(test.getLoadBalancerPoolID(), test);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canCreateNeutronLoadBalancerPool(test);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+ }
+ /*
+ * now, each element of the bulk request can be added to the cache
+ */
+ i = bulk.iterator();
+ while (i.hasNext()) {
+ NeutronLoadBalancerPool test = i.next();
+ loadBalancerPoolInterface.addNeutronLoadBalancerPool(test);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ service.neutronLoadBalancerPoolCreated(test);
+ }
+ }
+ }
+ }
+ return Response.status(201).entity(input).build();
+ }
+
+ /**
+ * Updates a LoadBalancerPool Policy
+ */
+ @Path("{loadBalancerPoolID}")
+ @PUT
+ @Produces({ MediaType.APPLICATION_JSON })
+ @Consumes({ MediaType.APPLICATION_JSON })
+ @StatusCodes({
+ @ResponseCode(code = 200, condition = "Operation successful"),
+ @ResponseCode(code = 400, condition = "Bad Request"),
+ @ResponseCode(code = 401, condition = "Unauthorized"),
+ @ResponseCode(code = 403, condition = "Forbidden"),
+ @ResponseCode(code = 404, condition = "Not Found"),
+ @ResponseCode(code = 501, condition = "Not Implemented") })
+ public Response updateLoadBalancerPool(
+ @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerPoolRequest input) {
+ INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+ if (loadBalancerPoolInterface == null) {
+ throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+ + RestMessages.SERVICEUNAVAILABLE.toString());
+ }
+
+ /*
+ * verify the LoadBalancerPool exists and there is only one delta provided
+ */
+ if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolID)) {
+ throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist.");
+ }
+ if (!input.isSingleton()) {
+ throw new BadRequestException("Only singleton edit supported");
+ }
+ NeutronLoadBalancerPool delta = input.getSingleton();
+ NeutronLoadBalancerPool original = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID);
+
+ /*
+ * updates restricted by Neutron
+ */
+ // NOTE(review): this guard rejects a delta carrying ANY of the listed
+ // attributes, which appears to be every pool attribute, so no non-empty
+ // update can pass. Confirm whether some attributes (e.g. name,
+ // description, lb_algorithm) were meant to be editable.
+ if (delta.getLoadBalancerPoolID() != null ||
+ delta.getLoadBalancerPoolTenantID() != null ||
+ delta.getLoadBalancerPoolName() != null ||
+ delta.getLoadBalancerPoolDescription() != null ||
+ delta.getLoadBalancerPoolProtocol() != null ||
+ delta.getLoadBalancerPoolLbAlgorithm() != null ||
+ delta.getNeutronLoadBalancerPoolHealthMonitorID() != null ||
+ delta.getLoadBalancerPoolAdminIsStateIsUp() != null ||
+ delta.getLoadBalancerPoolStatus() != null ||
+ delta.getLoadBalancerPoolMembers() != null) {
+ throw new BadRequestException("Attribute edit blocked by Neutron");
+ }
+
+ // Give every registered provider a chance to veto the update before it
+ // is applied; any non-2xx answer is returned to the caller verbatim.
+ Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ int status = service.canUpdateNeutronLoadBalancerPool(delta, original);
+ if (status < 200 || status > 299) {
+ return Response.status(status).build();
+ }
+ }
+ }
+
+ /*
+ * update the object and return it
+ */
+ loadBalancerPoolInterface.updateNeutronLoadBalancerPool(loadBalancerPoolID, delta);
+ NeutronLoadBalancerPool updatedLoadBalancerPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID);
+ // Notify providers AFTER the cache has been updated, passing the
+ // post-update object re-read from the cache.
+ if (instances != null) {
+ for (Object instance : instances) {
+ INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+ service.neutronLoadBalancerPoolUpdated(updatedLoadBalancerPool);
+ }
+ }
+ return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID))).build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerPoolRequest {
+ /**
+ * JAXB envelope for LoadBalancerPool REST payloads: either a single
+ * "pool" object or a bulk "pools" list, never both.
+ * See the OpenStack Network API v2.0 Reference for attribute descriptions:
+ * http://docs.openstack.org/api/openstack-network/2.0/content/
+ */
+
+ @XmlElement(name="pool")
+ NeutronLoadBalancerPool singletonLoadBalancerPool;
+
+ @XmlElement(name="pools")
+ List<NeutronLoadBalancerPool> bulkRequest;
+
+ // No-arg constructor used by JAXB during deserialization.
+ NeutronLoadBalancerPoolRequest() {
+ }
+
+ // Wraps a bulk response; explicitly clears the singleton slot.
+ NeutronLoadBalancerPoolRequest(List<NeutronLoadBalancerPool> bulk) {
+ this.bulkRequest = bulk;
+ this.singletonLoadBalancerPool = null;
+ }
+
+ // Wraps a single-pool response.
+ NeutronLoadBalancerPoolRequest(NeutronLoadBalancerPool group) {
+ this.singletonLoadBalancerPool = group;
+ }
+
+ public NeutronLoadBalancerPool getSingleton() {
+ return this.singletonLoadBalancerPool;
+ }
+
+ public List<NeutronLoadBalancerPool> getBulk() {
+ return this.bulkRequest;
+ }
+
+ public boolean isSingleton() {
+ return this.singletonLoadBalancerPool != null;
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerRequest {
+ /**
+ * JAXB envelope for LoadBalancer REST payloads: either a single
+ * "loadbalancer" object or a bulk "loadbalancers" list, never both.
+ * See the OpenStack Network API v2.0 Reference for attribute descriptions:
+ * http://docs.openstack.org/api/openstack-network/2.0/content/
+ */
+
+ @XmlElement(name="loadbalancer")
+ NeutronLoadBalancer singletonLoadBalancer;
+
+ @XmlElement(name="loadbalancers")
+ List<NeutronLoadBalancer> bulkRequest;
+
+ // No-arg constructor used by JAXB during deserialization.
+ NeutronLoadBalancerRequest() {
+ }
+
+ // Wraps a bulk response; explicitly clears the singleton slot.
+ NeutronLoadBalancerRequest(List<NeutronLoadBalancer> bulk) {
+ this.bulkRequest = bulk;
+ this.singletonLoadBalancer = null;
+ }
+
+ // Wraps a single-loadbalancer response.
+ NeutronLoadBalancerRequest(NeutronLoadBalancer group) {
+ this.singletonLoadBalancer = group;
+ }
+
+ public NeutronLoadBalancer getSingleton() {
+ return this.singletonLoadBalancer;
+ }
+
+ public List<NeutronLoadBalancer> getBulk() {
+ return this.bulkRequest;
+ }
+
+ public boolean isSingleton() {
+ return this.singletonLoadBalancer != null;
+ }
+}
\ No newline at end of file
classes.add(NeutronFirewallNorthbound.class);
classes.add(NeutronFirewallPolicyNorthbound.class);
classes.add(NeutronFirewallRulesNorthbound.class);
+ classes.add(NeutronLoadBalancerNorthbound.class);
+ classes.add(NeutronLoadBalancerListenerNorthbound.class);
+ classes.add(NeutronLoadBalancerPoolNorthbound.class);
+ classes.add(NeutronLoadBalancerHealthMonitorNorthbound.class);
+ classes.add(NeutronLoadBalancerPoolMembersNorthbound.class);
return classes;
}
if (input.getPortUUID() != null &&
input.getSubnetUUID() == null) {
NeutronRouter_Interface targetInterface = target.getInterfaces().get(input.getPortUUID());
+ if (targetInterface == null) {
+ throw new ResourceNotFoundException("Router interface not found for given Port UUID");
+ }
input.setSubnetUUID(targetInterface.getSubnetUUID());
input.setID(target.getID());
input.setTenantID(target.getTenantID());
throw new ResourceNotFoundException("Port UUID not found");
}
if (port.getFixedIPs() == null) {
- throw new ResourceNotFoundException("Port UUID jas no fixed IPs");
+ throw new ResourceNotFoundException("Port UUID has no fixed IPs");
}
NeutronSubnet subnet = subnetInterface.getSubnet(input.getSubnetUUID());
if (subnet == null) {
<module>opendaylight/commons/parent</module>
<module>opendaylight/commons/logback_settings</module>
<module>opendaylight/commons/filter-valve</module>
+ <module>opendaylight/commons/liblldp</module>
<!-- Karaf Distribution -->
<module>opendaylight/dummy-console</module>