<parent>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>commons.opendaylight</artifactId>\r
- <version>1.4.2-SNAPSHOT</version>\r
+ <version>1.5.0-SNAPSHOT</version>\r
<relativePath>../../opendaylight/commons/opendaylight</relativePath>\r
</parent>\r
<artifactId>features-adsal-compatibility</artifactId>\r
<packaging>jar</packaging>\r
<properties>\r
<features.file>features.xml</features.file>\r
- <feature.test.version>0.6.2-SNAPSHOT</feature.test.version>\r
+ <feature.test.version>0.7.0-SNAPSHOT</feature.test.version>\r
</properties>\r
<dependencies>\r
<!--\r
<dependency>\r
<groupId>org.opendaylight.yangtools</groupId>\r
<artifactId>features-yangtools</artifactId>\r
- <version>0.6.2-SNAPSHOT</version>\r
+ <version>0.7.0-SNAPSHOT</version>\r
<classifier>features</classifier>\r
<type>xml</type>\r
</dependency>\r
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>features-mdsal</artifactId>\r
- <version>1.1-SNAPSHOT</version>\r
+ <version>1.2.0-SNAPSHOT</version>\r
<classifier>features</classifier>\r
<type>xml</type>\r
</dependency>\r
<dependency>\r
<groupId>org.opendaylight.openflowplugin</groupId>\r
<artifactId>features-openflowplugin</artifactId>\r
- <version>0.0.3-SNAPSHOT</version>\r
+ <version>0.1.0-SNAPSHOT</version>\r
<classifier>features</classifier>\r
<type>xml</type>\r
</dependency>\r
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-adsal</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<type>zip</type>
</dependency>
<!-- Bundle dependencies -->
<feature version="${sal.version}">odl-adsal-core</feature>
<bundle>mvn:org.opendaylight.controller/clustering.services/${clustering.services.version}</bundle>
<bundle>mvn:org.opendaylight.controller/clustering.services-implementation/${clustering.services_implementation.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/clustering.stub/${clustering.stub.version}</bundle>
</feature>
<feature name="odl-adsal-configuration" description="OpenDaylight :: AD-SAL :: Configuration" version="${configuration.version}">
<feature version="${sal.version}">odl-adsal-core</feature>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-akka</artifactId>
<features.file>features.xml</features.file>
<!-- Optional TODO: Move these properties to your parent pom and possibly
DependencyManagement section of your parent pom -->
- <branding.version>1.0.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.4.2-SNAPSHOT</karaf.resources.version>
+ <branding.version>1.1.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
<karaf.version>3.0.1</karaf.version>
- <feature.test.version>0.6.2-SNAPSHOT</feature.test.version>
- <karaf.empty.version>1.4.2-SNAPSHOT</karaf.empty.version>
+ <feature.test.version>0.7.0-SNAPSHOT</feature.test.version>
+ <karaf.empty.version>1.5.0-SNAPSHOT</karaf.empty.version>
<surefire.version>2.16</surefire.version>
</properties>
<dependencies>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
- <version>0.6.2-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-mdsal</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
Necessary TODO: Add repo entries for the repositories of features you refer to
in this feature file but do not define here.
Examples:
- <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.6.2-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/features-mdsal/1.1-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.0.3-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.7.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.2.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.1.0-SNAPSHOT/xml/features</repository>
-->
<feature name='odl-akka-all' version='${project.version}' description='OpenDaylight :: Akka :: All'>
<!--
* Basic MD-SAL Provider
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider '>
- <feature version='1.1-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
<feature version='${project.version}'>odl-controller-model</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
* Basic MD-SAL Model feature
<feature name='odl-controller-model' version='${project.version}' description='OpenDaylight :: controller :: Model'>
- <feature version='0.6.2-SNAPSHOT'>odl-yangtools-binding</feature>
- <feature version='0.6.2-SNAPSHOT'>odl-yangtools-models</feature>
+ <feature version='0.7.0-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.7.0-SNAPSHOT'>odl-yangtools-models</feature>
<bundle>mvn:org.opendaylight.controller/controller-model/${project.version}</bundle>
... whatever other bundles you need
</feature>
* Config Subsystem example - the config file is your config subsystem configuration
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='1.1-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
<configfile finalname="etc/opendaylight/karaf/80-controller.xml">mvn:org.opendaylight.controller/controller-config/${project.version}/xml/config</configfile>
... whatever other bundles you need
* Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='0.0.3-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <feature version='0.1.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
</feature>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-base</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>karaf-tomcat-security</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-test</artifactId>
- <version>0.6.2-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
</dependency>
<!-- dependency for opendaylight-karaf-empty for use by testing -->
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<type>zip</type>
</dependency>
</dependencies>
<feature>odl-base-jackson</feature>
<feature>odl-base-spring-security</feature>
</feature>
- <feature name="odl-base-dummy-console" description="Temporary Dummy Console" version="1.1.0-SNAPSHOT">
- <bundle>mvn:org.opendaylight.controller/dummy-console/1.1.0-SNAPSHOT</bundle>
+ <feature name="odl-base-dummy-console" description="Temporary Dummy Console" version="1.2.0-SNAPSHOT">
+ <bundle>mvn:org.opendaylight.controller/dummy-console/1.2.0-SNAPSHOT</bundle>
</feature>
<feature name="odl-base-felix-dm" description="Felix Dependency Manager" version="${felix.dependencymanager.version}">
<bundle>mvn:org.osgi/org.osgi.compendium/${osgi.compendium.version}</bundle>
<bundle>wrap:mvn:io.netty/netty-common/${netty.version}</bundle>
<bundle>wrap:mvn:io.netty/netty-handler/${netty.version}</bundle>
<bundle>wrap:mvn:io.netty/netty-codec-http/${netty.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.thirdparty/ganymed/1.1-SNAPSHOT</bundle>
</feature>
<feature name="odl-base-jersey" description="Jersey" version="${jersey.version}">
<feature>odl-base-gemini-web</feature>
<feature name="odl-base-eclipselink-persistence" description="EclipseLink Persistence API" version="2.0.4.v201112161009">
<bundle start="true">mvn:eclipselink/javax.persistence/2.0.4.v201112161009</bundle>
<bundle start="true">mvn:eclipselink/javax.resource/1.5.0.v200906010428</bundle>
+ <bundle start="true">mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/2.5.0</bundle>
<bundle start="true">mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/2.5.0</bundle>
<bundle start="true">mvn:org.eclipse.persistence/org.eclipse.persistence.core/2.5.0</bundle>
</feature>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
<artifactId>features-config-netty</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
<artifactId>features-config-persister</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../../opendaylight/config/</relativePath>
</parent>
<artifactId>features-config</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<type>zip</type>
</dependency>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>controller-features</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>extras-features</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal</relativePath>
</parent>
<artifactId>features-flow</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal</relativePath>
</parent>
<artifactId>features-mdsal</artifactId>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-test</artifactId>
- <version>0.6.2-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
</dependency>
</dependencies>
<feature name='odl-mdsal-all' version='${project.version}' description="OpenDaylight :: MDSAL :: All">
<feature version='${project.version}'>odl-mdsal-broker</feature>
<feature version='${project.version}'>odl-mdsal-clustering</feature>
- <feature version='${project.version}'>odl-restconf</feature>
<feature version='${project.version}'>odl-mdsal-xsql</feature>
<feature version='${project.version}'>odl-toaster</feature>
</feature>
<bundle>mvn:org.opendaylight.controller/sal-inmemory-datastore/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/${config.mdsal.configfile}">mvn:org.opendaylight.controller/md-sal-config/${mdsal.version}/xml/config</configfile>
</feature>
- <feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
- <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
- <feature>war</feature>
- <!-- presently we need sal-remote to be listed BEFORE sal-rest-connector because sal-rest-connector
- has a yang file which augments a yang file in sal-remote, and order seems to matter -->
- <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
- <bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
- <bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
- <bundle>mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
- <bundle>mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
- <bundle>mvn:com.sun.jersey/jersey-servlet/${jersey.version}</bundle>
- <bundle>mvn:io.netty/netty-buffer/${netty.version}</bundle>
- <bundle>mvn:io.netty/netty-codec/${netty.version}</bundle>
- <bundle>mvn:io.netty/netty-codec-http/${netty.version}</bundle>
- <bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
- <bundle>mvn:io.netty/netty-handler/${netty.version}</bundle>
- <bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
- <configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
- </feature>
<feature name='odl-toaster' version='${project.version}' description="OpenDaylight :: Toaster">
<feature version='${yangtools.version}'>odl-yangtools-common</feature>
<feature version='${yangtools.version}'>odl-yangtools-binding</feature>
<bundle>mvn:org.opendaylight.controller/sal-karaf-xsql/${project.version}</bundle>
<configfile finalname="${config.configfile.directory}/${config.xsql.configfile}">mvn:org.opendaylight.controller/sal-dom-xsql-config/${project.version}/xml/config</configfile>
</feature>
- <feature name ='odl-mdsal-apidocs' version='${project.version}'>
- <feature version='${project.version}'>odl-restconf</feature>
- <bundle>mvn:org.opendaylight.controller/sal-rest-docgen/${project.version}</bundle>
- <bundle>mvn:com.fasterxml.jackson.core/jackson-annotations/${jackson.version}</bundle>
- <bundle>mvn:com.fasterxml.jackson.core/jackson-core/${jackson.version}</bundle>
- <bundle>mvn:com.fasterxml.jackson.core/jackson-databind/${jackson.version}</bundle>
- <bundle>mvn:com.fasterxml.jackson.datatype/jackson-datatype-json-org/${jackson.version}</bundle>
- <bundle>mvn:com.fasterxml.jackson.module/jackson-module-jaxb-annotations/${jackson.version}</bundle>
- <bundle>mvn:com.fasterxml.jackson.jaxrs/jackson-jaxrs-base/${jackson.version}</bundle>
- <bundle>mvn:com.fasterxml.jackson.jaxrs/jackson-jaxrs-json-provider/${jackson.version}</bundle>
- <bundle>mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
- <bundle>mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
- <bundle>mvn:com.sun.jersey/jersey-servlet/${jersey.version}</bundle>
- <bundle>wrap:mvn:org.json/json/${org.json.version}</bundle>
- </feature>
<feature name ='odl-mdsal-clustering-commons' version='${project.version}'>
<feature version='${project.version}'>odl-mdsal-broker</feature>
<feature version='${akka.version}'>odl-akka-system</feature>
<configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
<configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
</feature>
-
- <feature name='odl-clustering-test-app' version='${project.version}'>
- <feature version='${project.version}'>odl-mdsal-clustering</feature>
- <feature version='${project.version}'>odl-restconf</feature>
- <feature version='${yangtools.version}'>odl-yangtools-models</feature>
- <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
- <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
- <configfile finalname="configuration/initial/module-shards.conf" override="true" >mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf</configfile>
- <configfile finalname="configuration/initial/modules.conf" override="true">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf</configfile>
- </feature>
</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../../opendaylight/md-sal</relativePath>
</parent>
<!--
-->
<artifactId>features-netconf-connector</artifactId>
<!-- Optional TODO: Uncomment version if you are not using a parent pom.xml
- <version>1.0-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
-->
<packaging>jar</packaging>
<properties>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
- <version>0.6.2-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-mdsal</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
Optional TODO: Remove TODO comments.
-->
<!-- test to validate features.xml -->
+ <!--FIXME BUG-2195 When running single feature tests for netconf connector, features including ssh proxy server always fail (this behavior does not appear when running karaf distro directly)-->
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-test</artifactId>
Necessary TODO: Add repo entries for the repositories of features you refer to
in this feature file but do not define here.
Examples:
- <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.6.2-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/features-mdsal/1.1-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.0.3-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.7.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.2.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.1.0-SNAPSHOT/xml/features</repository>
-->
<repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
<repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
* Basic MD-SAL Provider
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider '>
- <feature version='1.1-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
<feature version='${project.version}'>odl-controller-model</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
* Basic MD-SAL Model feature
<feature name='odl-controller-model' version='${project.version}' description='OpenDaylight :: controller :: Model'>
- <feature version='0.6.2-SNAPSHOT'>odl-yangtools-binding</feature>
- <feature version='0.6.2-SNAPSHOT'>odl-yangtools-models</feature>
+ <feature version='0.7.0-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.7.0-SNAPSHOT'>odl-yangtools-models</feature>
<bundle>mvn:org.opendaylight.controller/controller-model/${project.version}</bundle>
... whatever other bundles you need
</feature>
* Config Subsystem example - the config file is your config subsystem configuration
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='1.1-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
<configfile finalname="etc/opendaylight/karaf/80-controller.xml">mvn:org.opendaylight.controller/controller-config/${project.version}/xml/config</configfile>
... whatever other bundles you need
* Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
<feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
- <feature version='0.0.3-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <feature version='0.1.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
<bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
... whatever other bundles you need
</feature>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../../opendaylight/netconf</relativePath>
</parent>
<artifactId>features-netconf</artifactId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-netty-util</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
<feature version='${project.version}'>odl-netconf-mapping-api</feature>
<feature version='${project.version}'>odl-netconf-util</feature>
<bundle>mvn:org.opendaylight.controller/netconf-netty-util/${project.version}</bundle>
- <bundle>mvn:org.opendaylight.controller.thirdparty/ganymed/${ganymed.version}</bundle>
<bundle>mvn:org.apache.sshd/sshd-core/${sshd-core.version}</bundle>
<bundle>mvn:org.openexi/nagasena/${exi.nagasena.version}</bundle>
<bundle>mvn:io.netty/netty-codec/${netty.version}</bundle>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-nsf</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-empty</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<type>zip</type>
</dependency>
<!-- Feature Dependencies -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-controller</artifactId>
<module>adsal-compatibility</module>
<module>akka</module>
<module>netconf-connector</module>
+ <module>restconf</module>
</modules>
</project>
\ No newline at end of file
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>features-protocol-framework</artifactId>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Necessary TODO: Put your copyright here.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+--><project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-parent</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ <relativePath>../../opendaylight/md-sal</relativePath>
+ </parent>
+ <!--
+ Necessary TODO: Hookup your parent pom here, else you will not get necessary versions,
+ maven repos etc. If you run this archetype in a subdirectory of your project, it
+ will pick the pom.xml from the parent directory as the parent pom, which may or may
+ not be correct.
+ -->
+ <artifactId>features-restconf</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <!-- Optional TODO: Uncomment version if you are not using a parent pom.xml
+ <version>1.1.0-SNAPSHOT</version>
+ -->
+ <packaging>jar</packaging>
+ <properties>
+ <features.file>features.xml</features.file>
+ </properties>
+ <dependencies>
+ <!--
+ Necessary TODO: Put dependencies on any feature repos
+ you use in your features.xml file.
+
+ Note: they will need to be <type>xml</xml>
+ and <classifier>features</classifier>.
+ One other thing to watch for is to make sure they are
+ <scope>compile</compile>, which they should be by default,
+ but be cautious lest they be at a different scope in a parent pom.
+
+ Examples:
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>features-yangtools</artifactId>
+ <version>0.7.0-SNAPSHOT</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-mdsal</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.openflowplugin</groupId>
+ <artifactId>features-openflowplugin</artifactId>
+ <version>0.1.0-SNAPSHOT</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ -->
+
+ <!--
+ Necessary TODO: Put dependencies for bundles directly referenced
+ in your features.xml file. For every <bundle> reference in your
+ features.xml file, you need a corresponding dependency here.
+
+ Examples:
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>controller-provider</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>controller-model</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ -->
+
+ <!--
+ Necessary TODO: Put dependencies for configfiles directly referenced
+ in your features.xml file. For every <configfile> reference in your
+ features.xml file, you need a corresponding dependency here.
+
+ Example (presuming here version is coming from the parent pom):
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>controller-config</artifactId>
+ <version>${project.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
+ -->
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>features-yangtools</artifactId>
+ <version>${yangtools.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-mdsal</artifactId>
+ <version>${mdsal.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.aaa</groupId>
+ <artifactId>features-aaa</artifactId>
+ <version>${aaa.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-remote</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-rest-connector</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>com.google.code.gson</groupId>
+ <artifactId>gson</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-server</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.sun.jersey</groupId>
+ <artifactId>jersey-servlet</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-buffer</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-codec</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-codec-http</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-handler</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
+ <artifactId>netty-transport</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-rest-connector-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
+
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-core</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.datatype</groupId>
+ <artifactId>jackson-datatype-json-org</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.module</groupId>
+ <artifactId>jackson-module-jaxb-annotations</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.jaxrs</groupId>
+ <artifactId>jackson-jaxrs-base</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.jaxrs</groupId>
+ <artifactId>jackson-jaxrs-json-provider</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.json</groupId>
+ <artifactId>json</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-data-codec-gson</artifactId>
+ </dependency>
+
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-model</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-provider</artifactId>
+ <version>${mdsal.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>config</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>testmoduleshardconf</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>clustering-it-config</artifactId>
+ <version>${mdsal.version}</version>
+ <type>xml</type>
+ <classifier>testmoduleconf</classifier>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-rest-docgen</artifactId>
+ </dependency>
+
+ <!--
+ Optional TODO: Remove TODO comments.
+ -->
+ <!-- test to validate features.xml -->
+ <dependency>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>features-test</artifactId>
+ <version>${yangtools.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <!-- dependency for opendaylight-karaf-empty for use by testing -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>opendaylight-karaf-empty</artifactId>
+ <version>${commons.opendaylight.version}</version>
+ <type>zip</type>
+ </dependency>
+ <!-- Uncomment this if you get an error : java.lang.NoSuchMethodError: org.slf4j.helpers.MessageFormatter.format(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Object;)Lorg/slf4j/helpers/FormattingTuple;
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-simple</artifactId>
+ <version>1.7.2</version>
+ </dependency>
+ -->
+
+ </dependencies>
+ <build>
+ <resources>
+ <resource>
+ <directory>src/main/resources</directory>
+ <filtering>true</filtering>
+ </resource>
+ </resources>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-resources-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>filter</id>
+ <phase>generate-resources</phase>
+ <goals>
+ <goal>resources</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>build-helper-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>attach-artifacts</id>
+ <phase>package</phase>
+ <goals>
+ <goal>attach-artifact</goal>
+ </goals>
+ <configuration>
+ <artifacts>
+ <artifact>
+ <file>${project.build.directory}/classes/${features.file}</file>
+ <type>xml</type>
+ <classifier>features</classifier>
+ </artifact>
+ </artifacts>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>${surefire.version}</version>
+ <configuration>
+ <systemPropertyVariables>
+ <karaf.distro.groupId>org.opendaylight.controller</karaf.distro.groupId>
+ <karaf.distro.artifactId>opendaylight-karaf-empty</karaf.distro.artifactId>
+ <karaf.distro.version>${commons.opendaylight.version}</karaf.distro.version>
+ </systemPropertyVariables>
+ <dependenciesToScan>
+ <dependency>org.opendaylight.yangtools:features-test</dependency>
+ </dependenciesToScan>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ <scm>
+ <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+ <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+ <tag>HEAD</tag>
+ <url>https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=summary</url>
+ </scm>
+</project>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<features name="odl-controller-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+ <!--
+ Necessary TODO: Please read the features guidelines:
+ https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Feature_Best_Practices
+ -->
+ <!--
+ Necessary TODO: Add repo entries for the repositories of features you refer to
+ in this feature file but do not define here.
+ Examples:
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.7.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.2.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.1.0-SNAPSHOT/xml/features</repository>
+ -->
+ <repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
+ <repository>mvn:org.opendaylight.aaa/features-aaa/${aaa.version}/xml/features</repository>
+ <feature name='odl-restconf-all' version='${project.version}' description='OpenDaylight :: Restconf :: All'>
+ <!--
+ Necessary TODO:
+ List all of the user consumable features you define in this feature file here.
+ Generally you would *not* list individual bundles here, but only features defined in *this* file.
+ It is useful to list them in the same order they occur in the file.
+
+ Examples:
+ <feature version='${project.version}'>odl-controller-provider</feature>
+ <feature version='${project.version}'>odl-controller-model</feature>
+ -->
+ <feature version='${project.version}'>odl-restconf</feature>
+ <feature version='${project.version}'>odl-mdsal-apidocs</feature>
+ <feature version='${project.version}'>odl-clustering-test-app</feature>
+ </feature>
+ <!--
+ Necessary TODO: Define your features. It is useful to list then in order of dependency. So if A depends on B, list A first.
+ When naming your features please be mindful of the guidelines:
+ https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines
+ Particularly:
+ a) Prefixing names with 'odl-': https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Feature_Naming
+ b) Descriptions: https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Description
+ c) Avoid start-levels: https://wiki.opendaylight.org/view/Runtime:Karaf_Features_Guidelines#Avoid_start-levels
+
+ It's also nice to list inside a feature, first the features it needs, then the bundles it needs, then the configfiles.
+ Examples:
+
+ * Basic MD-SAL Provider
+ <feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider '>
+ <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='${project.version}'>odl-controller-model</feature>
+ <bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
+ ... whatever other bundles you need
+ </feature>
+
+ * Basic MD-SAL Model feature
+ <feature name='odl-controller-model' version='${project.version}' description='OpenDaylight :: controller :: Model'>
+ <feature version='0.7.0-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.7.0-SNAPSHOT'>odl-yangtools-models</feature>
+ <bundle>mvn:org.opendaylight.controller/controller-model/${project.version}</bundle>
+ ... whatever other bundles you need
+ </feature>
+
+ * Config Subsystem example - the config file is your config subsystem configuration
+ <feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
+ <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
+ <bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
+ <configfile finalname="etc/opendaylight/karaf/80-controller.xml">mvn:org.opendaylight.controller/controller-config/${project.version}/xml/config</configfile>
+ ... whatever other bundles you need
+ </feature>
+
+ * Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
+ <feature name='odl-controller-provider' version='${project.version}' description='OpenDaylight :: controller :: Provider'>
+ <feature version='0.1.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <bundle>mvn:org.opendaylight.controller/controller-provider/${project.version}</bundle>
+ ... whatever other bundles you need
+ </feature>
+
+ -->
+
+ <feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
+ <feature version='${aaa.version}'>odl-aaa-authn</feature>
+ <feature version='${mdsal.version}'>odl-restconf-noauth</feature>
+ </feature>
+ <feature name='odl-restconf-noauth' version='${project.version}' description="OpenDaylight :: Restconf">
+ <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
+ <feature>war</feature>
+ <!-- presently we need sal-remote to be listed BEFORE sal-rest-connector because sal-rest-connector
+ has a yang file which augments a yang file in sal-remote, and order seems to matter -->
+ <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
+ <bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
+ <bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
+ <bundle>mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
+ <bundle>mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
+ <bundle>mvn:com.sun.jersey/jersey-servlet/${jersey.version}</bundle>
+ <bundle>mvn:io.netty/netty-buffer/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-codec/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-codec-http/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-handler/${netty.version}</bundle>
+ <bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
+ </feature>
+ <feature name ='odl-mdsal-apidocs' version='${project.version}' description="OpenDaylight :: MDSAL :: APIDOCS">
+ <feature version='${project.version}'>odl-restconf</feature>
+ <bundle>mvn:org.opendaylight.controller/sal-rest-docgen/${project.version}</bundle>
+ <bundle>mvn:com.fasterxml.jackson.core/jackson-annotations/${jackson.version}</bundle>
+ <bundle>mvn:com.fasterxml.jackson.core/jackson-core/${jackson.version}</bundle>
+ <bundle>mvn:com.fasterxml.jackson.core/jackson-databind/${jackson.version}</bundle>
+ <bundle>mvn:com.fasterxml.jackson.datatype/jackson-datatype-json-org/${jackson.version}</bundle>
+ <bundle>mvn:com.fasterxml.jackson.module/jackson-module-jaxb-annotations/${jackson.version}</bundle>
+ <bundle>mvn:com.fasterxml.jackson.jaxrs/jackson-jaxrs-base/${jackson.version}</bundle>
+ <bundle>mvn:com.fasterxml.jackson.jaxrs/jackson-jaxrs-json-provider/${jackson.version}</bundle>
+ <bundle>mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
+ <bundle>mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
+ <bundle>mvn:com.sun.jersey/jersey-servlet/${jersey.version}</bundle>
+ <bundle>wrap:mvn:org.json/json/${org.json.version}</bundle>
+ </feature>
+
+ <feature name='odl-clustering-test-app' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-clustering</feature>
+ <feature version='${project.version}'>odl-restconf</feature>
+ <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
+ <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
+ <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
+ <configfile finalname="configuration/initial/module-shards.conf" override="true" >mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf</configfile>
+ <configfile finalname="configuration/initial/modules.conf" override="true">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf</configfile>
+ </feature>
+
+ <feature name='odl-toaster-rest' version='${project.version}'>
+ <feature version='${project.version}'>odl-restconf</feature>
+ <feature version='${project.version}'>odl-toaster</feature>
+ </feature>
+
+ <feature name='odl-toaster-ui' version='${project.version}'>
+ <feature version='${project.version}'>odl-mdsal-apidocs</feature>
+ <feature version='${project.version}'>odl-mdsal-xsql</feature>
+ <feature version='${project.version}'>odl-toaster-rest</feature>
+ </feature>
+ <!-- Optional TODO: Remove TODO Comments -->
+
+</features>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>itests-controller</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<artifactId>base-features-it</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../opendaylight/commons/opendaylight</relativePath>
</parent>
<artifactId>itests-controller</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>appauth</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<groupId>org.opendaylight.controller.archetypes</groupId>
<artifactId>odl-model-project</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<properties>
<nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>\r
<nexus.repository.release>opendaylight.release</nexus.repository.release>
<nexus.repository.snapshot>opendaylight.release</nexus.repository.snaphot>
- <yang.version>0.6.2-SNAPSHOT</yang.version>\r
- <yang.codegen.version>0.6.2-SNAPSHOT</yang.codegen.version>\r
+ <yang.version>0.7.0-SNAPSHOT</yang.version>\r
+ <yang.codegen.version>0.7.0-SNAPSHOT</yang.codegen.version>\r
<bundle.plugin.version>2.3.7</bundle.plugin.version>\r
</properties>\r
<scm>\r
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-configfile-archetype</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
-
+ <parent>
+ <groupId>org.opendaylight.controller.archetypes</groupId>
+ <artifactId>archetypes-parent</artifactId>
+ <version>0.2.0-SNAPSHOT</version>
+ </parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-distro-archetype</artifactId>
- <version>1.0.0-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<name>distribution-karaf-archetype</name>
<properties>
<!-- Optional TODO: Move these properties to your parent pom and possibly
DependencyManagement section of your parent pom -->
- <branding.version>1.0.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.4.2-SNAPSHOT</karaf.resources.version>
+ <branding.version>1.1.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
<karaf.version>3.0.1</karaf.version>
</properties>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
<scope>runtime</scope>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller.archetypes</groupId>
+ <artifactId>archetypes-parent</artifactId>
+ <version>0.2.0-SNAPSHOT</version>
+ </parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>opendaylight-karaf-features-archetype</artifactId>
- <version>1.0.0-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<name>opendaylight-karaf-features-archetype</name>
<features.file>features.xml</features.file>
<!-- Optional TODO: Move these properties to your parent pom and possibly
DependencyManagement section of your parent pom -->
- <branding.version>1.0.0-SNAPSHOT</branding.version>
- <karaf.resources.version>1.4.2-SNAPSHOT</karaf.resources.version>
+ <branding.version>1.1.0-SNAPSHOT</branding.version>
+ <karaf.resources.version>1.5.0-SNAPSHOT</karaf.resources.version>
<karaf.version>3.0.1</karaf.version>
- <feature.test.version>0.6.2-SNAPSHOT</feature.test.version>
- <karaf.empty.version>1.4.2-SNAPSHOT</karaf.empty.version>
+ <feature.test.version>0.7.0-SNAPSHOT</feature.test.version>
+ <karaf.empty.version>1.5.0-SNAPSHOT</karaf.empty.version>
<surefire.version>2.16</surefire.version>
</properties>
<dependencies>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>features-yangtools</artifactId>
- <version>0.6.2-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-mdsal</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
<dependency>
<groupId>org.opendaylight.openflowplugin</groupId>
<artifactId>features-openflowplugin</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<classifier>features</classifier>
<type>xml</type>
</dependency>
Necessary TODO: Add repo entries for the repositories of features you refer to
in this feature file but do not define here.
Examples:
- <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.6.2-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.controller/features-mdsal/1.1-SNAPSHOT/xml/features</repository>
- <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.0.3-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.yangtools/features-yangtools/0.7.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.controller/features-mdsal/1.2.0-SNAPSHOT/xml/features</repository>
+ <repository>mvn:org.opendaylight.openflowplugin/features-openflowplugin/0.1.0-SNAPSHOT/xml/features</repository>
-->
<feature name='odl-${repoName}-all' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: All'>
<!--
* Basic MD-SAL Provider
<feature name='odl-${repoName}-provider' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: Provider '>
- <feature version='1.1-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
<feature version='${symbol_dollar}{project.version}'>odl-${repoName}-model</feature>
<bundle>mvn:${groupId}/${repoName}-provider/${symbol_dollar}{project.version}</bundle>
... whatever other bundles you need
* Basic MD-SAL Model feature
<feature name='odl-${repoName}-model' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: Model'>
- <feature version='0.6.2-SNAPSHOT'>odl-yangtools-binding</feature>
- <feature version='0.6.2-SNAPSHOT'>odl-yangtools-models</feature>
+ <feature version='0.7.0-SNAPSHOT'>odl-yangtools-binding</feature>
+ <feature version='0.7.0-SNAPSHOT'>odl-yangtools-models</feature>
<bundle>mvn:${groupId}/${repoName}-model/${symbol_dollar}{project.version}</bundle>
... whatever other bundles you need
</feature>
* Config Subsystem example - the config file is your config subsystem configuration
<feature name='odl-${repoName}-provider' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: Provider'>
- <feature version='1.1-SNAPSHOT'>odl-mdsal-broker</feature>
+ <feature version='1.2.0-SNAPSHOT'>odl-mdsal-broker</feature>
<bundle>mvn:${groupId}/${repoName}-provider/${symbol_dollar}{project.version}</bundle>
<configfile finalname="etc/opendaylight/karaf/80-${repoName}.xml">mvn:${groupId}/${repoName}-config/${symbol_dollar}{project.version}/xml/config</configfile>
... whatever other bundles you need
* Basic MD-SAL Provider that uses openflowplugin-flow-services (which brings along odl-mdsal-broker)
<feature name='odl-${repoName}-provider' version='${symbol_dollar}{project.version}' description='OpenDaylight :: ${repoName} :: Provider'>
- <feature version='0.0.3-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
+ <feature version='0.1.0-SNAPSHOT'>odl-openflowplugin-flow-services</feature>
<bundle>mvn:${groupId}/${repoName}-provider/${symbol_dollar}{project.version}</bundle>
... whatever other bundles you need
</feature>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<scm>
</scm>
<groupId>org.opendaylight.controller.archetypes</groupId>
<artifactId>archetypes-parent</artifactId>
- <version>0.1.1-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
<distributionManagement>
<!-- OpenDayLight Released artifact -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>arphandler</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>clustering.services.integrationtest</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.services</artifactId>
- <version>0.5.1-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.services-implementation</artifactId>
- <version>0.4.3-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<!-- Sonar properties using jacoco to retrieve integration test results -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.stub</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>clustering.test</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>concepts</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../opendaylight</relativePath>
</parent>
<artifactId>filter-valve</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.httpclient</artifactId>
- <version>0.1.2-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencies>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
</dependency>
</dependencies>
</plugin>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../opendaylight</relativePath>
</parent>
<artifactId>liblldp</artifactId>
- <version>0.8.1-SNAPSHOT</version>
+ <version>0.9.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.parent</artifactId>
- <version>1.0.2-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<relativePath>../parent</relativePath>
</parent>
<artifactId>commons.logback_settings</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
<developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
<parent>
<groupId>org.opendaylight.odlparent</groupId>
<artifactId>odlparent</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath></relativePath>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<packaging>pom</packaging>
<prerequisites>
<maven>3.0</maven>
<akka.version>2.3.4</akka.version>
<aopalliance.version>1.0.0</aopalliance.version>
- <appauth.version>0.4.2-SNAPSHOT</appauth.version>
- <archetype-app-northbound>0.0.1-SNAPSHOT</archetype-app-northbound>
+ <appauth.version>0.5.0-SNAPSHOT</appauth.version>
+ <archetype-app-northbound>0.1.0-SNAPSHOT</archetype-app-northbound>
<aries.util.version>1.1.0</aries.util.version>
- <arphandler.version>0.5.2-SNAPSHOT</arphandler.version>
+ <arphandler.version>0.6.0-SNAPSHOT</arphandler.version>
<!-- Controller Modules Versions -->
<asm.version>4.1</asm.version>
<!-- Plugin Versions -->
<bouncycastle.version>1.50</bouncycastle.version>
<bundle.plugin.version>2.4.0</bundle.plugin.version>
- <bundlescanner.api.version>0.4.2-SNAPSHOT</bundlescanner.api.version>
- <bundlescanner.implementation.version>0.4.2-SNAPSHOT</bundlescanner.implementation.version>
- <bundlescanner.version>0.4.2-SNAPSHOT</bundlescanner.version>
+ <bundlescanner.api.version>0.5.0-SNAPSHOT</bundlescanner.api.version>
+ <bundlescanner.implementation.version>0.5.0-SNAPSHOT</bundlescanner.implementation.version>
+ <bundlescanner.version>0.5.0-SNAPSHOT</bundlescanner.version>
<checkstyle.version>2.12</checkstyle.version>
- <clustering.services.version>0.5.1-SNAPSHOT</clustering.services.version>
- <clustering.services_implementation.version>0.4.3-SNAPSHOT</clustering.services_implementation.version>
- <clustering.stub.version>0.4.2-SNAPSHOT</clustering.stub.version>
- <clustering.test.version>0.4.2-SNAPSHOT</clustering.test.version>
- <commmons.northbound.version>0.4.2-SNAPSHOT</commmons.northbound.version>
+ <clustering.services.version>0.6.0-SNAPSHOT</clustering.services.version>
+ <clustering.services_implementation.version>0.5.0-SNAPSHOT</clustering.services_implementation.version>
+ <clustering.stub.version>0.5.0-SNAPSHOT</clustering.stub.version>
+ <clustering.test.version>0.5.0-SNAPSHOT</clustering.test.version>
+ <commmons.northbound.version>0.5.0-SNAPSHOT</commmons.northbound.version>
<!-- Third Party Versions -->
<codahale.metrics.version>3.0.1</codahale.metrics.version>
<commons.tomcat.api>7.0.53.v201406060720</commons.tomcat.api>
<commons.tomcat.util>7.0.53.v201406070630</commons.tomcat.util>
- <commons.checkstyle.version>0.0.3-SNAPSHOT</commons.checkstyle.version>
+ <commons.checkstyle.version>0.1.0-SNAPSHOT</commons.checkstyle.version>
<commons.fileupload.version>1.2.2</commons.fileupload.version>
- <commons.httpclient.version>0.1.2-SNAPSHOT</commons.httpclient.version>
+ <commons.httpclient.version>0.2.0-SNAPSHOT</commons.httpclient.version>
<commons.io.version>2.4</commons.io.version>
<commons.lang3.version>3.1</commons.lang3.version>
- <commons.logback_settings.version>0.0.2-SNAPSHOT</commons.logback_settings.version>
+ <commons.logback_settings.version>0.1.0-SNAPSHOT</commons.logback_settings.version>
<commons.net.version>3.0.1</commons.net.version>
- <commons.opendaylight.commons.httpclient>0.1.2-SNAPSHOT</commons.opendaylight.commons.httpclient>
- <commons.opendaylight.concepts.version>0.5.2-SNAPSHOT</commons.opendaylight.concepts.version>
- <commons.opendaylight.version>1.4.2-SNAPSHOT</commons.opendaylight.version>
- <commons.parent.version>1.0.2-SNAPSHOT</commons.parent.version>
+ <commons.opendaylight.commons.httpclient>0.2.0-SNAPSHOT</commons.opendaylight.commons.httpclient>
+ <commons.opendaylight.concepts.version>0.6.0-SNAPSHOT</commons.opendaylight.concepts.version>
+ <commons.opendaylight.version>1.5.0-SNAPSHOT</commons.opendaylight.version>
+ <commons.parent.version>1.1.0-SNAPSHOT</commons.parent.version>
<compiler.version>2.3.2</compiler.version>
- <commons.httpclient.version>0.1.2-SNAPSHOT</commons.httpclient.version>
- <concepts.version>0.5.2-SNAPSHOT</concepts.version>
+ <commons.httpclient.version>0.2.0-SNAPSHOT</commons.httpclient.version>
+ <concepts.version>0.6.0-SNAPSHOT</concepts.version>
<concurrentlinkedhashmap.version>1.4</concurrentlinkedhashmap.version>
- <config.version>0.2.5-SNAPSHOT</config.version>
- <aaa.version>0.1.0-SNAPSHOT</aaa.version>
+ <config.version>0.3.0-SNAPSHOT</config.version>
+ <aaa.version>0.2.0-SNAPSHOT</aaa.version>
<config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
<config.clustering.configfile>05-clustering.xml</config.clustering.configfile>
<config.netty.configfile>00-netty.xml</config.netty.configfile>
<config.toaster.configfile>03-toaster-sample.xml</config.toaster.configfile>
<config.restconf.configfile>10-rest-connector.xml</config.restconf.configfile>
<config.netconf.connector.configfile>99-netconf-connector.xml</config.netconf.connector.configfile>
- <configuration.implementation.version>0.4.3-SNAPSHOT</configuration.implementation.version>
- <configuration.version>0.4.3-SNAPSHOT</configuration.version>
- <connectionmanager.version>0.1.2-SNAPSHOT</connectionmanager.version>
- <containermanager.it.version>0.5.2-SNAPSHOT</containermanager.it.version>
- <containermanager.northbound.version>0.4.2-SNAPSHOT</containermanager.northbound.version>
- <containermanager.shell.version>0.5.2-SNAPSHOT</containermanager.shell.version>
- <containermanager.version>0.5.2-SNAPSHOT</containermanager.version>
- <controllermanager.northbound.version>0.0.2-SNAPSHOT</controllermanager.northbound.version>
- <devices.web.version>0.4.2-SNAPSHOT</devices.web.version>
- <dummy-console.version>1.1.0-SNAPSHOT</dummy-console.version>
+ <configuration.implementation.version>0.5.0-SNAPSHOT</configuration.implementation.version>
+ <configuration.version>0.5.0-SNAPSHOT</configuration.version>
+ <connectionmanager.version>0.2.0-SNAPSHOT</connectionmanager.version>
+ <containermanager.it.version>0.6.0-SNAPSHOT</containermanager.it.version>
+ <containermanager.northbound.version>0.5.0-SNAPSHOT</containermanager.northbound.version>
+ <containermanager.shell.version>0.6.0-SNAPSHOT</containermanager.shell.version>
+ <containermanager.version>0.6.0-SNAPSHOT</containermanager.version>
+ <controllermanager.northbound.version>0.1.0-SNAPSHOT</controllermanager.northbound.version>
+ <devices.web.version>0.5.0-SNAPSHOT</devices.web.version>
+ <dummy-console.version>1.2.0-SNAPSHOT</dummy-console.version>
<eclipse.persistence.version>2.5.0</eclipse.persistence.version>
<eclipse.jdt.core.compiler.batch.version>3.8.0.I20120518-2145</eclipse.jdt.core.compiler.batch.version>
<!-- enforcer version -->
<exi.nagasena.version>0000.0002.0038.0</exi.nagasena.version>
<felix.util.version>1.6.0</felix.util.version>
- <filtervalve.version>1.4.2-SNAPSHOT</filtervalve.version>
+ <filtervalve.version>1.5.0-SNAPSHOT</filtervalve.version>
<findbugs.maven.plugin.version>2.4.0</findbugs.maven.plugin.version>
- <flowprogrammer.northbound.version>0.4.2-SNAPSHOT</flowprogrammer.northbound.version>
- <flows.web.version>0.4.2-SNAPSHOT</flows.web.version>
- <forwarding.staticrouting>0.5.2-SNAPSHOT</forwarding.staticrouting>
- <forwarding.staticrouting.northbound.version>0.4.2-SNAPSHOT</forwarding.staticrouting.northbound.version>
- <forwardingrulesmanager.implementation.version>0.4.2-SNAPSHOT</forwardingrulesmanager.implementation.version>
- <forwardingrulesmanager.version>0.6.0-SNAPSHOT</forwardingrulesmanager.version>
- <ganymed.version>1.1-SNAPSHOT</ganymed.version>
- <hosttracker.api.version>0.5.2-SNAPSHOT</hosttracker.api.version>
- <hosttracker.implementation.version>0.5.2-SNAPSHOT</hosttracker.implementation.version>
- <hosttracker.northbound.version>0.4.2-SNAPSHOT</hosttracker.northbound.version>
- <hosttracker.shell.version>1.0.0-SNAPSHOT</hosttracker.shell.version>
- <hosttracker_new.api.version>0.4.2-SNAPSHOT</hosttracker_new.api.version>
- <hosttracker_new.implementation.version>0.4.2-SNAPSHOT</hosttracker_new.implementation.version>
- <httpservice-bridge.northbound.version>0.0.2-SNAPSHOT</httpservice-bridge.northbound.version>
- <ietf-inet-types.version>2010.09.24.4-SNAPSHOT</ietf-inet-types.version>
- <ietf-restconf.version>2013.10.19.1-SNAPSHOT</ietf-restconf.version>
- <ietf-topology.version>2013.10.21.2-SNAPSHOT</ietf-topology.version>
- <ietf-yang-types.version>2010.09.24.4-SNAPSHOT</ietf-yang-types.version>
+ <flowprogrammer.northbound.version>0.5.0-SNAPSHOT</flowprogrammer.northbound.version>
+ <flows.web.version>0.5.0-SNAPSHOT</flows.web.version>
+ <forwarding.staticrouting>0.6.0-SNAPSHOT</forwarding.staticrouting>
+ <forwarding.staticrouting.northbound.version>0.5.0-SNAPSHOT</forwarding.staticrouting.northbound.version>
+ <forwardingrulesmanager.implementation.version>0.5.0-SNAPSHOT</forwardingrulesmanager.implementation.version>
+ <forwardingrulesmanager.version>0.7.0-SNAPSHOT</forwardingrulesmanager.version>
+ <hosttracker.api.version>0.6.0-SNAPSHOT</hosttracker.api.version>
+ <hosttracker.implementation.version>0.6.0-SNAPSHOT</hosttracker.implementation.version>
+ <hosttracker.northbound.version>0.5.0-SNAPSHOT</hosttracker.northbound.version>
+ <hosttracker.shell.version>1.1.0-SNAPSHOT</hosttracker.shell.version>
+ <hosttracker_new.api.version>0.5.0-SNAPSHOT</hosttracker_new.api.version>
+ <hosttracker_new.implementation.version>0.5.0-SNAPSHOT</hosttracker_new.implementation.version>
+ <httpservice-bridge.northbound.version>0.1.0-SNAPSHOT</httpservice-bridge.northbound.version>
+ <ietf-inet-types.version>2010.09.24.7-SNAPSHOT</ietf-inet-types.version>
+ <ietf-restconf.version>2013.10.19.7-SNAPSHOT</ietf-restconf.version>
+ <ietf-topology.version>2013.10.21.7-SNAPSHOT</ietf-topology.version>
+ <ietf-yang-types.version>2010.09.24.7-SNAPSHOT</ietf-yang-types.version>
<jdepend.maven.plugin.version>2.0-beta-2</jdepend.maven.plugin.version>
<jmxGeneratorPath>src/main/yang-gen-config</jmxGeneratorPath>
- <jolokia-bridge.version>0.0.2-SNAPSHOT</jolokia-bridge.version>
+ <jolokia-bridge.version>0.1.0-SNAPSHOT</jolokia-bridge.version>
<jolokia.version>1.1.4</jolokia.version>
<jsr305.api.version>2.0.1</jsr305.api.version>
<jsr311.api.version>1.1.1</jsr311.api.version>
<jsr311.v2.api.version>2.0</jsr311.v2.api.version>
- <karaf.branding.version>1.0.0-SNAPSHOT</karaf.branding.version>
+ <karaf.branding.version>1.1.0-SNAPSHOT</karaf.branding.version>
<karaf.shell.version>3.0.0</karaf.shell.version>
<karaf.version>3.0.1</karaf.version>
<leveldb.version>0.7</leveldb.version>
<leveldbjni.version>1.8</leveldbjni.version>
<lifecycle.mapping.version>1.0.0</lifecycle.mapping.version>
<logback.version>1.0.9</logback.version>
- <logging.bridge.version>0.4.2-SNAPSHOT</logging.bridge.version>
+ <logging.bridge.version>0.5.0-SNAPSHOT</logging.bridge.version>
<maven.plugin.api.version>3.0.5</maven.plugin.api.version>
<mimepull.version>1.9.4</mimepull.version>
- <mdsal.version>1.1-SNAPSHOT</mdsal.version>
- <netconf.version>0.2.5-SNAPSHOT</netconf.version>
- <networkconfig.bridgedomain.northbound.version>0.0.3-SNAPSHOT</networkconfig.bridgedomain.northbound.version>
- <networkconfig.neutron.implementation.version>0.4.2-SNAPSHOT</networkconfig.neutron.implementation.version>
- <networkconfig.neutron.northbound.version>0.4.2-SNAPSHOT</networkconfig.neutron.northbound.version>
- <networkconfig.neutron.version>0.4.2-SNAPSHOT</networkconfig.neutron.version>
+ <mdsal.version>1.2.0-SNAPSHOT</mdsal.version>
+ <netconf.version>0.3.0-SNAPSHOT</netconf.version>
+ <networkconfig.bridgedomain.northbound.version>0.1.0-SNAPSHOT</networkconfig.bridgedomain.northbound.version>
+ <networkconfig.neutron.implementation.version>0.5.0-SNAPSHOT</networkconfig.neutron.implementation.version>
+ <networkconfig.neutron.northbound.version>0.5.0-SNAPSHOT</networkconfig.neutron.northbound.version>
+ <networkconfig.neutron.version>0.5.0-SNAPSHOT</networkconfig.neutron.version>
<!-- ODL repository / plugin repository -->
<nexusproxy>http://nexus.opendaylight.org/content</nexusproxy>
- <northbound.commons.version>0.4.2-SNAPSHOT</northbound.commons.version>
- <northbound.hosttracker.version>1.4.2-SNAPSHOT</northbound.hosttracker.version>
- <northbound.jolokia.version>1.4.2-SNAPSHOT</northbound.jolokia.version>
- <opendaylight-l2-types.version>2013.08.27.4-SNAPSHOT</opendaylight-l2-types.version>
- <osgi-brandfragment.web.version>0.0.2-SNAPSHOT</osgi-brandfragment.web.version>
+ <northbound.commons.version>0.5.0-SNAPSHOT</northbound.commons.version>
+ <northbound.hosttracker.version>1.5.0-SNAPSHOT</northbound.hosttracker.version>
+ <northbound.jolokia.version>1.5.0-SNAPSHOT</northbound.jolokia.version>
+ <opendaylight-l2-types.version>2013.08.27.7-SNAPSHOT</opendaylight-l2-types.version>
+ <osgi-brandfragment.web.version>0.1.0-SNAPSHOT</osgi-brandfragment.web.version>
<pax.exam.version>4.0.0</pax.exam.version>
<parboiled.version>1.1.6</parboiled.version>
<parboiled.scala.version>1.1.6</parboiled.scala.version>
<propertymavenplugin.version>1.0-alpha-2</propertymavenplugin.version>
<protobuf.version>2.5.0</protobuf.version>
- <protocol-framework.version>0.5.0-SNAPSHOT</protocol-framework.version>
- <protocol_plugins.openflow.version>0.4.2-SNAPSHOT</protocol_plugins.openflow.version>
- <protocol_plugins.stub.version>0.4.2-SNAPSHOT</protocol_plugins.stub.version>
- <routing.dijkstra_implementation.version>0.4.2-SNAPSHOT</routing.dijkstra_implementation.version>
- <sal.connection.version>0.1.2-SNAPSHOT</sal.connection.version>
- <sal.implementation.version>0.4.2-SNAPSHOT</sal.implementation.version>
- <sal.networkconfiguration.version>0.0.3-SNAPSHOT</sal.networkconfiguration.version>
- <sal.version>0.8.1-SNAPSHOT</sal.version>
+ <protocol-framework.version>0.6.0-SNAPSHOT</protocol-framework.version>
+ <protocol_plugins.openflow.version>0.5.0-SNAPSHOT</protocol_plugins.openflow.version>
+ <protocol_plugins.stub.version>0.5.0-SNAPSHOT</protocol_plugins.stub.version>
+ <routing.dijkstra_implementation.version>0.5.0-SNAPSHOT</routing.dijkstra_implementation.version>
+ <sal.connection.version>0.2.0-SNAPSHOT</sal.connection.version>
+ <sal.implementation.version>0.5.0-SNAPSHOT</sal.implementation.version>
+ <sal.networkconfiguration.version>0.1.0-SNAPSHOT</sal.networkconfiguration.version>
+ <sal.version>0.9.0-SNAPSHOT</sal.version>
<salGeneratorPath>src/main/yang-gen-sal</salGeneratorPath>
- <samples.loadbalancer>0.5.2-SNAPSHOT</samples.loadbalancer>
- <samples.loadbalancer.northbound.version>0.4.2-SNAPSHOT</samples.loadbalancer.northbound.version>
- <samples.simpleforwarding.version>0.4.2-SNAPSHOT</samples.simpleforwarding.version>
- <sanitytest.version>0.4.2-SNAPSHOT</sanitytest.version>
+ <samples.loadbalancer>0.6.0-SNAPSHOT</samples.loadbalancer>
+ <samples.loadbalancer.northbound.version>0.5.0-SNAPSHOT</samples.loadbalancer.northbound.version>
+ <samples.simpleforwarding.version>0.5.0-SNAPSHOT</samples.simpleforwarding.version>
+ <sanitytest.version>0.5.0-SNAPSHOT</sanitytest.version>
<scala.version>2.10</scala.version>
<scala.micro.version>4</scala.micro.version>
- <security.version>0.4.2-SNAPSHOT</security.version>
- <karaf.security.version>0.4.2-SNAPSHOT</karaf.security.version>
+ <security.version>0.5.0-SNAPSHOT</security.version>
+ <karaf.security.version>0.5.0-SNAPSHOT</karaf.security.version>
<shapeless.version>1.2.4</shapeless.version>
<sitedeploy>dav:http://nexus.opendaylight.org/content/sites/site</sitedeploy>
<sonar.branch>${user.name}-private-view</sonar.branch>
<sonar.language>java</sonar.language>
<sonar.jacoco.reportPath>target/code-coverage/jacoco.exec</sonar.jacoco.reportPath>
<sonar.jacoco.itReportPath>target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
- <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages,ch.ethz.ssh2</sonar.skippedModules>
+ <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages</sonar.skippedModules>
<sonar.profile>Sonar way with Findbugs</sonar.profile>
<spifly.version>1.0.0</spifly.version>
<spring-osgi.version>1.2.1</spring-osgi.version>
<spring-security-karaf.version>3.1.4.RELEASE</spring-security-karaf.version>
<spring-security.version>3.1.3.RELEASE</spring-security.version>
<spring.version>3.1.3.RELEASE</spring.version>
- <statistics.northbound.version>0.4.2-SNAPSHOT</statistics.northbound.version>
- <statisticsmanager.implementation.version>0.4.2-SNAPSHOT</statisticsmanager.implementation.version>
- <statisticsmanager.version>0.5.1-SNAPSHOT</statisticsmanager.version>
- <subnets.northbound.version>0.4.2-SNAPSHOT</subnets.northbound.version>
+ <statistics.northbound.version>0.5.0-SNAPSHOT</statistics.northbound.version>
+ <statisticsmanager.implementation.version>0.5.0-SNAPSHOT</statisticsmanager.implementation.version>
+ <statisticsmanager.version>0.6.0-SNAPSHOT</statisticsmanager.version>
+ <subnets.northbound.version>0.5.0-SNAPSHOT</subnets.northbound.version>
<surefire.version>2.15</surefire.version>
- <switchmanager.api.version>0.7.1-SNAPSHOT</switchmanager.api.version>
- <switchmanager.implementation.version>0.4.2-SNAPSHOT</switchmanager.implementation.version>
- <switchmanager.northbound.version>0.4.2-SNAPSHOT</switchmanager.northbound.version>
+ <switchmanager.api.version>0.8.0-SNAPSHOT</switchmanager.api.version>
+ <switchmanager.implementation.version>0.5.0-SNAPSHOT</switchmanager.implementation.version>
+ <switchmanager.northbound.version>0.5.0-SNAPSHOT</switchmanager.northbound.version>
<testvm.argLine>-Xmx1024m -XX:MaxPermSize=256m</testvm.argLine>
- <topology.northbound.version>0.4.2-SNAPSHOT</topology.northbound.version>
- <topology.web.version>0.4.2-SNAPSHOT</topology.web.version>
- <topologymanager.version>0.4.2-SNAPSHOT</topologymanager.version>
- <topologymanager.shell.version>1.0.0-SNAPSHOT</topologymanager.shell.version>
- <troubleshoot.web.version>0.4.2-SNAPSHOT</troubleshoot.web.version>
+ <topology.northbound.version>0.5.0-SNAPSHOT</topology.northbound.version>
+ <topology.web.version>0.5.0-SNAPSHOT</topology.web.version>
+ <topologymanager.version>0.5.0-SNAPSHOT</topologymanager.version>
+ <topologymanager.shell.version>1.1.0-SNAPSHOT</topologymanager.shell.version>
+ <troubleshoot.web.version>0.5.0-SNAPSHOT</troubleshoot.web.version>
<typesafe.config.version>1.2.0</typesafe.config.version>
<uncommons.maths.version>1.2.2a</uncommons.maths.version>
- <usermanager.implementation.version>0.4.2-SNAPSHOT</usermanager.implementation.version>
- <usermanager.northbound.version>0.0.2-SNAPSHOT</usermanager.northbound.version>
- <usermanager.version>0.4.2-SNAPSHOT</usermanager.version>
- <nsf.version>0.4.2-SNAPSHOT</nsf.version>
- <web.version>0.4.2-SNAPSHOT</web.version>
+ <usermanager.implementation.version>0.5.0-SNAPSHOT</usermanager.implementation.version>
+ <usermanager.northbound.version>0.1.0-SNAPSHOT</usermanager.northbound.version>
+ <usermanager.version>0.5.0-SNAPSHOT</usermanager.version>
+ <nsf.version>0.5.0-SNAPSHOT</nsf.version>
+ <web.version>0.5.0-SNAPSHOT</web.version>
<xtend.dstdir>src/main/xtend-gen</xtend.dstdir>
- <yang-ext.version>2013.09.07.4-SNAPSHOT</yang-ext.version>
- <yang-jmx-generator.version>1.0.0-SNAPSHOT</yang-jmx-generator.version>
- <yangtools.version>0.6.2-SNAPSHOT</yangtools.version>
+ <yang-ext.version>2013.09.07.7-SNAPSHOT</yang-ext.version>
+ <yang-jmx-generator.version>1.1.0-SNAPSHOT</yang-jmx-generator.version>
+ <yangtools.version>0.7.0-SNAPSHOT</yangtools.version>
<sshd-core.version>0.12.0</sshd-core.version>
<jmh.version>0.9.7</jmh.version>
</properties>
<dependencyManagement>
<dependencies>
+
<!-- project specific dependencies -->
<dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>ietf-netconf-monitoring</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>ietf-netconf-monitoring-extension</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>netconf-netty-util</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-artifacts</artifactId>
<version>${netconf.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
</dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>config-manager</artifactId>
<version>${config.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-netconf-connector</artifactId>
- <version>${netconf.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-persister-api</artifactId>
<artifactId>config-persister-feature-adapter</artifactId>
<version>${config.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>config-persister-impl</artifactId>
- <version>${netconf.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>httpservice-bridge</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>jolokia-bridge</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
</dependency>
<!-- Karaf Dependencies -->
<dependency>
<version>${dummy-console.version}</version>
</dependency>
- <!-- Netconf -->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-api</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-client</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-client</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
-
- <!--Netconf config-->
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-config-dispatcher</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-impl</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-impl</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-mapping-api</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-monitoring</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-netty-util</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-auth</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-usermanager</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-ssh</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-ssh</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-tcp</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-util</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-util</artifactId>
- <version>${netconf.version}</version>
- <type>test-jar</type>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netty-config-api</artifactId>
<artifactId>md-sal-config</artifactId>
<version>${mdsal.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-config</artifactId>
- <version>${netconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>netconf-connector-config</artifactId>
- <version>${netconf.version}</version>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-rest-docgen</artifactId>
<artifactId>com.sun.jersey.jersey-servlet</artifactId>
<version>${jersey-servlet.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- <version>${ganymed.version}</version>
- </dependency>
+
<!-- Third parties from opendaylight released -->
<dependency>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>org.openflow.openflowj</artifactId>
<version>1.0.2</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-impl</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-data-codec</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-spi</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-generator-util</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>binding-type-provider</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>concepts</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>object-cache-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>object-cache-guava</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>restconf-client-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>restconf-client-impl</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>util</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-composite-node</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-codec-gson</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <!-- yangtools dependencies -->
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-binding</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-common</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-impl</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
+ <!-- yangtools artifacts -->
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-data-util</artifactId>
+ <artifactId>yangtools-artifacts</artifactId>
<version>${yangtools.version}</version>
+ <type>pom</type>
+ <scope>import</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-maven-plugin-spi</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-model-util</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-parser-api</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>yang-parser-impl</artifactId>
- <version>${yangtools.version}</version>
- </dependency>
- <!-- yang model dependencies -->
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-inet-types</artifactId>
- <version>${ietf-inet-types.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-restconf</artifactId>
- <version>${ietf-restconf.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-topology</artifactId>
- <version>${ietf-topology.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-topology-l3-unicast-igp</artifactId>
- <version>${ietf-topology.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-yang-types</artifactId>
- <version>${ietf-yang-types.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>ietf-yang-types-20130715</artifactId>
- <version>2013.07.15.1-SNAPSHOT</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>opendaylight-l2-types</artifactId>
- <version>${opendaylight-l2-types.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.model</groupId>
- <artifactId>yang-ext</artifactId>
- <version>${yang-ext.version}</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.thirdparty</groupId>
- <artifactId>antlr4-runtime-osgi-nohead</artifactId>
- <version>4.0</version>
- </dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools.thirdparty</groupId>
- <artifactId>xtend-lib-osgi</artifactId>
- <version>${xtend.version}</version>
- </dependency>
+
<dependency>
<groupId>org.openexi</groupId>
<artifactId>nagasena</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.logback_settings</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>
<dependency>
<version>${mdsal.version}</version>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.yangtools</groupId>
- <artifactId>mockito-configuration</artifactId>
- <version>${yangtools.version}</version>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-config</artifactId>
<type>xml</type>
<scope>runtime</scope>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>features-netconf</artifactId>
- <version>${netconf.version}</version>
- <classifier>features</classifier>
- <type>xml</type>
- <scope>runtime</scope>
- </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-config-persister</artifactId>
<type>xml</type>
<scope>runtime</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-netconf-connector</artifactId>
+ <version>${mdsal.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
<!-- JMH Benchmark dependencies -->
<dependency>
<groupId>org.openjdk.jmh</groupId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.logback_settings</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<executions>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>checkstyle</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
</dependency>
</dependencies>
<executions>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.parent</artifactId>
- <version>1.0.2-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<prerequisites>
<maven>3.0</maven>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>protocol-framework</artifactId>
- <version>0.5.0-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<name>${project.artifactId}</name>
<description>Common protocol framework</description>
promise.setSuccess(session);
}
- protected final void negotiationFailed(final Throwable cause) {
+ protected void negotiationFailed(final Throwable cause) {
LOG.debug("Negotiation on channel {} failed", channel, cause);
channel.close();
promise.setFailure(cause);
return;
}
- // Check if initial connection was fully finished. If the session was dropped during negotiation, reconnect will not happen.
- // Session can be dropped during negotiation on purpose by the client side and would make no sense to initiate reconnect
if (promise.isInitialConnectFinished() == false) {
- return;
+ LOG.debug("Connection to {} was dropped during negotiation, reattempting", promise.address);
}
LOG.debug("Reconnecting after connection to {} was dropped", promise.address);
assertFalse(session.isSuccess());
}
- @Test
- public void testNegotiationFailedNoReconnect() throws Exception {
- final Promise<Boolean> p = new DefaultPromise<>(GlobalEventExecutor.INSTANCE);
-
- this.dispatcher = getServerDispatcher(p);
-
- this.server = this.dispatcher.createServer(this.serverAddress, new SessionListenerFactory<SimpleSessionListener>() {
- @Override
- public SimpleSessionListener getSessionListener() {
- return new SimpleSessionListener();
- }
- });
-
- this.server.get();
-
- this.clientDispatcher = new SimpleDispatcher(new SessionNegotiatorFactory<SimpleMessage, SimpleSession, SimpleSessionListener>() {
- @Override
- public SessionNegotiator<SimpleSession> getSessionNegotiator(final SessionListenerFactory<SimpleSessionListener> factory,
- final Channel channel, final Promise<SimpleSession> promise) {
-
- return new SimpleSessionNegotiator(promise, channel) {
- @Override
- protected void startNegotiation() throws Exception {
- negotiationFailed(new IllegalStateException("Negotiation failed"));
- }
- };
- }
- }, new DefaultPromise<SimpleSession>(GlobalEventExecutor.INSTANCE), eventLoopGroup);
-
- final ReconnectStrategyFactory reconnectStrategyFactory = mock(ReconnectStrategyFactory.class);
- final ReconnectStrategy reconnectStrategy = getMockedReconnectStrategy();
- doReturn(reconnectStrategy).when(reconnectStrategyFactory).createReconnectStrategy();
-
- this.clientDispatcher.createReconnectingClient(this.serverAddress,
- reconnectStrategyFactory, new SessionListenerFactory<SimpleSessionListener>() {
- @Override
- public SimpleSessionListener getSessionListener() {
- return new SimpleSessionListener();
- }
- });
-
-
- // Only one strategy should be created for initial connect, no more = no reconnects
- verify(reconnectStrategyFactory, times(1)).createReconnectStrategy();
- }
-
private SimpleDispatcher getClientDispatcher() {
return new SimpleDispatcher(new SessionNegotiatorFactory<SimpleMessage, SimpleSession, SimpleSessionListener>() {
@Override
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>config-api</artifactId>
@Override
public String toString() {
- final StringBuffer sb = new StringBuffer("IdentityAttributeRef{");
+ final StringBuilder sb = new StringBuilder("IdentityAttributeRef{");
sb.append("qNameOfIdentity='").append(qNameOfIdentity).append('\'');
sb.append('}');
return sb.toString();
*/
@ThreadSafe
public class ObjectNameUtil {
+ private ObjectNameUtil() {
+ }
public static final String ON_DOMAIN = ConfigRegistryConstants.ON_DOMAIN;
public static final String MODULE_FACTORY_NAME_KEY = "moduleFactoryName";
}
public static Map<String, String> getAdditionalProperties(ObjectName on) {
- Hashtable<String, String> keyPropertyList = on.getKeyPropertyList();
+ Map<String, String> keyPropertyList = on.getKeyPropertyList();
Map<String, String> result = new HashMap<>();
for (Entry<String, String> entry : keyPropertyList.entrySet()) {
result.put(entry.getKey(), entry.getValue());
public static ObjectName createModulePattern(String moduleName,
String instanceName) {
- moduleName = moduleName == null ? ON_WILDCARD : moduleName;
- instanceName = instanceName == null ? ON_WILDCARD : instanceName;
+ String finalModuleName = moduleName == null ? ON_WILDCARD : moduleName;
+ String finalInstanceName = instanceName == null ? ON_WILDCARD : instanceName;
// do not return object names containing transaction name
ObjectName namePattern = ObjectNameUtil
+ ObjectNameUtil.TYPE_KEY + "="
+ ObjectNameUtil.TYPE_MODULE + ","
+ ObjectNameUtil.MODULE_FACTORY_NAME_KEY + "="
- + moduleName + "," + ""
- + ObjectNameUtil.INSTANCE_NAME_KEY + "=" + instanceName);
+ + finalModuleName + "," + ""
+ + ObjectNameUtil.INSTANCE_NAME_KEY + "=" + finalInstanceName);
return namePattern;
}
public static ObjectName createModulePattern(String ifcName,
String instanceName, String transactionName) {
- ifcName = ifcName == null ? ON_WILDCARD : ifcName;
- instanceName = instanceName == null ? ON_WILDCARD : instanceName;
- transactionName = transactionName == null ? ON_WILDCARD : transactionName;
+ String finalIfcName = ifcName == null ? ON_WILDCARD : ifcName;
+ String finalInstanceName = instanceName == null ? ON_WILDCARD : instanceName;
+ String finalTransactionName = transactionName == null ? ON_WILDCARD : transactionName;
return ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
+ ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
- + "=" + ifcName + "," + ObjectNameUtil.INSTANCE_NAME_KEY + "="
- + instanceName + "," + ObjectNameUtil.TRANSACTION_NAME_KEY
- + "=" + transactionName);
+ + "=" + finalIfcName + "," + ObjectNameUtil.INSTANCE_NAME_KEY + "="
+ + finalInstanceName + "," + ObjectNameUtil.TRANSACTION_NAME_KEY
+ + "=" + finalTransactionName);
}
public static ObjectName createRuntimeBeanPattern(String moduleName,
String instanceName) {
- moduleName = moduleName == null ? ON_WILDCARD : moduleName;
- instanceName = instanceName == null ? ON_WILDCARD : instanceName;
+ String finalModuleName = moduleName == null ? ON_WILDCARD : moduleName;
+ String finalInstanceName = instanceName == null ? ON_WILDCARD : instanceName;
return ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN + ":"
+ ObjectNameUtil.TYPE_KEY + "="
+ ObjectNameUtil.TYPE_RUNTIME_BEAN + ","
- + ObjectNameUtil.MODULE_FACTORY_NAME_KEY + "=" + moduleName
- + "," + ObjectNameUtil.INSTANCE_NAME_KEY + "=" + instanceName
+ + ObjectNameUtil.MODULE_FACTORY_NAME_KEY + "=" + finalModuleName
+ + "," + ObjectNameUtil.INSTANCE_NAME_KEY + "=" + finalInstanceName
+ ",*");
}
public class ConfigRegistryConstants {
+ private ConfigRegistryConstants() {
+ }
+
public static final String TYPE_CONFIG_REGISTRY = "ConfigRegistry";
public static final String ON_DOMAIN = "org.opendaylight.controller";
the actual service-type which is actually required.";
mandatory true;
- type service-type-ref;
+ type leafref {
+ path "/config:services/config:service/config:type";
+ }
}
leaf name {
"Top level container encapsulating configuration of all modules.";
list module {
- key "name";
+ key "type name";
leaf name {
description "Unique module instance name";
type string;
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-manager</artifactId>
*/
@ThreadSafe
public class ConfigRegistryImpl implements AutoCloseable, ConfigRegistryImplMXBean {
- private static final Logger logger = LoggerFactory.getLogger(ConfigRegistryImpl.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ConfigRegistryImpl.class);
private final ModuleFactoriesResolver resolver;
private final MBeanServer configMBeanServer;
for (ModuleInternalInfo moduleInternalInfo : currentConfig.getEntries()) {
String name = moduleInternalInfo.getModuleFactory().getImplementationName();
if (allCurrentFactories.containsKey(name) == false) {
- logger.trace("Factory {} not found in SR, using reference from previous commit", name);
+ LOGGER.trace("Factory {} not found in SR, using reference from previous commit", name);
allCurrentFactories.put(name,
Maps.immutableEntry(moduleInternalInfo.getModuleFactory(), moduleInternalInfo.getBundleContext()));
}
throws ConflictingVersionException, ValidationException {
final String transactionName = ObjectNameUtil
.getTransactionName(transactionControllerON);
- logger.trace("About to commit {}. Current parentVersion: {}, versionCounter {}", transactionName, version, versionCounter);
+ LOGGER.trace("About to commit {}. Current parentVersion: {}, versionCounter {}", transactionName, version, versionCounter);
// find ConfigTransactionController
Map<String, Entry<ConfigTransactionControllerInternal, ConfigTransactionLookupRegistry>> transactions = transactionsHolder.getCurrentTransactions();
} catch (Error | RuntimeException t) { // some libs throw Errors: e.g.
// javax.xml.ws.spi.FactoryFinder$ConfigurationError
isHealthy = false;
- logger.error("Configuration Transaction failed on 2PC, server is unhealthy", t);
+ LOGGER.error("Configuration Transaction failed on 2PC, server is unhealthy", t);
if (t instanceof RuntimeException) {
throw (RuntimeException) t;
} else {
int orderingIdx = 0;
for (ModuleIdentifier moduleIdentifier : orderedModuleIdentifiers) {
- logger.trace("Registering {}", moduleIdentifier);
+ LOGGER.trace("Registering {}", moduleIdentifier);
ModuleInternalTransactionalInfo entry = commitInfo.getCommitted()
.get(moduleIdentifier);
if (entry == null) {
configTransactionControllerEntry.getValue().close();
configTransactionController.abortConfig();
} catch (RuntimeException e) {
- logger.warn("Ignoring exception while aborting {}",
+ LOGGER.warn("Ignoring exception while aborting {}",
configTransactionController, e);
}
}
@Override
public Set<ObjectName> lookupRuntimeBeans(String moduleName,
String instanceName) {
- if (moduleName == null) {
- moduleName = "*";
- }
- if (instanceName == null) {
- instanceName = "*";
- }
+ String finalModuleName = moduleName == null ? "*" : moduleName;
+ String finalInstanceName = instanceName == null ? "*" : instanceName;
ObjectName namePattern = ObjectNameUtil.createRuntimeBeanPattern(
- moduleName, instanceName);
+ finalModuleName, finalInstanceName);
return baseJMXRegistrator.queryNames(namePattern, null);
}
* Service Registry.
*/
public void addAll(Collection<ModuleInternalInfo> configInfos) {
- if (currentConfig.size() > 0) {
+ if (!currentConfig.isEmpty()) {
throw new IllegalStateException(
"Error - some config entries were not removed: "
+ currentConfig);
ConfigTransactionControllerInternal,
ConfigTransactionControllerImplMXBean,
Identifiable<TransactionIdentifier> {
- private static final Logger logger = LoggerFactory.getLogger(ConfigTransactionControllerImpl.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ConfigTransactionControllerImpl.class);
private final ConfigTransactionLookupRegistry txLookupRegistry;
private final ObjectName controllerON;
boolean isDefaultBean, BundleContext bundleContext)
throws InstanceAlreadyExistsException {
- logger.debug("Adding module {} to transaction {}", moduleIdentifier, this);
+ LOGGER.debug("Adding module {} to transaction {}", moduleIdentifier, this);
if (moduleIdentifier.equals(module.getIdentifier()) == false) {
throw new IllegalStateException("Incorrect name reported by module. Expected "
+ moduleIdentifier + ", got " + module.getIdentifier());
}
private synchronized void destroyModule(ModuleIdentifier moduleIdentifier) {
- logger.debug("Destroying module {} in transaction {}", moduleIdentifier, this);
+ LOGGER.debug("Destroying module {} in transaction {}", moduleIdentifier, this);
transactionStatus.checkNotAborted();
ModuleInternalTransactionalInfo found = dependencyResolverManager.findModuleInternalTransactionalInfo(moduleIdentifier);
- if (blankTransaction == false) {
-
- if (found.isDefaultBean()) {
- logger.warn("Warning: removing default bean. This will be forbidden in next version of config-subsystem");
- }
+ if (blankTransaction == false &&
+ found.isDefaultBean()) {
+ LOGGER.warn("Warning: removing default bean. This will be forbidden in next version of config-subsystem");
}
// first remove refNames, it checks for objectname existence
writableSRRegistry.removeServiceReferences(
ObjectNameUtil.createTransactionModuleON(getTransactionName(), moduleIdentifier));
} catch (InstanceNotFoundException e) {
- logger.error("Possible code error: cannot find {} in {}", moduleIdentifier, writableSRRegistry);
+ LOGGER.error("Possible code error: cannot find {} in {}", moduleIdentifier, writableSRRegistry);
throw new IllegalStateException("Possible code error: cannot find " + moduleIdentifier, e);
}
}
configBeanModificationDisabled.set(true);
try {
- validate_noLocks();
+ validateNoLocks();
} finally {
configBeanModificationDisabled.set(false);
}
}
- private void validate_noLocks() throws ValidationException {
+ private void validateNoLocks() throws ValidationException {
transactionStatus.checkNotAborted();
- logger.trace("Validating transaction {}", getTransactionIdentifier());
+ LOGGER.trace("Validating transaction {}", getTransactionIdentifier());
// call validate()
List<ValidationException> collectedExceptions = new ArrayList<>();
for (Entry<ModuleIdentifier, Module> entry : dependencyResolverManager
try {
module.validate();
} catch (Exception e) {
- logger.warn("Validation exception in {}", getTransactionName(),
+ LOGGER.warn("Validation exception in {}", getTransactionName(),
e);
collectedExceptions.add(ValidationException
.createForSingleException(name, e));
}
}
- if (collectedExceptions.size() > 0) {
+ if (!collectedExceptions.isEmpty()) {
throw ValidationException
.createFromCollectedValidationExceptions(collectedExceptions);
}
- logger.trace("Validated transaction {}", getTransactionIdentifier());
+ LOGGER.trace("Validated transaction {}", getTransactionIdentifier());
}
/**
transactionStatus.checkNotCommitStarted();
configBeanModificationDisabled.set(true);
try {
- validate_noLocks();
+ validateNoLocks();
} catch (ValidationException e) {
- logger.trace("Commit failed on validation");
+ LOGGER.trace("Commit failed on validation");
configBeanModificationDisabled.set(false); // recoverable error
throw e;
}
+ "to obtain a lock");
}
- logger.trace("Committing transaction {}", getTransactionIdentifier());
+ LOGGER.trace("Committing transaction {}", getTransactionIdentifier());
// call getInstance()
for (Entry<ModuleIdentifier, Module> entry : dependencyResolverManager
Module module = entry.getValue();
ModuleIdentifier name = entry.getKey();
try {
- logger.debug("About to commit {} in transaction {}",
+ LOGGER.debug("About to commit {} in transaction {}",
name, getTransactionIdentifier());
AutoCloseable instance = module.getInstance();
checkNotNull(instance, "Instance is null:{} in transaction {}", name, getTransactionIdentifier());
} catch (Exception e) {
- logger.error("Commit failed on {} in transaction {}", name,
+ LOGGER.error("Commit failed on {} in transaction {}", name,
getTransactionIdentifier(), e);
internalAbort();
throw new IllegalStateException(
// count dependency order
- logger.trace("Committed configuration {}", getTransactionIdentifier());
+ LOGGER.trace("Committed configuration {}", getTransactionIdentifier());
transactionStatus.setCommitted();
return dependencyResolverManager.getSortedModuleIdentifiers();
}
private void internalAbort() {
- logger.trace("Aborting {}", this);
+ LOGGER.trace("Aborting {}", this);
transactionStatus.setAborted();
close();
}
import java.util.concurrent.TimeUnit;
public class DeadlockMonitor implements AutoCloseable {
- private static final Logger logger = LoggerFactory.getLogger(DeadlockMonitorRunnable.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(DeadlockMonitorRunnable.class);
private static final long WARN_AFTER_MILLIS = 5000;
moduleIdentifierWithNanosStack.push(current);
top = current;
}
- logger.trace("setCurrentlyInstantiatedModule {}, top {}", currentlyInstantiatedModule, top);
+ LOGGER.trace("setCurrentlyInstantiatedModule {}, top {}", currentlyInstantiatedModule, top);
}
public boolean isAlive() {
// is the getInstance() running longer than WARN_AFTER_MILLIS ?
long runningTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - copy.nanoTime);
if (runningTime > WARN_AFTER_MILLIS) {
- logger.warn("{} did not finish after {} ms", copy.moduleIdentifier, runningTime);
+ LOGGER.warn("{} did not finish after {} ms", copy.moduleIdentifier, runningTime);
}
}
try {
interrupt();
}
}
- logger.trace("Exiting {}", this);
+ LOGGER.trace("Exiting {}", this);
}
@Override
@Override
public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
ModuleIdentifierWithNanos that = (ModuleIdentifierWithNanos) o;
- if (nanoTime != that.nanoTime) return false;
- if (moduleIdentifier != null ? !moduleIdentifier.equals(that.moduleIdentifier) : that.moduleIdentifier != null)
+ if (nanoTime != that.nanoTime) {
return false;
+ }
+ if (moduleIdentifier != null ? !moduleIdentifier.equals(that.moduleIdentifier) : that.moduleIdentifier != null) {
+ return false;
+ }
return true;
}
import org.slf4j.LoggerFactory;
public class ServiceReferenceRegistryImpl implements CloseableServiceReferenceReadableRegistry, SearchableServiceReferenceWritableRegistry {
- private static final Logger logger = LoggerFactory.getLogger(ServiceReferenceRegistryImpl.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ServiceReferenceRegistryImpl.class);
private final Map<String, ModuleFactory> factories;
private final Map<String, Set<String>> factoryNamesToQNames;
boolean skipChecks = true;
newRegistry.saveServiceReference(refNameEntry.getKey(), currentImplementation, skipChecks);
} catch (InstanceNotFoundException e) {
- logger.error("Cannot save service reference({}, {})", refNameEntry.getKey(), currentImplementation);
+ LOGGER.error("Cannot save service reference({}, {})", refNameEntry.getKey(), currentImplementation);
throw new IllegalStateException("Possible code error", e);
}
}
Map<String, Set<String /* QName */>> modifiableFactoryNamesToQNames = new HashMap<>();
Set<ServiceInterfaceAnnotation> allAnnotations = new HashSet<>();
- Set<String /* qName */> allQNames = new HashSet<>();
+ Set<String /* qName */> allQNameSet = new HashSet<>();
for (Entry<String, ModuleFactory> entry : factories.entrySet()) {
if (entry.getKey().equals(entry.getValue().getImplementationName()) == false) {
- logger.error("Possible error in code: Mismatch between supplied and actual name of {}", entry);
+ LOGGER.error("Possible error in code: Mismatch between supplied and actual name of {}", entry);
throw new IllegalArgumentException("Possible error in code: Mismatch between supplied and actual name of " + entry);
}
Set<ServiceInterfaceAnnotation> siAnnotations = InterfacesHelper.getServiceInterfaceAnnotations(entry.getValue());
Set<String> qNames = InterfacesHelper.getQNames(siAnnotations);
allAnnotations.addAll(siAnnotations);
- allQNames.addAll(qNames);
+ allQNameSet.addAll(qNames);
modifiableFactoryNamesToQNames.put(entry.getKey(), Collections.unmodifiableSet(qNames));
}
this.factoryNamesToQNames = Collections.unmodifiableMap(modifiableFactoryNamesToQNames);
- this.allQNames = Collections.unmodifiableSet(allQNames);
+ this.allQNames = Collections.unmodifiableSet(allQNameSet);
// fill namespacesToAnnotations
Map<String /* namespace */, Map<String /* localName */, ServiceInterfaceAnnotation>> modifiableNamespacesToAnnotations =
new HashMap<>();
modifiableNamespacesToAnnotations.put(sia.namespace(), ofNamespace);
}
if (ofNamespace.containsKey(sia.localName())) {
- logger.error("Cannot construct namespacesToAnnotations map, conflict between local names in {}, offending local name: {}, map so far {}",
+ LOGGER.error("Cannot construct namespacesToAnnotations map, conflict between local names in {}, offending local name: {}, map so far {}",
sia.namespace(), sia.localName(), modifiableNamespacesToAnnotations);
throw new IllegalArgumentException("Conflict between local names in " + sia.namespace() + " : " + sia.localName());
}
}
this.namespacesToAnnotations = Collections.unmodifiableMap(modifiableNamespacesToAnnotations);
this.serviceQNamesToAnnotations = Collections.unmodifiableMap(modifiableServiceQNamesToAnnotations);
- logger.trace("factoryNamesToQNames:{}", this.factoryNamesToQNames);
+ LOGGER.trace("factoryNamesToQNames:{}", this.factoryNamesToQNames);
}
@Override
String factoryName = ObjectNameUtil.getFactoryName(objectName);
Set<String> serviceInterfaceAnnotations = factoryNamesToQNames.get(factoryName);
if (serviceInterfaceAnnotations == null) {
- logger.error("Possible error in code: cannot find factory annotations of '{}' extracted from ON {} in {}",
+ LOGGER.error("Possible error in code: cannot find factory annotations of '{}' extracted from ON {} in {}",
factoryName, objectName, factoryNamesToQNames);
throw new IllegalArgumentException("Cannot find factory with name " + factoryName);
}
public synchronized String getServiceInterfaceName(String namespace, String localName) {
Map<String /* localName */, ServiceInterfaceAnnotation> ofNamespace = namespacesToAnnotations.get(namespace);
if (ofNamespace == null) {
- logger.error("Cannot find namespace {} in {}", namespace, namespacesToAnnotations);
+ LOGGER.error("Cannot find namespace {} in {}", namespace, namespacesToAnnotations);
throw new IllegalArgumentException("Cannot find namespace " + namespace);
}
ServiceInterfaceAnnotation sia = ofNamespace.get(localName);
if (sia == null) {
- logger.error("Cannot find local name {} in namespace {}, found only {}", localName, namespace, ofNamespace);
+ LOGGER.error("Cannot find local name {} in namespace {}, found only {}", localName, namespace, ofNamespace);
throw new IllegalArgumentException("Cannot find local name " + localName + " in namespace " + namespace);
}
return sia.value();
try {
on = lookupRegistry.lookupConfigBean(moduleIdentifier.getFactoryName(), moduleIdentifier.getInstanceName());
} catch (InstanceNotFoundException e) {
- logger.error("Cannot find instance {}", moduleIdentifier);
+ LOGGER.error("Cannot find instance {}", moduleIdentifier);
throw new IllegalStateException("Cannot find instance " + moduleIdentifier, e);
}
return on;
ServiceReference serviceReference = new ServiceReference(serviceInterfaceQName, refName);
ModuleIdentifier moduleIdentifier = refNames.get(serviceReference);
if (moduleIdentifier == null) {
- logger.error("Cannot find qname {} and refName {} in {}", serviceInterfaceQName, refName, refName);
+ LOGGER.error("Cannot find qname {} and refName {} in {}", serviceInterfaceQName, refName, refName);
throw new IllegalArgumentException("Cannot find " + serviceReference);
}
return getObjectName(moduleIdentifier);
Map<String, Map<String, ObjectName>> serviceMapping = getServiceMapping();
Map<String, ObjectName> innerMap = serviceMapping.get(serviceInterfaceQName);
if (innerMap == null) {
- logger.error("Cannot find qname {} in {}", serviceInterfaceQName, refNames);
+ LOGGER.error("Cannot find qname {} in {}", serviceInterfaceQName, refNames);
throw new IllegalArgumentException("Cannot find " + serviceInterfaceQName);
}
return innerMap;
String referenceName = ObjectNameUtil.getReferenceName(objectName);
ServiceReference serviceReference = new ServiceReference(serviceQName, referenceName);
if (refNames.containsKey(serviceReference) == false) {
- logger.warn("Cannot find {} in {}", serviceReference, refNames);
+ LOGGER.warn("Cannot find {} in {}", serviceReference, refNames);
throw new InstanceNotFoundException("Service reference not found:" + objectName);
}
}
// check that service interface name exist
Set<String> serviceInterfaceQNames = factoryNamesToQNames.get(moduleIdentifier.getFactoryName());
if (serviceInterfaceQNames == null) {
- logger.error("Possible error in code: cannot find factoryName {} in {}, {}", moduleIdentifier.getFactoryName(),
+ LOGGER.error("Possible error in code: cannot find factoryName {} in {}, {}", moduleIdentifier.getFactoryName(),
factoryNamesToQNames, moduleIdentifier);
throw new IllegalStateException("Possible error in code: cannot find annotations of existing factory " + moduleIdentifier.getFactoryName());
}
// supplied serviceInterfaceName must exist in this collection
if (serviceInterfaceQNames.contains(serviceReference.getServiceInterfaceQName()) == false) {
- logger.error("Cannot find qName {} with factory name {}, found {}", serviceReference.getServiceInterfaceQName(), moduleIdentifier.getFactoryName(), serviceInterfaceQNames);
+ LOGGER.error("Cannot find qName {} with factory name {}, found {}", serviceReference.getServiceInterfaceQName(), moduleIdentifier.getFactoryName(), serviceInterfaceQNames);
throw new IllegalArgumentException("Cannot find service interface " + serviceReference.getServiceInterfaceQName() + " within factory " + moduleIdentifier.getFactoryName());
}
}
private synchronized void removeServiceReference(ServiceReference serviceReference) throws InstanceNotFoundException {
- logger.debug("Removing service reference {} from {}", serviceReference, this);
+ LOGGER.debug("Removing service reference {} from {}", serviceReference, this);
assertWritable();
// is the qName known?
if (allQNames.contains(serviceReference.getServiceInterfaceQName()) == false) {
- logger.error("Cannot find qname {} in {}", serviceReference.getServiceInterfaceQName(), allQNames);
+ LOGGER.error("Cannot find qname {} in {}", serviceReference.getServiceInterfaceQName(), allQNames);
throw new IllegalArgumentException("Cannot find service interface " + serviceReference.getServiceInterfaceQName());
}
ModuleIdentifier removed = refNames.remove(serviceReference);
private Set<ServiceReference> findServiceReferencesLinkingTo(ObjectName moduleObjectName, Set<String> serviceInterfaceQNames) {
String factoryName = ObjectNameUtil.getFactoryName(moduleObjectName);
if (serviceInterfaceQNames == null) {
- logger.warn("Possible error in code: cannot find factoryName {} in {}, object name {}", factoryName, factoryNamesToQNames, moduleObjectName);
+ LOGGER.warn("Possible error in code: cannot find factoryName {} in {}, object name {}", factoryName, factoryNamesToQNames, moduleObjectName);
throw new IllegalStateException("Possible error in code: cannot find annotations of existing factory " + factoryName);
}
String instanceName = ObjectNameUtil.getInstanceName(moduleObjectName);
*/
final class DependencyResolverImpl implements DependencyResolver,
Comparable<DependencyResolverImpl> {
- private static final Logger logger = LoggerFactory.getLogger(DependencyResolverImpl.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(DependencyResolverImpl.class);
private final ModulesHolder modulesHolder;
private final ModuleIdentifier name;
), jmxAttribute
);
- dependentReadOnlyON = translateServiceRefIfPossible(dependentReadOnlyON);
+ ObjectName newDependentReadOnlyON = translateServiceRefIfPossible(dependentReadOnlyON);
- ModuleIdentifier moduleIdentifier = ObjectNameUtil.fromON(dependentReadOnlyON, ObjectNameUtil
+ ModuleIdentifier moduleIdentifier = ObjectNameUtil.fromON(newDependentReadOnlyON, ObjectNameUtil
.TYPE_MODULE);
ModuleFactory foundFactory = modulesHolder.findModuleFactory(moduleIdentifier, jmxAttribute);
+ "Module name is %s : %s, expected service interface %s, dependent module ON %s , "
+ "attribute %s",
foundFactory.getImplementationName(), foundFactory,
- expectedServiceInterface, dependentReadOnlyON,
+ expectedServiceInterface, newDependentReadOnlyON,
jmxAttribute
);
throw new JmxAttributeValidationException(message, jmxAttribute);
// translate from serviceref to module ON
private ObjectName translateServiceRefIfPossible(ObjectName dependentReadOnlyON) {
- if (ObjectNameUtil.isServiceReference(dependentReadOnlyON)) {
- String serviceQName = ObjectNameUtil.getServiceQName(dependentReadOnlyON);
- String refName = ObjectNameUtil.getReferenceName(dependentReadOnlyON);
- dependentReadOnlyON = ObjectNameUtil.withoutTransactionName( // strip again of transaction name
+ ObjectName translatedDependentReadOnlyON = dependentReadOnlyON;
+ if (ObjectNameUtil.isServiceReference(translatedDependentReadOnlyON)) {
+ String serviceQName = ObjectNameUtil.getServiceQName(translatedDependentReadOnlyON);
+ String refName = ObjectNameUtil.getReferenceName(translatedDependentReadOnlyON);
+ translatedDependentReadOnlyON = ObjectNameUtil.withoutTransactionName( // strip again of transaction name
readableRegistry.lookupConfigBeanByServiceInterfaceName(serviceQName, refName));
}
- return dependentReadOnlyON;
+ return translatedDependentReadOnlyON;
}
/**
"Null parameters not allowed, got %s %s %s", expectedType,
dependentReadOnlyON, jmxAttribute));
}
- dependentReadOnlyON = translateServiceRefIfPossible(dependentReadOnlyON);
+ ObjectName translatedDependentReadOnlyON = translateServiceRefIfPossible(dependentReadOnlyON);
transactionStatus.checkCommitStarted();
transactionStatus.checkNotCommitted();
ModuleIdentifier dependentModuleIdentifier = ObjectNameUtil.fromON(
- dependentReadOnlyON, ObjectNameUtil.TYPE_MODULE);
+ translatedDependentReadOnlyON, ObjectNameUtil.TYPE_MODULE);
Module module = modulesHolder.findModule(dependentModuleIdentifier,
jmxAttribute);
synchronized (this) {
if (expectedBaseClass.isAssignableFrom(deserialized)) {
return (Class<T>) deserialized;
} else {
- logger.error("Cannot resolve class of identity {} : deserialized class {} is not a subclass of {}.",
+ LOGGER.error("Cannot resolve class of identity {} : deserialized class {} is not a subclass of {}.",
identityRef, deserialized, expectedBaseClass);
throw new IllegalArgumentException("Deserialized identity " + deserialized + " cannot be cast to " + expectedBaseClass);
}
@Override
public Object getAttribute(ObjectName name, String attribute)
throws MBeanException, AttributeNotFoundException, InstanceNotFoundException, ReflectionException {
- name = translateServiceRefIfPossible(name);
+ ObjectName newName = translateServiceRefIfPossible(name);
// add transaction name
- name = ObjectNameUtil.withTransactionName(name, transactionName);
- return mBeanServer.getAttribute(name, attribute);
+ newName = ObjectNameUtil.withTransactionName(newName, transactionName);
+ return mBeanServer.getAttribute(newName, attribute);
}
@Override
public <T> T newMXBeanProxy(ObjectName name, Class<T> interfaceClass) {
- name = translateServiceRefIfPossible(name);
+ ObjectName newName = translateServiceRefIfPossible(name);
// add transaction name
- name = ObjectNameUtil.withTransactionName(name, transactionName);
- return JMX.newMXBeanProxy(mBeanServer, name, interfaceClass);
+ newName = ObjectNameUtil.withTransactionName(newName, transactionName);
+ return JMX.newMXBeanProxy(mBeanServer, newName, interfaceClass);
}
}
@Override
protected Object handleInvocation(Object proxy, Method method, Object[] args) throws Throwable {
- boolean isGetInstance = method.getName().equals("getInstance");
+ boolean isGetInstance = "getInstance".equals(method.getName());
if (isGetInstance) {
if (cachedInstance != null) {
return cachedInstance;
*/
public class DestroyedModule implements AutoCloseable,
Comparable<DestroyedModule>, Identifiable<ModuleIdentifier> {
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(DestroyedModule.class);
private final ModuleIdentifier identifier;
@Override
public void close() {
- logger.trace("Destroying {}", identifier);
+ LOGGER.trace("Destroying {}", identifier);
try {
instance.close();
} catch (Exception e) {
- logger.error("Error while closing instance of {}", identifier, e);
+ LOGGER.error("Error while closing instance of {}", identifier, e);
}
try {
oldJMXRegistrator.close();
} catch (Exception e) {
- logger.error("Error while closing jmx registrator of {}", identifier, e);
+ LOGGER.error("Error while closing jmx registrator of {}", identifier, e);
}
try {
osgiRegistration.close();
} catch (Exception e) {
- logger.error("Error while closing osgi registration of {}", identifier, e);
+ LOGGER.error("Error while closing osgi registration of {}", identifier, e);
}
}
* a read only wrapper.
*/
abstract class AbstractDynamicWrapper implements DynamicMBeanModuleWrapper {
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(AbstractDynamicWrapper.class);
protected final boolean writable;
public Object getAttribute(String attributeName)
throws AttributeNotFoundException, MBeanException,
ReflectionException {
- if (attributeName.equals("MBeanInfo")) {
+ if ("MBeanInfo".equals(attributeName)) {
return getMBeanInfo();
}
result.add(new Attribute(attributeName, value));
} catch (Exception e) {
- logger.debug("Getting attribute {} failed", attributeName, e);
+ LOGGER.debug("Getting attribute {} failed", attributeName, e);
}
}
return result;
&& signature[0].equals(AttributeList.class.getName())) {
return setAttributes((AttributeList) params[0]);
} else {
- logger.debug("Operation not found {} ", actionName);
+ LOGGER.debug("Operation not found {} ", actionName);
throw new UnsupportedOperationException(
format("Operation not found on %s. Method invoke is only supported for getInstance and getAttribute(s) "
+ "method, got actionName %s, params %s, signature %s ",
public class AnnotationsHelper {
+ private AnnotationsHelper() {
+ }
+
/**
* Look for annotation specified by annotationType on method. First observe
* method's class, then its super classes, then all provided interfaces.
for (RequireInterface ri : foundRequireInterfaces) {
foundValues.add(ri.value());
}
- if (foundValues.size() == 0) {
+ if (foundValues.isEmpty()) {
return null;
} else if (foundValues.size() > 1) {
throw new IllegalStateException("Error finding @RequireInterface. "
public Object getAttribute(String attributeName)
throws AttributeNotFoundException, MBeanException,
ReflectionException {
- if (attributeName.equals("getInstance")) {
+ if ("getInstance".equals(attributeName)) {
return getInstance();
}
return super.getAttribute(attributeName);
*/
@ThreadSafe
public class DynamicWritableWrapper extends AbstractDynamicWrapper {
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(DynamicWritableWrapper.class);
private final ReadOnlyAtomicBoolean configBeanModificationDisabled;
@Override
public synchronized void setAttribute(Attribute attribute)
throws AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException {
+ Attribute newAttribute = attribute;
if (configBeanModificationDisabled.get() == true) {
throw new IllegalStateException("Operation is not allowed now");
}
- if (attribute.getName().equals("Attribute")) {
- setAttribute((Attribute) attribute.getValue());
+ if ("Attribute".equals(newAttribute.getName())) {
+ setAttribute((Attribute) newAttribute.getValue());
return;
}
try {
- if (attribute.getValue() instanceof ObjectName) {
- attribute = fixDependencyAttribute(attribute);
- } else if (attribute.getValue() instanceof ObjectName[]) {
- attribute = fixDependencyListAttribute(attribute);
+ if (newAttribute.getValue() instanceof ObjectName) {
+ newAttribute = fixDependencyAttribute(newAttribute);
+ } else if (newAttribute.getValue() instanceof ObjectName[]) {
+ newAttribute = fixDependencyListAttribute(newAttribute);
}
- internalServer.setAttribute(objectNameInternal, attribute);
+ internalServer.setAttribute(objectNameInternal, newAttribute);
} catch (InstanceNotFoundException e) {
throw new MBeanException(e);
}
}
private Attribute fixDependencyListAttribute(Attribute attribute) {
- AttributeHolder attributeHolder = attributeHolderMap.get(attribute.getName());
+ Attribute newAttribute = attribute;
+ AttributeHolder attributeHolder = attributeHolderMap.get(newAttribute.getName());
if (attributeHolder.getRequireInterfaceOrNull() != null) {
- attribute = new Attribute(attribute.getName(), fixObjectNames((ObjectName[]) attribute.getValue()));
+ newAttribute = new Attribute(newAttribute.getName(), fixObjectNames((ObjectName[]) newAttribute.getValue()));
}
- return attribute;
+ return newAttribute;
}
private Attribute fixDependencyAttribute(Attribute attribute) {
- AttributeHolder attributeHolder = attributeHolderMap.get(attribute.getName());
+ Attribute newAttribute = attribute;
+ AttributeHolder attributeHolder = attributeHolderMap.get(newAttribute.getName());
if (attributeHolder.getRequireInterfaceOrNull() != null) {
- attribute = new Attribute(attribute.getName(), fixObjectName((ObjectName) attribute.getValue()));
+ newAttribute = new Attribute(newAttribute.getName(), fixObjectName((ObjectName) newAttribute.getValue()));
} else {
- attribute = new Attribute(attribute.getName(), attribute.getValue());
+ newAttribute = new Attribute(newAttribute.getName(), newAttribute.getValue());
}
- return attribute;
+ return newAttribute;
}
private ObjectName[] fixObjectNames(ObjectName[] dependencies) {
setAttribute(attribute);
result.add(attribute);
} catch (Exception e) {
- logger.warn("Setting attribute {} failed on {}", attribute.getName(), moduleIdentifier, e);
+ LOGGER.warn("Setting attribute {} failed on {}", attribute.getName(), moduleIdentifier, e);
throw new IllegalArgumentException(
"Setting attribute failed - " + attribute.getName()
+ " on " + moduleIdentifier, e);
import java.util.Set;
public class InternalJMXRegistrator implements Closeable {
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(InternalJMXRegistrator.class);
private final MBeanServer configMBeanServer;
@GuardedBy("this")
private final Set<ObjectName> registeredObjectNames = new HashSet<>();
+ @GuardedBy("this")
private final List<InternalJMXRegistrator> children = new ArrayList<>();
public synchronized InternalJMXRegistration registerMBean(Object object,
}
}
- public InternalJMXRegistrator createChild() {
- InternalJMXRegistrator child = new InternalJMXRegistrator(
- configMBeanServer);
+ public synchronized InternalJMXRegistrator createChild() {
+ InternalJMXRegistrator child = new InternalJMXRegistrator(configMBeanServer);
children.add(child);
return child;
}
try {
configMBeanServer.unregisterMBean(on);
} catch (Exception e) {
- logger.warn("Ignoring error while unregistering {}", on, e);
+ LOGGER.warn("Ignoring error while unregistering {}", on, e);
}
}
registeredObjectNames.clear();
return getSameNames(result);
}
- private Set<ObjectName> getSameNames(Set<ObjectName> superSet) {
+ private synchronized Set<ObjectName> getSameNames(Set<ObjectName> superSet) {
Set<ObjectName> result = new HashSet<>(superSet);
result.retainAll(registeredObjectNames);
for (InternalJMXRegistrator child : children) {
public ModuleJMXRegistration registerMBean(Object object, ObjectName on)
throws InstanceAlreadyExistsException {
ObjectNameUtil.checkType(on, ObjectNameUtil.TYPE_MODULE);
- if (ObjectNameUtil.getTransactionName(on) != null)
+ if (ObjectNameUtil.getTransactionName(on) != null) {
throw new IllegalArgumentException(
"Transaction name not expected in " + on);
+ }
return new ModuleJMXRegistration(childJMXRegistrator.registerMBean(
object, on));
}
public static class OsgiRegistration implements AutoCloseable {
- private static final Logger logger = LoggerFactory.getLogger(OsgiRegistration.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(OsgiRegistration.class);
@GuardedBy("this")
private AutoCloseable instance;
try {
serviceRegistration.unregister();
} catch(IllegalStateException e) {
- logger.trace("Cannot unregister {}", serviceRegistration, e);
+ LOGGER.trace("Cannot unregister {}", serviceRegistration, e);
}
}
serviceRegistrations.clear();
notEquals |= newAnnotationMapping.equals(serviceNamesToAnnotations) == false;
if (notEquals) {
// FIXME: changing from old state to new state can be improved by computing the diff
- logger.debug("Detected change in service registrations for {}: old: {}, new: {}", moduleIdentifier,
+ LOGGER.debug("Detected change in service registrations for {}: old: {}, new: {}", moduleIdentifier,
serviceNamesToAnnotations, newAnnotationMapping);
close();
this.instance = newInstance;
* functionality.
*/
public class BlankTransactionServiceTracker implements ServiceTrackerCustomizer<ModuleFactory, Object> {
- private static final Logger logger = LoggerFactory.getLogger(BlankTransactionServiceTracker.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(BlankTransactionServiceTracker.class);
public static final int DEFAULT_MAX_ATTEMPTS = 10;
try {
// create transaction
CommitStatus commitStatus = blankTransaction.hit();
- logger.debug("Committed blank transaction with status {}", commitStatus);
+ LOGGER.debug("Committed blank transaction with status {}", commitStatus);
return;
} catch (ConflictingVersionException e) {
lastException = e;
throw new IllegalStateException(interruptedException);
}
} catch (ValidationException e) {
- logger.error("Validation exception while running blank transaction indicates programming error", e);
+ LOGGER.error("Validation exception while running blank transaction indicates programming error", e);
throw new RuntimeException("Validation exception while running blank transaction indicates programming error", e);
}
}
*/
public class BundleContextBackedModuleFactoriesResolver implements
ModuleFactoriesResolver {
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(BundleContextBackedModuleFactoriesResolver.class);
private final BundleContext bundleContext;
if (serviceReference.getBundle() == null || serviceReference.getBundle().getBundleContext() == null) {
throw new NullPointerException("Bundle context of " + factory + " ModuleFactory not found.");
}
- logger.debug("Reading factory {} {}", moduleName, factory);
+ LOGGER.debug("Reading factory {} {}", moduleName, factory);
Map.Entry<ModuleFactory, BundleContext> conflicting = result.get(moduleName);
if (conflicting != null) {
String error = String
.format("Module name is not unique. Found two conflicting factories with same name '%s': '%s' '%s'",
moduleName, conflicting.getKey(), factory);
- logger.error(error);
+ LOGGER.error(error);
throw new IllegalArgumentException(error);
} else {
result.put(moduleName, new AbstractMap.SimpleImmutableEntry<>(factory,
*/
public class ModuleFactoryBundleTracker implements BundleTrackerCustomizer<Object> {
private final BlankTransactionServiceTracker blankTransactionServiceTracker;
- private static final Logger logger = LoggerFactory.getLogger(ModuleFactoryBundleTracker.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ModuleFactoryBundleTracker.class);
public ModuleFactoryBundleTracker(BlankTransactionServiceTracker blankTransactionServiceTracker) {
this.blankTransactionServiceTracker = blankTransactionServiceTracker;
@Override
public Object addingBundle(Bundle bundle, BundleEvent event) {
URL resource = bundle.getEntry("META-INF/services/" + ModuleFactory.class.getName());
- logger.trace("Got addingBundle event of bundle {}, resource {}, event {}",
+ LOGGER.trace("Got addingBundle event of bundle {}, resource {}, event {}",
bundle, resource, event);
if (resource != null) {
try (InputStream inputStream = resource.openStream()) {
registerFactory(factoryClassName, bundle);
}
} catch (Exception e) {
- logger.error("Error while reading {}", resource, e);
+ LOGGER.error("Error while reading {}", resource, e);
throw new RuntimeException(e);
}
}
Class<?> clazz = bundle.loadClass(factoryClassName);
if (ModuleFactory.class.isAssignableFrom(clazz)) {
try {
- logger.debug("Registering {} in bundle {}",
+ LOGGER.debug("Registering {} in bundle {}",
clazz.getName(), bundle);
return bundle.getBundleContext().registerService(
ModuleFactory.class.getName(), clazz.newInstance(),
}
public static String logMessage(String slfMessage, Object... params) {
- logger.info(slfMessage, params);
+ LOGGER.info(slfMessage, params);
String formatMessage = slfMessage.replaceAll("\\{\\}", "%s");
return format(formatMessage, params);
}
*/
public final class ModuleInfoBundleTracker implements BundleTrackerCustomizer<Collection<ObjectRegistration<YangModuleInfo>>> {
- private static final Logger logger = LoggerFactory.getLogger(ModuleInfoBundleTracker.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ModuleInfoBundleTracker.class);
public static final String MODULE_INFO_PROVIDER_PATH_PREFIX = "META-INF/services/";
@Override
public Collection<ObjectRegistration<YangModuleInfo>> addingBundle(Bundle bundle, BundleEvent event) {
URL resource = bundle.getEntry(MODULE_INFO_PROVIDER_PATH_PREFIX + YangModelBindingProvider.class.getName());
- logger.debug("Got addingBundle({}) with YangModelBindingProvider resource {}", bundle, resource);
+ LOGGER.debug("Got addingBundle({}) with YangModelBindingProvider resource {}", bundle, resource);
if(resource==null) {
return null;
}
try (InputStream inputStream = resource.openStream()) {
List<String> lines = IOUtils.readLines(inputStream);
for (String moduleInfoName : lines) {
- logger.trace("Retrieve ModuleInfo({}, {})", moduleInfoName, bundle);
+ LOGGER.trace("Retrieve ModuleInfo({}, {})", moduleInfoName, bundle);
YangModuleInfo moduleInfo = retrieveModuleInfo(moduleInfoName, bundle);
registrations.add(moduleInfoRegistry.registerModuleInfo(moduleInfo));
}
} catch (Exception e) {
- logger.error("Error while reading {}", resource, e);
+ LOGGER.error("Error while reading {}", resource, e);
throw new RuntimeException(e);
}
- logger.trace("Got following registrations {}", registrations);
+ LOGGER.trace("Got following registrations {}", registrations);
return registrations;
}
} catch (NoClassDefFoundError e) {
- logger.error("Error while executing getModuleInfo on {}", instance, e);
+ LOGGER.error("Error while executing getModuleInfo on {}", instance, e);
throw e;
}
}
}
public static String logMessage(String slfMessage, Object... params) {
- logger.info(slfMessage, params);
+ LOGGER.info(slfMessage, params);
String formatMessage = slfMessage.replaceAll("\\{\\}", "%s");
return format(formatMessage, params);
}
public class InterfacesHelper {
+ private InterfacesHelper() {
+ }
+
public static Set<Class<?>> getAllInterfaces(Class<?> clazz) {
if (clazz.isInterface()) {
throw new IllegalArgumentException(clazz
}
private static Set<Class<?>> getAllSuperInterfaces(Set<Class<?>> ifcs) {
- ifcs = new HashSet<>(ifcs); // create copy to modify
+ Set<Class<?>> interfaces = new HashSet<>(ifcs); // create copy to modify
// each interface can extend other interfaces
Set<Class<?>> result = new HashSet<>();
- while (ifcs.size() > 0) {
- Iterator<Class<?>> iterator = ifcs.iterator();
+ while (!interfaces.isEmpty()) {
+ Iterator<Class<?>> iterator = interfaces.iterator();
Class<?> ifc = iterator.next();
iterator.remove();
if (ifc.isInterface() == false) {
throw new IllegalArgumentException(ifc + " should be an interface");
}
- ifcs.addAll(Arrays.asList(ifc.getInterfaces()));
+ interfaces.addAll(Arrays.asList(ifc.getInterfaces()));
result.add(ifc);
}
return result;
public class LookupBeansUtil {
+ private LookupBeansUtil() {
+ }
+
public static ObjectName lookupConfigBean(LookupRegistry lookupRegistry,
String moduleName, String instanceName)
throws InstanceNotFoundException {
Set<ObjectName> objectNames = lookupRegistry.lookupConfigBeans(
moduleName, instanceName);
- if (objectNames.size() == 0) {
+ if (objectNames.isEmpty()) {
throw new InstanceNotFoundException("No instance found");
} else if (objectNames.size() > 1) {
throw new InstanceNotFoundException("Too many instances found");
public class ModuleQNameUtil {
+ private ModuleQNameUtil() {
+ }
+
public static Set<String> getQNames(Map<String, Entry<ModuleFactory, BundleContext>> resolved) {
Set<String> result = new HashSet<>();
for (Entry<ModuleFactory, BundleContext> entry : resolved.values()) {
import org.slf4j.LoggerFactory;
public class OsgiRegistrationUtil {
- private static final Logger logger = LoggerFactory.getLogger(OsgiRegistrationUtil.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(OsgiRegistrationUtil.class);
+
+ private OsgiRegistrationUtil() {
+ }
@SafeVarargs
public static <T> AutoCloseable registerService(BundleContext bundleContext, T service, Class<? super T> ... interfaces) {
try {
ac.close();
} catch (Exception e) {
- logger.warn("Exception while closing {}", ac, e);
+ LOGGER.warn("Exception while closing {}", ac, e);
if (firstException == null) {
firstException = e;
} else {
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>config-module-archetype</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>config-netty-config</artifactId>
<description>Configuration files for sal-rest-connector</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-persister-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-persister-directory-xml-adapter</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
* Delegates the the contained feature and provides additional methods.
*/
public class AbstractFeatureWrapper implements Feature {
- private static final Logger logger = LoggerFactory.getLogger(AbstractFeatureWrapper.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(AbstractFeatureWrapper.class);
protected Feature feature = null;
protected AbstractFeatureWrapper() {
try {
snapShotHolders.add(new FeatureConfigSnapshotHolder(c,this));
} catch (JAXBException e) {
- logger.debug("{} is not a config subsystem config file",c.getFinalname());
+ LOGGER.debug("{} is not a config subsystem config file",c.getFinalname());
}
}
return snapShotHolders;
@Override
public boolean equals(Object obj) {
- if (this == obj)
+ if (this == obj) {
return true;
- if (obj == null)
+ }
+ if (obj == null) {
return false;
- if (getClass() != obj.getClass())
+ }
+ if (getClass() != obj.getClass()) {
return false;
+ }
AbstractFeatureWrapper other = (AbstractFeatureWrapper) obj;
if (feature == null) {
- if (other.feature != null)
+ if (other.feature != null) {
return false;
- } else if (!feature.equals(other.feature))
+ }
+ } else if (!feature.equals(other.feature)) {
return false;
+ }
return true;
}
* Delegates the the contained feature and provides additional methods.
*/
public class ChildAwareFeatureWrapper extends AbstractFeatureWrapper implements Feature {
- private static final Logger logger = LoggerFactory.getLogger(ChildAwareFeatureWrapper.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ChildAwareFeatureWrapper.class);
private FeaturesService featuresService= null;
protected ChildAwareFeatureWrapper(Feature f) {
f = new FeatureConfigSnapshotHolder(h,this);
snapShotHolders.add(f);
} catch (JAXBException e) {
- logger.debug("{} is not a config subsystem config file",h.getFileInfo().getFinalname());
+ LOGGER.debug("{} is not a config subsystem config file",h.getFileInfo().getFinalname());
}
}
}
for(Feature f: features) {
if (f.getName().equals(dependency.getName())) {
Version v = VersionTable.getVersion(f.getVersion());
- if (range.contains(v)) {
- if (fi == null || VersionTable.getVersion(fi.getVersion()).compareTo(v) < 0) {
+ if (range.contains(v) &&
+ (fi == null || VersionTable.getVersion(fi.getVersion()).compareTo(v) < 0)) {
fi = f;
break;
- }
}
}
}
import org.slf4j.LoggerFactory;
public class ConfigFeaturesListener implements FeaturesListener, AutoCloseable {
- private static final Logger logger = LoggerFactory.getLogger(ConfigFeaturesListener.class);
- private static final int QUEUE_SIZE = 100;
+ private static final Logger LOGGER = LoggerFactory.getLogger(ConfigFeaturesListener.class);
+ private static final int QUEUE_SIZE = 1000;
private BlockingQueue<FeatureEvent> queue = new LinkedBlockingQueue<FeatureEvent>(QUEUE_SIZE);
Thread pushingThread = null;
@Override
public void repositoryEvent(RepositoryEvent event) {
- logger.debug("Repository: " + event.getType() + " " + event.getRepository());
+ LOGGER.debug("Repository: " + event.getType() + " " + event.getRepository());
}
@Override
import org.slf4j.LoggerFactory;
public class ConfigPusherCustomizer implements ServiceTrackerCustomizer<ConfigPusher, ConfigPusher>, AutoCloseable {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPusherCustomizer.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ConfigPusherCustomizer.class);
private ConfigFeaturesListener configFeaturesListener = null;
private FeatureServiceCustomizer featureServiceCustomizer = null;
private ServiceTracker<FeaturesService,FeaturesService> fsst = null;
@Override
public ConfigPusher addingService(ServiceReference<ConfigPusher> configPusherServiceReference) {
- logger.trace("Got ConfigPusherCustomizer.addingService {}", configPusherServiceReference);
+ LOGGER.trace("Got ConfigPusherCustomizer.addingService {}", configPusherServiceReference);
BundleContext bc = configPusherServiceReference.getBundle().getBundleContext();
ConfigPusher cpService = bc.getService(configPusherServiceReference);
featureServiceCustomizer = new FeatureServiceCustomizer(cpService);
import com.google.common.collect.LinkedHashMultimap;
public class ConfigPushingRunnable implements Runnable {
- private static final Logger logger = LoggerFactory.getLogger(ConfigPushingRunnable.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ConfigPushingRunnable.class);
private static final int POLL_TIME = 1;
private BlockingQueue<FeatureEvent> queue;
private FeatureConfigPusher configPusher;
processFeatureEvent(event,toInstall);
}
} else if(toInstall.isEmpty()) {
- logger.error("ConfigPushingRunnable - exiting");
+ LOGGER.error("ConfigPushingRunnable - exiting");
return;
}
} catch (InterruptedException e) {
- logger.error("ConfigPushingRunnable - interupted");
+ LOGGER.error("ConfigPushingRunnable - interupted");
interuppted = true;
} catch (Exception e) {
- logger.error("Exception while processing features {}", e);
+ LOGGER.error("Exception while processing features {}", e);
}
}
}
protected void logPushResult(LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> results) {
for(Feature f:results.keySet()) {
- logger.info("Pushed configs for feature {} {}",f,results.get(f));
+ LOGGER.info("Pushed configs for feature {} {}",f,results.get(f));
}
}
}
* Simple class to push configs to the config subsystem from Feature's configfiles
*/
public class FeatureConfigPusher {
- private static final Logger logger = LoggerFactory.getLogger(FeatureConfigPusher.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FeatureConfigPusher.class);
+ private static final int MAX_RETRIES=100;
+ private static final int RETRY_PAUSE_MILLIS=1;
private FeaturesService featuresService = null;
private ConfigPusher pusher = null;
/*
}
private boolean isInstalled(Feature feature) {
- List<Feature> installedFeatures = Arrays.asList(featuresService.listInstalledFeatures());
- return installedFeatures.contains(feature);
+ for(int retries=0;retries<MAX_RETRIES;retries++) {
+ try {
+ List<Feature> installedFeatures = Arrays.asList(featuresService.listInstalledFeatures());
+ if(installedFeatures.contains(feature)) {
+ return true;
+ } else {
+ LOGGER.warn("Karaf featuresService.listInstalledFeatures() has not yet finished installing feature (retry {}) {} {}",retries,feature.getName(),feature.getVersion());
+ }
+ } catch (Exception e) {
+ if(retries < MAX_RETRIES) {
+ LOGGER.warn("Karaf featuresService.listInstalledFeatures() has thrown an exception, retry {}, Exception {}", retries,e);
+ } else {
+ LOGGER.error("Giving up on Karaf featuresService.listInstalledFeatures() which has thrown an exception, retry {}, Exception {}", retries,e);
+ throw e;
+ }
+ }
+ try {
+ Thread.sleep(RETRY_PAUSE_MILLIS);
+ } catch (InterruptedException e1) {
+ throw new IllegalStateException(e1);
+ }
+ }
+ LOGGER.error("Giving up (after {} retries) on Karaf featuresService.listInstalledFeatures() which has not yet finished installing feature {} {}",MAX_RETRIES,feature.getName(),feature.getVersion());
+ return false;
}
private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(LinkedHashSet<FeatureConfigSnapshotHolder> configs) throws InterruptedException {
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-persister-file-xml-adapter</artifactId>
* StorageAdapter that stores configuration in an xml file.
*/
public class XmlFileStorageAdapter implements StorageAdapter, Persister {
- private static final Logger logger = LoggerFactory.getLogger(XmlFileStorageAdapter.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(XmlFileStorageAdapter.class);
public static final String FILE_STORAGE_PROP = "fileStorage";
public static final String NUMBER_OF_BACKUPS = "numberOfBackups";
@Override
public Persister instantiate(PropertiesProvider propertiesProvider) {
File storage = extractStorageFileFromProperties(propertiesProvider);
- logger.debug("Using file {}", storage.getAbsolutePath());
+ LOGGER.debug("Using file {}", storage.getAbsolutePath());
// Create file if it does not exist
File parentFile = storage.getAbsoluteFile().getParentFile();
if (parentFile.exists() == false) {
- logger.debug("Creating parent folders {}", parentFile);
+ LOGGER.debug("Creating parent folders {}", parentFile);
parentFile.mkdirs();
}
if (storage.exists() == false) {
- logger.debug("Storage file does not exist, creating empty file");
+ LOGGER.debug("Storage file does not exist, creating empty file");
try {
boolean result = storage.createNewFile();
if (result == false)
} else {
numberOfStoredBackups = Integer.MAX_VALUE;
}
- logger.trace("Property {} set to {}", NUMBER_OF_BACKUPS, numberOfStoredBackups);
+ LOGGER.trace("Property {} set to {}", NUMBER_OF_BACKUPS, numberOfStoredBackups);
return result;
}
Optional<ConfigSnapshot> lastSnapshot = Config.fromXml(storage).getLastSnapshot();
- if (lastSnapshot.isPresent())
+ if (lastSnapshot.isPresent()) {
return Lists.newArrayList(toConfigSnapshot(lastSnapshot.get()));
- else
+ } else {
return Collections.emptyList();
+ }
}
@Override
public String toString() {
- final StringBuffer sb = new StringBuffer("ConfigSnapshot{");
+ final StringBuilder sb = new StringBuilder("ConfigSnapshot{");
sb.append("configSnapshot='").append(configSnapshot).append('\'');
sb.append(", capabilities=").append(capabilities);
sb.append('}');
final class StringTrimAdapter extends XmlAdapter<String, String> {
@Override
public String unmarshal(String v) throws Exception {
- if (v == null)
+ if (v == null) {
return null;
+ }
return v.trim();
}
@Override
public String marshal(String v) throws Exception {
- if (v == null)
+ if (v == null) {
return null;
+ }
return v.trim();
}
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>config-plugin-parent</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>config-util</artifactId>
*/
@Deprecated
public <T> T newMBeanProxy(ObjectName on, Class<T> clazz) {
- on = translateServiceRefIfPossible(on, clazz, configMBeanServer);
- return JMX.newMBeanProxy(configMBeanServer, on, clazz);
+ ObjectName onObj = translateServiceRefIfPossible(on, clazz, configMBeanServer);
+ return JMX.newMBeanProxy(configMBeanServer, onObj, clazz);
}
static ObjectName translateServiceRefIfPossible(ObjectName on, Class<?> clazz, MBeanServer configMBeanServer) {
- if (ObjectNameUtil.isServiceReference(on) && clazz.equals(ServiceReferenceMXBean.class) == false) {
- ServiceReferenceMXBean proxy = JMX.newMXBeanProxy(configMBeanServer, on, ServiceReferenceMXBean.class);
- on = proxy.getCurrentImplementation();
+ ObjectName onObj = on;
+ if (ObjectNameUtil.isServiceReference(onObj) && clazz.equals(ServiceReferenceMXBean.class) == false) {
+ ServiceReferenceMXBean proxy = JMX.newMXBeanProxy(configMBeanServer, onObj, ServiceReferenceMXBean.class);
+ onObj = proxy.getCurrentImplementation();
}
- return on;
+ return onObj;
}
}
public <T> T newMXBeanProxy(ObjectName on, Class<T> clazz) {
+ ObjectName onName = on;
// if on is without transaction, add it. Reason is that when using getters on MXBeans the transaction name is stripped
- on = ObjectNameUtil.withTransactionName(on, getTransactionName());
+ onName = ObjectNameUtil.withTransactionName(onName, getTransactionName());
// if this is service reference and user requests for implementation, look it up
- on = ConfigRegistryJMXClient.translateServiceRefIfPossible(on, clazz, configMBeanServer);
- on = ObjectNameUtil.withTransactionName(on, getTransactionName());
- return JMX.newMXBeanProxy(configMBeanServer, on, clazz);
+ onName = ConfigRegistryJMXClient.translateServiceRefIfPossible(onName, clazz, configMBeanServer);
+ onName = ObjectNameUtil.withTransactionName(onName, getTransactionName());
+ return JMX.newMXBeanProxy(configMBeanServer, onName, clazz);
}
/**
@Override
public void setAttribute(ObjectName on, String attrName, Attribute attribute) {
- if (ObjectNameUtil.getTransactionName(on) == null)
+ if (ObjectNameUtil.getTransactionName(on) == null) {
throw new IllegalArgumentException("Not in transaction instance "
+ on + ", no transaction name present");
+ }
try {
configMBeanServer.setAttribute(on, attribute);
@Override
public Attribute getAttribute(ObjectName on, String attrName) {
- if (ObjectNameUtil.getTransactionName(on) == null)
+ if (ObjectNameUtil.getTransactionName(on) == null) {
throw new IllegalArgumentException("Not in transaction instance "
+ on + ", no transaction name present");
+ }
try {
return new Attribute(attrName, configMBeanServer.getAttribute(on,attrName));
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>logback-config-loader</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>logback-config</artifactId>
public class ContextSetterImpl implements ContextSetter, Closeable {
private final LogbackStatusListener statusListener;
- private static final org.slf4j.Logger classLogger = LoggerFactory.getLogger(ContextSetterImpl.class);
+ private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ContextSetterImpl.class);
public ContextSetterImpl(LogbackRuntimeRegistrator rootRuntimeBeanRegistratorWrapper) {
statusListener = new LogbackStatusListener(rootRuntimeBeanRegistratorWrapper);
Map<String, Appender<ILoggingEvent>> appendersMap = getAppenders(module, context);
for (LoggerTO logger : module.getLoggerTO()) {
- classLogger.trace("Setting configuration for logger {}", logger.getLoggerName());
+ LOGGER.trace("Setting configuration for logger {}", logger.getLoggerName());
final ch.qos.logback.classic.Logger logbackLogger = context.getLogger(logger.getLoggerName());
Optional<Set<Appender<ILoggingEvent>>> appendersBefore = getAppendersBefore(loggersBefore, logbackLogger);
- classLogger.trace("Logger {}: Appenders registered before: {}", logger.getLoggerName(),
+ LOGGER.trace("Logger {}: Appenders registered before: {}", logger.getLoggerName(),
appendersBefore.isPresent() ? appendersBefore.get() : "NO APPENDERS BEFORE");
logbackLogger.setLevel(Level.toLevel(logger.getLevel()));
for (String appenderName : logger.getAppenders()) {
if (appendersMap.containsKey(appenderName)) {
logbackLogger.addAppender(appendersMap.get(appenderName));
- classLogger.trace("Logger {}: Adding new appender: {}", logger.getLoggerName(), appenderName);
+ LOGGER.trace("Logger {}: Adding new appender: {}", logger.getLoggerName(), appenderName);
} else {
throw new IllegalStateException("No appender " + appenderName
+ " found. This error should have been discovered by validation");
for (Appender<ILoggingEvent> appenderBefore : appendersBefore.get()) {
logbackLogger.detachAppender(appenderBefore);
appenderBefore.stop();
- classLogger.trace("Logger {}: Removing old appender: {}", logger.getLoggerName(),
+ LOGGER.trace("Logger {}: Removing old appender: {}", logger.getLoggerName(),
appenderBefore.getName());
}
loggersBefore.remove(logbackLogger);
appendersBefore.add(appenderIt.next());
}
return Optional.of(appendersBefore);
- } else
+ } else {
return Optional.absent();
+ }
}
context.getLogger(Logger.ROOT_LOGGER_NAME));
for (org.slf4j.Logger log : loggersToBeAdd) {
LoggerTO logger = new LoggerTO();
- if (((Logger) log).getLevel() != null)
+ if (((Logger) log).getLevel() != null) {
logger.setLevel(((Logger) log).getLevel().levelStr);
- else
+ } else {
logger.setLevel(((Logger) log).getEffectiveLevel().levelStr);
+ }
logger.setLoggerName(log.getName());
Iterator<Appender<ILoggingEvent>> iter = ((Logger) log).iteratorForAppenders();
while (iter.hasNext()) {
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>netty-config-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>netty-event-executor-config</artifactId>
public class ImmediateEventExecutorModuleTest extends AbstractConfigTest {
- private GlobalEventExecutorModuleFactory factory;
+ private ImmediateEventExecutorModuleFactory factory;
private final String instanceName = ImmediateEventExecutorModuleFactory.SINGLETON_NAME;
@Before
public void setUp() {
- factory = new GlobalEventExecutorModuleFactory();
+ factory = new ImmediateEventExecutorModuleFactory();
super.initConfigTransactionManagerImpl(new HardcodedModuleFactoriesResolver(mockedContext,factory));
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>netty-threadgroup-config</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>netty-timer-config</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<prerequisites>
<configuration>
<outputDirectory>${project.build.directory}/jacoco</outputDirectory>
<haltOnFailure>false</haltOnFailure>
- <check>
- <classRatio>80</classRatio>
- </check>
+ <rules>
+ <rule>
+ <element>CLASS</element>
+ <excludes>
+ <exclude>*Test</exclude>
+ </excludes>
+ <limits>
+ <limit>
+ <counter>LINE</counter>
+ <value>COVEREDRATIO</value>
+ <minimum>0.50</minimum>
+ </limit>
+ </limits>
+ </rule>
+ </rules>
</configuration>
</execution>
</executions>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>shutdown-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>shutdown-impl</artifactId>
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-/**
- * Generated file
-
- * Generated from: yang module name: shutdown-impl yang module local name: shutdown
- * Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
- * Generated at: Wed Dec 18 14:02:06 CET 2013
- *
- * Do not modify this file unless it is present under src/main directory
- */
package org.opendaylight.controller.config.yang.shutdown.impl;
+import java.util.Arrays;
+import java.util.Set;
import org.opendaylight.controller.config.api.DependencyResolver;
import org.opendaylight.controller.config.api.DependencyResolverFactory;
import org.opendaylight.controller.config.api.ModuleIdentifier;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleContext;
-import java.util.Arrays;
-import java.util.Set;
-
public class ShutdownModuleFactory extends AbstractShutdownModuleFactory {
public ShutdownModule instantiateModule(String instanceName, DependencyResolver dependencyResolver,
package org.opendaylight.controller.config.yang.shutdown.impl;
import com.google.common.base.Optional;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
import org.opendaylight.controller.config.shutdown.ShutdownService;
import org.osgi.framework.Bundle;
import org.osgi.framework.BundleException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadInfo;
-
public class ShutdownServiceImpl implements ShutdownService, AutoCloseable {
private final ShutdownService impl;
private final ShutdownRuntimeRegistration registration;
}
class Impl implements ShutdownService {
- private static final Logger logger = LoggerFactory.getLogger(Impl.class);
+ private static final Logger LOG = LoggerFactory.getLogger(Impl.class);
private final String secret;
private final Bundle systemBundle;
@Override
public void shutdown(String inputSecret, Long maxWaitTime, Optional<String> reason) {
- logger.warn("Shutdown issued with secret {} and reason {}", inputSecret, reason);
+ LOG.warn("Shutdown issued with secret {} and reason {}", inputSecret, reason);
try {
Thread.sleep(1000); // prevent brute force attack
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- logger.warn("Shutdown process interrupted", e);
+ LOG.warn("Shutdown process interrupted", e);
}
if (this.secret.equals(inputSecret)) {
- logger.info("Server is shutting down");
+ LOG.info("Server is shutting down");
// actual work:
Thread stopSystemBundleThread = new StopSystemBundleThread(systemBundle);
stopSystemBundleThread.start();
if (maxWaitTime != null && maxWaitTime > 0) {
Thread systemExitThread = new CallSystemExitThread(maxWaitTime);
- logger.debug("Scheduling {}", systemExitThread);
+ LOG.debug("Scheduling {}", systemExitThread);
systemExitThread.start();
}
// end
} else {
- logger.warn("Unauthorized attempt to shut down server");
+ LOG.warn("Unauthorized attempt to shut down server");
throw new IllegalArgumentException("Invalid secret");
}
}
}
class StopSystemBundleThread extends Thread {
- private static final Logger logger = LoggerFactory.getLogger(StopSystemBundleThread.class);
+ private static final Logger LOG = LoggerFactory.getLogger(StopSystemBundleThread.class);
private final Bundle systemBundle;
StopSystemBundleThread(Bundle systemBundle) {
try {
// wait so that JMX response is received
Thread.sleep(1000);
- logger.debug("Stopping system bundle");
+ LOG.debug("Stopping system bundle");
systemBundle.stop();
} catch (BundleException e) {
- logger.warn("Can not stop OSGi server", e);
+ LOG.warn("Can not stop OSGi server", e);
} catch (InterruptedException e) {
- logger.warn("Shutdown process interrupted", e);
+ LOG.warn("Shutdown process interrupted", e);
}
}
}
class CallSystemExitThread extends Thread {
- private static final Logger logger = LoggerFactory.getLogger(CallSystemExitThread.class);
+ private static final Logger LOG = LoggerFactory.getLogger(CallSystemExitThread.class);
private final long maxWaitTime;
CallSystemExitThread(long maxWaitTime) {
super("call-system-exit-daemon");
try {
// wait specified time
Thread.sleep(maxWaitTime);
- logger.error("Since some threads are still running, server is going to shut down via System.exit(1) !");
+ LOG.error("Since some threads are still running, server is going to shut down via System.exit(1) !");
// do a thread dump
ThreadInfo[] threads = ManagementFactory.getThreadMXBean().dumpAllThreads(true, true);
StringBuffer sb = new StringBuffer();
sb.append(info);
sb.append("\n");
}
- logger.warn("Thread dump:{}", sb);
+ LOG.warn("Thread dump:{}", sb);
System.exit(1);
} catch (InterruptedException e) {
- logger.warn("Interrupted, not going to call System.exit(1)");
+ LOG.warn("Interrupted, not going to call System.exit(1)");
}
}
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>threadpool-config-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>threadpool-config-impl</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>yang-jmx-generator-it</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>yang-jmx-generator-plugin</artifactId>
final class CodeWriter {
- private static final Logger logger = LoggerFactory.getLogger(CodeWriter.class);
- private static final Optional<String> copyright = StringUtil.loadCopyright();
+ private static final Logger LOGGER = LoggerFactory.getLogger(CodeWriter.class);
+ private static final Optional<String> COPYRIGHT = StringUtil.loadCopyright();
public File writeSie(ServiceInterfaceEntry sie, File outputBaseDir) {
try {
GeneralInterfaceTemplate generalInterfaceTemplate = TemplateFactory.serviceInterfaceFromSie(sie);
- GeneratedObject go = new GenericGeneratedObjectFactory().toGeneratedObject(generalInterfaceTemplate, copyright);
+ GeneratedObject go = new GenericGeneratedObjectFactory().toGeneratedObject(generalInterfaceTemplate, COPYRIGHT);
return go.persist(outputBaseDir).get().getValue();
} catch (Exception e) {
String message = "An error occurred during Service interface generating, sie:"
+ sie.getTypeName() + ", " + sie.getFullyQualifiedName();
- logger.error(message, e);
+ LOGGER.error(message, e);
throw new RuntimeException(message, e);
}
}
// TOs
Map<String,GeneralClassTemplate> tosFromMbe = TemplateFactory.tOsFromMbe(mbe);
for(GeneralClassTemplate template: tosFromMbe.values()) {
- gos.put(new GenericGeneratedObjectFactory().toGeneratedObject(template, copyright), true);
+ gos.put(new GenericGeneratedObjectFactory().toGeneratedObject(template, COPYRIGHT), true);
}
// MXBean interface
GeneralInterfaceTemplate ifcTemplate = TemplateFactory.mXBeanInterfaceTemplateFromMbe(mbe);
- gos.put(new GenericGeneratedObjectFactory().toGeneratedObject(ifcTemplate, copyright), true);
+ gos.put(new GenericGeneratedObjectFactory().toGeneratedObject(ifcTemplate, COPYRIGHT), true);
// generate abstract factory
- gos.put(new AbsFactoryGeneratedObjectFactory().toGeneratedObject(mbe, copyright), true);
+ gos.put(new AbsFactoryGeneratedObjectFactory().toGeneratedObject(mbe, COPYRIGHT), true);
// generate abstract module
- gos.put(new AbsModuleGeneratedObjectFactory().toGeneratedObject(mbe, copyright), true);
+ gos.put(new AbsModuleGeneratedObjectFactory().toGeneratedObject(mbe, COPYRIGHT), true);
// generate concrete factory
StubFactoryTemplate concreteFactory = TemplateFactory.stubFactoryTemplateFromMbe(mbe);
- gos.put(new GenericGeneratedObjectFactory().toGeneratedObject(concreteFactory, copyright), false);
+ gos.put(new GenericGeneratedObjectFactory().toGeneratedObject(concreteFactory, COPYRIGHT), false);
// generate concrete module
- gos.put(new ConcreteModuleGeneratedObjectFactory().toGeneratedObject(mbe, copyright, Optional.<String>absent()), false);
+ gos.put(new ConcreteModuleGeneratedObjectFactory().toGeneratedObject(mbe, COPYRIGHT, Optional.<String>absent()), false);
// write runtime bean MXBeans and registrators
List<FtlTemplate> allFtlFiles = getRuntimeBeanFtlTemplates(mbe.getRuntimeBeans());
for(FtlTemplate template: allFtlFiles) {
- gos.put(new GenericGeneratedObjectFactory().toGeneratedObject(template, copyright), true);
+ gos.put(new GenericGeneratedObjectFactory().toGeneratedObject(template, COPYRIGHT), true);
}
generatedFiles.addAll(persistGeneratedObjects(targetBaseDir, mainBaseDir, gos));
} catch (Exception e) {
String message = "An error occurred during Module generating, mbe:"
+ mbe.getJavaNamePrefix();
- logger.error(message, e);
+ LOGGER.error(message, e);
throw new RuntimeException(message, e);
}
}
private PackageTranslator packageTranslator;
private final CodeWriter codeWriter;
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(JMXGenerator.class);
private Map<String, String> namespaceToPackageMapping;
private File resourceBaseDir;
packageTranslator = new PackageTranslator(namespaceToPackageMapping);
- if (!outputBaseDir.exists())
+ if (!outputBaseDir.exists()) {
outputBaseDir.mkdirs();
+ }
GeneratedFilesTracker generatedFiles = new GeneratedFilesTracker();
// create SIE structure qNamesToSIEs
Preconditions.checkNotNull(resourceBaseDir,
"resource base dir attribute was null");
- StringBuffer fullyQualifiedNamesOfFactories = new StringBuffer();
+ StringBuilder fullyQualifiedNamesOfFactories = new StringBuilder();
// create MBEs
for (Module module : yangModulesInCurrentMavenModule) {
String packageName = packageTranslator.getPackageName(module);
fullyQualifiedNamesOfFactories.toString());
} catch (IOException e) {
String message = "Cannot write to " + serviceLoaderFile;
- logger.error(message);
+ LOGGER.error(message);
throw new RuntimeException(message, e);
}
}
@Override
public void setAdditionalConfig(Map<String, String> additionalCfg) {
- if (logger != null)
- logger.debug(getClass().getCanonicalName(),
+ if (LOGGER != null) {
+ LOGGER.debug(getClass().getCanonicalName(),
": Additional configuration received: ",
additionalCfg.toString());
+ }
this.namespaceToPackageMapping = extractNamespaceMapping(additionalCfg);
this.generateModuleFactoryFile = extractModuleFactoryBoolean(additionalCfg);
}
private boolean extractModuleFactoryBoolean(
Map<String, String> additionalCfg) {
String bool = additionalCfg.get(MODULE_FACTORY_FILE_BOOLEAN);
- if (bool == null)
+ if (bool == null) {
return true;
- if (bool.equals("false"))
+ }
+ if ("false".equals(bool)) {
return false;
+ }
return true;
}
public void setMavenProject(MavenProject project) {
this.projectBaseDir = project.getBasedir();
- if (logger != null)
- logger.debug(getClass().getCanonicalName(), " project base dir: ",
+ if (LOGGER != null)
+ LOGGER.debug(getClass().getCanonicalName(), " project base dir: ",
projectBaseDir);
}
}
}
if (undeletedFiles.isEmpty() == false) {
- logger.error(
+ LOGGER.error(
"Illegal state occurred: Unable to delete already generated files, undeleted files: {}",
undeletedFiles);
}
public class AbstractFactoryTemplate extends GeneralClassTemplate {
- private static final List<String> implementedIfcs = Lists
+ private static final List<String> IMPLEMENTED_IFCS = Lists
.newArrayList(ModuleFactory.class.getCanonicalName());
public AbstractFactoryTemplate(Header header, String packageName,
String abstractFactoryName,
List<Field> fields) {
super(header, packageName, abstractFactoryName, Collections
- .<String> emptyList(), implementedIfcs, fields, Collections
+ .<String> emptyList(), IMPLEMENTED_IFCS, fields, Collections
.<MethodDefinition> emptyList(), true, false, Collections
.<Constructor> emptyList());
}
.getFullyQualifiedName(rootRB.getPackageName(), rootRB.getJavaNameOfRuntimeMXBean());
String childRegistratorFQN = rootFtlFile.getFullyQualifiedName();
Field rbParameter = new Field(fullyQualifiedNameOfMXBean, "rb");
- StringBuffer registerBody = new StringBuffer();
+ StringBuilder registerBody = new StringBuilder();
registerBody.append(format("%s %s = this.%s.registerRoot(%s);\n",
HierarchicalRuntimeBeanRegistration.class
.getCanonicalName(), hierachchicalRegistration
unorderedResult.put(entry.getKey(), entry.getValue());
}
- if (childRegistratorMap.size() > 0) {
+ if (!childRegistratorMap.isEmpty()) {
// first entry is the direct descendant according to the create
// contract
RuntimeRegistratorFtlTemplate childRegistrator = childRegistratorMap
.values().iterator().next();
- StringBuffer body = new StringBuffer();
+ StringBuilder body = new StringBuilder();
String key, value;
key = child.getJavaNamePrefix();
body.append(format(
static String serializeType(Type type, boolean addWildcards) {
if (type instanceof ParameterizedType){
ParameterizedType parameterizedType = (ParameterizedType) type;
- StringBuffer sb = new StringBuffer();
+ StringBuilder sb = new StringBuilder();
sb.append(parameterizedType.getRawType().getFullyQualifiedName());
sb.append(addWildcards ? "<? extends " : "<");
boolean first = true;
for (Rpc rpc : rbe.getRpcs()) {
AttributeIfc returnType = rpc.getReturnType();
- if (returnType == VoidAttribute.getInstance())
+ if (returnType == VoidAttribute.getInstance()) {
continue;
- if (returnType instanceof JavaAttribute)
+ }
+ if (returnType instanceof JavaAttribute) {
continue;
- if (returnType instanceof ListAttribute && returnType.getOpenType() instanceof SimpleType)
+ }
+ if (returnType instanceof ListAttribute && returnType.getOpenType() instanceof SimpleType) {
continue;
+ }
Preconditions.checkState(yangPropertiesToTypesMap.containsKey(returnType.getAttributeYangName()) == false,
"Duplicate TO %s for %s", returnType.getAttributeYangName(), rbe);
String returnType;
AttributeIfc attributeIfc = attrEntry.getValue();
- boolean isIdentityRef = false;
if (attributeIfc instanceof TypedAttribute) {
TypedAttribute typedAttribute = (TypedAttribute) attributeIfc;
returnType = serializeType(typedAttribute.getType());
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
public class ConfigConstants {
+ private ConfigConstants() {
+ }
+
public static final String CONFIG_NAMESPACE = "urn:opendaylight:params:xml:ns:yang:controller:config";
public static final String CONFIG_MODULE = "config";
return this;
}
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(ModuleMXBeanEntryBuilder.class);
// TODO: the XPath should be parsed by code generator IMO
private static final String MAGIC_STRING = "MAGIC_STRING";
private static final String MODULE_CONDITION_XPATH_TEMPLATE = "^/MAGIC_STRING:modules/MAGIC_STRING:module/MAGIC_STRING:type\\s*=\\s*['\"](.+)['\"]$";
- private static final SchemaPath expectedConfigurationAugmentationSchemaPath = SchemaPath.create(true,
+ private static final SchemaPath EXPECTED_CONFIGURATION_AUGMENTATION_SCHEMA_PATH = SchemaPath.create(true,
createConfigQName("modules"), createConfigQName("module"), createConfigQName("configuration"));
- private static final SchemaPath expectedStateAugmentationSchemaPath = SchemaPath.create(true,
+ private static final SchemaPath EXPECTED_STATE_AUGMENTATION_SCHEMA_PATH = SchemaPath.create(true,
createConfigQName("modules"), createConfigQName("module"), createConfigQName("state"));
private static final Pattern PREFIX_COLON_LOCAL_NAME = Pattern
.compile("^(.+):(.+)$");
public Map<String, ModuleMXBeanEntry> build() {
- logger.debug("Generating ModuleMXBeans of {} to package {}",
+ LOGGER.debug("Generating ModuleMXBeans of {} to package {}",
currentModule.getNamespace(), packageName);
String configModulePrefix;
checkAttributeNamesUniqueness(uniqueGeneratedClassesNames, result);
checkUnaugumentedIdentities(unaugmentedModuleIdentities);
- logger.debug("Number of ModuleMXBeans to be generated: {}", result.size());
+ LOGGER.debug("Number of ModuleMXBeans to be generated: {}", result.size());
return result;
}
private static void checkUnaugumentedIdentities(final Map<String, IdentitySchemaNode> unaugmentedModuleIdentities) {
if (unaugmentedModuleIdentities.size() > 0) {
- logger.warn("Augmentation not found for all currentModule identities: {}",
+ LOGGER.warn("Augmentation not found for all currentModule identities: {}",
unaugmentedModuleIdentities.keySet());
}
}
+ identityLocalName);
} else {
moduleIdentities.put(identityLocalName, id);
- logger.debug("Found identity {}", identityLocalName);
+ LOGGER.debug("Found identity {}", identityLocalName);
}
// validation check on unknown schema nodes
boolean providedServiceWasSet = false;
HAS_CHILDREN_AND_QNAME dataNodeContainer = getDataNodeContainer(choiceCaseNode);
- if (expectedConfigurationAugmentationSchemaPath.equals(augmentation.getTargetPath())) {
- logger.debug("Parsing configuration of {}", moduleLocalNameFromXPath);
+ if (EXPECTED_CONFIGURATION_AUGMENTATION_SCHEMA_PATH.equals(augmentation.getTargetPath())) {
+ LOGGER.debug("Parsing configuration of {}", moduleLocalNameFromXPath);
yangToAttributes = fillConfiguration(dataNodeContainer, currentModule, typeProviderWrapper, qNamesToSIEs,
schemaContext, packageName);
checkUniqueAttributesWithGeneratedClass(uniqueGeneratedClassesNames, when.getQName(), yangToAttributes);
- } else if (expectedStateAugmentationSchemaPath.equals(augmentation.getTargetPath())) {
- logger.debug("Parsing state of {}", moduleLocalNameFromXPath);
+ } else if (EXPECTED_STATE_AUGMENTATION_SCHEMA_PATH.equals(augmentation.getTargetPath())) {
+ LOGGER.debug("Parsing state of {}", moduleLocalNameFromXPath);
try {
runtimeBeans = fillRuntimeBeans(dataNodeContainer, currentModule, typeProviderWrapper, packageName,
moduleLocalNameFromXPath, javaNamePrefix);
public class ModuleUtil {
+ private ModuleUtil() {
+ }
+
public static QName getQName(final Module currentModule) {
return QName.create(currentModule.getNamespace(), currentModule.getRevision(), currentModule.getName());
}
// TODO add to PackageTranslator
private static String sanitizePackage(String namespace) {
- namespace = namespace.replace("://", ".");
- namespace = namespace.replace("/", ".");
- namespace = namespace.replace(":", ".");
- namespace = namespace.replace("-", "_");
- namespace = namespace.replace("@", ".");
- namespace = namespace.replace("$", ".");
- namespace = namespace.replace("#", ".");
- namespace = namespace.replace("'", ".");
- namespace = namespace.replace("*", ".");
- namespace = namespace.replace("+", ".");
- namespace = namespace.replace(",", ".");
- namespace = namespace.replace(";", ".");
- namespace = namespace.replace("=", ".");
- return namespace;
+ String newNamespace = namespace;
+ newNamespace = newNamespace.replace("://", ".");
+ newNamespace = newNamespace.replace("/", ".");
+ newNamespace = newNamespace.replace(":", ".");
+ newNamespace = newNamespace.replace("-", "_");
+ newNamespace = newNamespace.replace("@", ".");
+ newNamespace = newNamespace.replace("$", ".");
+ newNamespace = newNamespace.replace("#", ".");
+ newNamespace = newNamespace.replace("'", ".");
+ newNamespace = newNamespace.replace("*", ".");
+ newNamespace = newNamespace.replace("+", ".");
+ newNamespace = newNamespace.replace(",", ".");
+ newNamespace = newNamespace.replace(";", ".");
+ newNamespace = newNamespace.replace("=", ".");
+ return newNamespace;
}
}
final Map<QName, Set<RpcDefinition>> identitiesToRpcs) {
List<AttributeIfc> attributes = Lists.newArrayList();
- // List<JavaAttribute> javaAttributes = new ArrayList<>();
- // List<TOAttribute> toAttributes = new ArrayList<>();
List<RuntimeBeanEntry> runtimeBeanEntries = new ArrayList<>();
for (DataSchemaNode child : subtree.getChildNodes()) {
// child leaves can be java attributes, TO attributes, or child
.findJavaParameter(rpcDefinition);
AttributeIfc returnType;
if (rpcDefinition.getOutput() == null
- || rpcDefinition.getOutput().getChildNodes().size() == 0) {
+ || rpcDefinition.getOutput().getChildNodes().isEmpty()) {
returnType = VoidAttribute.getInstance();
} else if (rpcDefinition.getOutput().getChildNodes().size() == 1) {
DataSchemaNode returnDSN = rpcDefinition.getOutput()
currentModule, identitiesToRpcs);
Optional<String> keyYangName;
- if (listSchemaNode.getKeyDefinition().size() == 0) {
+ if (listSchemaNode.getKeyDefinition().isEmpty()) {
keyYangName = Optional.absent();
} else if (listSchemaNode.getKeyDefinition().size() == 1) {
// key must be either null or one of supported key types
* </p>
*/
public class ServiceInterfaceEntry extends AbstractEntry {
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(ServiceInterfaceEntry.class);
private static final String CLASS_NAME_SUFFIX = "ServiceInterface";
*/
public static Map<QName, ServiceInterfaceEntry> create(Module currentModule,
String packageName,Map<IdentitySchemaNode, ServiceInterfaceEntry> definedSEItracker) {
- logger.debug("Generating ServiceInterfaces from {} to package {}",
+ LOGGER.debug("Generating ServiceInterfaces from {} to package {}",
currentModule.getNamespace(), packageName);
Map<IdentitySchemaNode, ServiceInterfaceEntry> identitiesToSIs = new HashMap<>();
Set<IdentitySchemaNode> notVisited = new HashSet<>(
currentModule.getIdentities());
int lastSize = notVisited.size() + 1;
- while (notVisited.size() > 0) {
+ while (!notVisited.isEmpty()) {
if (notVisited.size() == lastSize) {
- logger.debug(
+ LOGGER.debug(
"Following identities will be ignored while generating ServiceInterfaces, as they are not derived from {} : {}",
SERVICE_TYPE_Q_NAME, notVisited);
break;
for (ServiceInterfaceEntry sie : identitiesToSIs.values()) {
resultMap.put(sie.getQName(), sie);
}
- logger.debug("Number of ServiceInterfaces to be generated: {}",
+ LOGGER.debug("Number of ServiceInterfaces to be generated: {}",
resultMap.size());
return resultMap;
}
@Override
public boolean equals(Object o) {
- if (this == o)
+ if (this == o) {
return true;
- if (o == null || getClass() != o.getClass())
+ }
+ if (o == null || getClass() != o.getClass()) {
return false;
+ }
ServiceInterfaceEntry that = (ServiceInterfaceEntry) o;
- if (!maybeBaseCache.equals(that.maybeBaseCache))
+ if (!maybeBaseCache.equals(that.maybeBaseCache)) {
return false;
- if (!nullableDescription.equals(that.nullableDescription))
+ }
+ if (!nullableDescription.equals(that.nullableDescription)) {
return false;
- if (!exportedOsgiClassName.equals(that.exportedOsgiClassName))
+ }
+ if (!exportedOsgiClassName.equals(that.exportedOsgiClassName)) {
return false;
- if (!qName.equals(that.qName))
+ }
+ if (!qName.equals(that.qName)) {
return false;
- if (!packageName.equals(that.packageName))
+ }
+ if (!packageName.equals(that.packageName)) {
return false;
- if (!typeName.equals(that.typeName))
+ }
+ if (!typeName.equals(that.typeName)) {
return false;
+ }
return true;
}
try {
javaType = typeProvider.javaTypeForSchemaDefinitionType(
type, leaf);
- if (javaType == null)
+ if (javaType == null) {
throw new IllegalArgumentException("Unknown type received for "
+ leaf.toString());
+ }
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Error while resolving type of "
+ leaf, e);
try {
javaType = typeProvider.javaTypeForSchemaDefinitionType(
leaf.getType(), leaf);
- if (javaType == null)
+ if (javaType == null) {
throw new IllegalArgumentException(
"Unknown type received for " + leaf.toString());
+ }
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Error while resolving type of "
+ leaf, e);
@Override
public boolean equals(Object o) {
- if (this == o)
+ if (this == o) {
return true;
- if (!(o instanceof AbstractAttribute))
+ }
+ if (!(o instanceof AbstractAttribute)) {
return false;
+ }
AbstractAttribute that = (AbstractAttribute) o;
if (attributeYangName != null ? !attributeYangName
.equals(that.attributeYangName)
- : that.attributeYangName != null)
+ : that.attributeYangName != null) {
return false;
+ }
return true;
}
@Override
public boolean equals(Object o) {
- if (this == o)
+ if (this == o) {
return true;
- if (o == null || getClass() != o.getClass())
+ }
+ if (o == null || getClass() != o.getClass()) {
return false;
- if (!super.equals(o))
+ }
+ if (!super.equals(o)) {
return false;
+ }
AbstractDependencyAttribute that = (AbstractDependencyAttribute) o;
if (dependency != null ? !dependency.equals(that.dependency)
- : that.dependency != null)
+ : that.dependency != null) {
return false;
+ }
if (nullableDefault != null ? !nullableDefault
- .equals(that.nullableDefault) : that.nullableDefault != null)
+ .equals(that.nullableDefault) : that.nullableDefault != null) {
return false;
+ }
if (nullableDescription != null ? !nullableDescription
.equals(that.nullableDescription)
- : that.nullableDescription != null)
+ : that.nullableDescription != null) {
return false;
+ }
return true;
}
@Override
public boolean equals(Object o) {
- if (this == o)
+ if (this == o) {
return true;
- if (o == null || getClass() != o.getClass())
+ }
+ if (o == null || getClass() != o.getClass()) {
return false;
- if (!super.equals(o))
+ }
+ if (!super.equals(o)) {
return false;
+ }
JavaAttribute that = (JavaAttribute) o;
if (nullableDefault != null ? !nullableDefault
- .equals(that.nullableDefault) : that.nullableDefault != null)
+ .equals(that.nullableDefault) : that.nullableDefault != null) {
return false;
+ }
if (nullableDescription != null ? !nullableDescription
.equals(that.nullableDescription)
- : that.nullableDescription != null)
+ : that.nullableDescription != null) {
return false;
- if (type != null ? !type.equals(that.type) : that.type != null)
+ }
+ if (type != null ? !type.equals(that.type) : that.type != null) {
return false;
+ }
return true;
}
@Override
public boolean equals(Object o) {
- if (this == o)
+ if (this == o) {
return true;
- if (o == null || getClass() != o.getClass())
+ }
+ if (o == null || getClass() != o.getClass()) {
return false;
- if (!super.equals(o))
+ }
+ if (!super.equals(o)) {
return false;
+ }
ListAttribute that = (ListAttribute) o;
if (nullableDefault != null ? !nullableDefault
- .equals(that.nullableDefault) : that.nullableDefault != null)
+ .equals(that.nullableDefault) : that.nullableDefault != null) {
return false;
+ }
if (nullableDescription != null ? !nullableDescription
.equals(that.nullableDescription)
- : that.nullableDescription != null)
+ : that.nullableDescription != null) {
return false;
+ }
return true;
}
public class SimpleTypeResolver {
+ private SimpleTypeResolver() {
+ }
+
public static SimpleType<?> getSimpleType(Type type) {
SimpleType<?> expectedSimpleType = JAVA_TYPE_TO_SIMPLE_TYPE.get(type
.getFullyQualifiedName());
TypeProviderWrapper typeProviderWrapper, String packageName) {
Class<? extends DataSchemaNode> type = isAllowedType(dataSchemaNode);
- if (type.equals(LeafSchemaNode.class))
+ if (type.equals(LeafSchemaNode.class)) {
return new JavaAttribute((LeafSchemaNode) dataSchemaNode,
typeProviderWrapper);
- else if (type.equals(ListSchemaNode.class))
+ } else if (type.equals(ListSchemaNode.class)) {
return ListAttribute.create((ListSchemaNode) dataSchemaNode,
typeProviderWrapper, packageName);
- else if (type.equals(LeafListSchemaNode.class))
+ } else if (type.equals(LeafListSchemaNode.class)) {
return ListAttribute.create((LeafListSchemaNode) dataSchemaNode,
typeProviderWrapper);
- else if (type.equals(ContainerSchemaNode.class))
+ } else if (type.equals(ContainerSchemaNode.class)) {
return TOAttribute.create((ContainerSchemaNode) dataSchemaNode,
typeProviderWrapper, packageName);
+ }
throw new IllegalStateException("This should never happen");
}
private static Class<? extends DataSchemaNode> isAllowedType(
DataSchemaNode dataSchemaNode) {
for (Class<? extends DataSchemaNode> allowedType : ALLOWED_CHILDREN) {
- if (allowedType.isAssignableFrom(dataSchemaNode.getClass()) == true)
+ if (allowedType.isAssignableFrom(dataSchemaNode.getClass()) == true) {
return allowedType;
+ }
}
throw new IllegalArgumentException("Illegal child node for TO: "
+ dataSchemaNode.getClass() + " allowed node types: "
@Override
public boolean equals(Object o) {
- if (this == o)
+ if (this == o) {
return true;
- if (o == null || getClass() != o.getClass())
+ }
+ if (o == null || getClass() != o.getClass()) {
return false;
- if (!super.equals(o))
+ }
+ if (!super.equals(o)) {
return false;
+ }
TOAttribute that = (TOAttribute) o;
if (nullableDefault != null ? !nullableDefault
- .equals(that.nullableDefault) : that.nullableDefault != null)
+ .equals(that.nullableDefault) : that.nullableDefault != null) {
return false;
+ }
if (nullableDescription != null ? !nullableDescription
.equals(that.nullableDescription)
- : that.nullableDescription != null)
+ : that.nullableDescription != null) {
return false;
+ }
if (yangNameToAttributeMap != null ? !yangNameToAttributeMap
.equals(that.yangNameToAttributeMap)
- : that.yangNameToAttributeMap != null)
+ : that.yangNameToAttributeMap != null) {
return false;
+ }
return true;
}
final class Util {
+ private Util() {
+ }
/**
* Used for date <-> xml serialization
*/
- private static final SimpleDateFormat dateFormat = new SimpleDateFormat(
+ private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat(
"yyyy-MM-dd");
public static String writeDate(Date date) {
- return dateFormat.format(date);
+ return DATE_FORMAT.format(date);
}
public static Date readDate(String s) throws ParseException {
- return dateFormat.parse(s);
+ return DATE_FORMAT.parse(s);
}
}
package org.opendaylight.controller.config.yangjmxgenerator.plugin.util;
public class FullyQualifiedNameHelper {
+
+ private FullyQualifiedNameHelper() {
+ }
+
public static String getFullyQualifiedName(String packageName,
String className) {
if (packageName.isEmpty())
import java.util.Map;
import java.util.Set;
+import org.junit.Ignore;
import org.junit.Test;
import org.opendaylight.controller.config.yangjmxgenerator.plugin.util.NameConflictException;
import org.opendaylight.controller.config.yangjmxgenerator.plugin.util.YangModelSearchUtils;
Map<File, String> testedFilesToYangModules = new HashMap<>();
Map<String, String> testedYangModulesToExpectedConflictingName = new HashMap<>();
+ @Ignore
@Test
public void testNameConflicts() throws Exception {
prepareSamples();
}
- leaf simpleInt {
+ leaf simpleInt1 {
type uint32;
default 99L;
}
}
}
- leaf simpleInt {
+ leaf simpleInt2 {
type uint32;
}
case impl-netconf {
when "/config:modules/config:module/config:type = 'impl-netconf'";
// root runtime bean
- leaf created-sessions {
+ leaf created-sessions-1 {
type uint32;
}
case netconf {
when "/config:modules/config:module/config:type = 'netconf'";
// root runtime bean
- leaf created-sessions {
+ leaf created-sessions2 {
type uint32;
}
case netconf {
when "/config:modules/config:module/config:type = 'netconf'";
- container dto-a {
+ container dto-a2 {
leaf simple-arg {
type uint32;
}
case netconf {
when "/config:modules/config:module/config:type = 'netconf'";
// root runtime bean
- leaf created-sessions {
+ leaf created-sessions2 {
type uint32;
}
case netconf {
when "/config:modules/config:module/config:type = 'netconf'";
// root runtime bean
- leaf created-sessions {
+ leaf created-sessions2 {
type uint32;
}
case netconf1 {
when "/config:modules/config:module/config:type = 'netconf1'";
// root runtime bean
- leaf created-sessions {
+ leaf created-sessions2 {
type uint32;
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<artifactId>yang-test-plugin</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>config-plugin-parent</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../config-plugin-parent</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>configuration</artifactId>
- <version>0.4.3-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>configuration.implementation</artifactId>
- <version>0.4.3-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>configuration.integrationtest</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>ch.qos.logback</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>connectionmanager</artifactId>
- <version>0.1.2-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>connectionmanager.implementation</artifactId>
- <version>0.1.2-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.implementation</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.it.implementation</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.shell</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>opendaylight-karaf-empty</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>opendaylight-karaf-resources</artifactId>
--- /dev/null
+@echo off
+rem
+rem
+rem Licensed to the Apache Software Foundation (ASF) under one or more
+rem contributor license agreements. See the NOTICE file distributed with
+rem this work for additional information regarding copyright ownership.
+rem The ASF licenses this file to You under the Apache License, Version 2.0
+rem (the "License"); you may not use this file except in compliance with
+rem the License. You may obtain a copy of the License at
+rem
+rem http://www.apache.org/licenses/LICENSE-2.0
+rem
+rem Unless required by applicable law or agreed to in writing, software
+rem distributed under the License is distributed on an "AS IS" BASIS,
+rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+rem See the License for the specific language governing permissions and
+rem limitations under the License.
+rem
+
+rem
+rem handle specific scripts; the SCRIPT_NAME is exactly the name of the Karaf
+rem script; for example karaf.bat, start.bat, stop.bat, admin.bat, client.bat, ...
+rem
+rem if "%KARAF_SCRIPT%" == "SCRIPT_NAME" (
+rem Actions go here...
+rem )
+
+rem
+rem general settings which should be applied for all scripts go here; please keep
+rem in mind that it is possible that scripts might be executed more than once, e.g.
+rem in example of the start script where the start script is executed first and the
+rem karaf script afterwards.
+rem
+
+rem
+rem The following section shows the possible configuration options for the default
+rem karaf scripts
+rem
+rem Window name of the windows console
+rem SET KARAF_TITLE
+rem Location of Java installation
+rem SET JAVA_HOME
+rem Minimum memory for the JVM
+rem SET JAVA_MIN_MEM
+rem Maximum memory for the JVM
+rem SET JAVA_MAX_MEM
+rem Minimum perm memory for the JVM
+rem SET JAVA_PERM_MEM
+rem Maximum perm memory for the JVM
+rem SET JAVA_MAX_PERM_MEM
+rem Karaf home folder
+rem SET KARAF_HOME
+rem Karaf data folder
+rem SET KARAF_DATA
+rem Karaf base folder
+rem SET KARAF_BASE
+rem Karaf etc folder
+rem SET KARAF_ETC
+rem Additional available Karaf options
+rem SET KARAF_OPTS
+rem Enable debug mode
+rem SET KARAF_DEBUG
+IF "%JAVA_MAX_PERM_MEM%"=="" SET JAVA_MAX_PERM_MEM=512m
+IF "%JAVA_MAX_MEM%"=="" SET JAVA_MAX_MEM=2048m
# default Openflow version = 1.0, we also support 1.3.
# ovsdb.of.version=1.3
+# ovsdb can be configured with ml2 to perform l3 forwarding. The config below enables that functionality, which is
+# disabled by default.
+# ovsdb.l3.fwd.enabled=yes
+
# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default
# gateway --on the external subnet-- is expected to be resolved from its inet address. The config below overrides that
# specific arp/neighDiscovery lookup.
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>distribution.opendaylight-karaf</artifactId>
<artifactId>features-base</artifactId>
<classifier>features</classifier>
<type>xml</type>
+ <scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-adsal</artifactId>
<classifier>features</classifier>
<type>xml</type>
+ <scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-nsf</artifactId>
<classifier>features</classifier>
<type>xml</type>
+ <scope>runtime</scope>
</dependency>
<!-- MD-SAL Related Features -->
<dependency>
<artifactId>features-mdsal</artifactId>
<classifier>features</classifier>
<type>xml</type>
+ <scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>features-flow</artifactId>
<classifier>features</classifier>
<type>xml</type>
+ <scope>runtime</scope>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-restconf</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ <!-- Netconf connector features. When this is included, users can test the netconf connector using netconf-testtool -->
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>features-netconf-connector</artifactId>
+ <classifier>features</classifier>
+ <type>xml</type>
+ </dependency>
+
</dependencies>
<build>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>distribution.opendaylight</artifactId>
- <version>0.1.2-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
<prerequisites>
<maven>3.0</maven>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-inmemory-datastore</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<dependency>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-topology</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</dependency>
<!-- toaster example I'm pretty sure we should trim -->
<artifactId>sample-toaster-provider</artifactId>
<version>${mdsal.version}</version>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>binding-generator-api</artifactId>
- <version>${yangtools.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>binding-model-api</artifactId>
- <version>${yangtools.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-config</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-servlets</artifactId>
+ <version>8.1.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-client</artifactId>
+ <version>8.1.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-continuation</artifactId>
+ <version>8.1.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-util</artifactId>
+ <version>8.1.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-server</artifactId>
+ <version>8.1.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-io</artifactId>
+ <version>8.1.14.v20131031</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-http</artifactId>
+ <version>8.1.14.v20131031</version>
+ </dependency>
</dependencies>
</profile>
<profile>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>swagger-ui</artifactId>
- <version>0.0.1-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
</dependency>
</dependencies>
</profile>
# default Openflow version = 1.3, we also support 1.0.
ovsdb.of.version=1.3
+# ovsdb can be configured with ml2 to perform l3 forwarding. The config below enables that functionality, which is
+# disabled by default.
+# ovsdb.l3.fwd.enabled=yes
+
# ovsdb can be configured with ml2 to perform l3 forwarding. When used in that scenario, the mac address of the default
# gateway --on the external subnet-- is expected to be resolved from its inet address. The config below overrides that
# specific arp/neighDiscovery lookup.
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.0-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<scm>
<groupId>org.opendaylight.controller</groupId>
<artifactId>distribution.p2site</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
<build>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>sanitytest</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>dummy-console</artifactId>
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>Dummy Console Interfaces for Equinox-specific CLI</description>
<build>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwarding.staticrouting</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwardingrulesmanager</artifactId>
- <version>0.6.0-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwardingrulesmanager.implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>forwardingrulesmanager.integrationtest</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.implementation</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>hosttracker.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../implementaiton/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.shell</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker_new</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker_new.implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
<groupId>org.opendaylight.controller</groupId>
- <artifactId>karaf.branding</artifactId>
- <version>1.0.0-SNAPSHOT</version>
- <packaging>bundle</packaging>
- <name>OpenDaylight :: Karaf :: Branding</name>
+ <artifactId>releasepom</artifactId>
+ <version>0.2.0-SNAPSHOT</version>
+ <relativePath>../..</relativePath>
+ </parent>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>karaf.branding</artifactId>
+ <version>1.1.0-SNAPSHOT</version>
+ <packaging>bundle</packaging>
+ <name>OpenDaylight :: Karaf :: Branding</name>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <version>2.4.0</version>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Bundle-SymbolicName>${project.artifactId}</Bundle-SymbolicName>
- <Import-Package>*</Import-Package>
- <Private-Package>!*</Private-Package>
- <Export-Package>
- org.apache.karaf.branding
- </Export-Package>
- <Spring-Context>*;public-context:=false</Spring-Context>
- </instructions>
- </configuration>
- </plugin>
- </plugins>
- </build>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.felix</groupId>
+ <artifactId>maven-bundle-plugin</artifactId>
+ <version>2.4.0</version>
+ <extensions>true</extensions>
+ <configuration>
+ <instructions>
+ <Bundle-SymbolicName>${project.artifactId}</Bundle-SymbolicName>
+ <Import-Package>*</Import-Package>
+ <Private-Package>!*</Private-Package>
+ <Export-Package>org.apache.karaf.branding</Export-Package>
+ <Spring-Context>*;public-context:=false</Spring-Context>
+ </instructions>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
</project>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>karaf-tomcat-security</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>logging.bridge</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<parent>
<artifactId>sal-parent</artifactId>
<groupId>org.opendaylight.controller</groupId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>compatibility-parent</artifactId>
<packaging>pom</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>compatibility-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-compatibility</artifactId>
<packaging>bundle</packaging>
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+public abstract class AbstractDataChangeListener <T extends DataObject> implements AutoCloseable,DataChangeListener{
+
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractDataChangeListener.class);
+ protected InventoryAndReadAdapter adapter;
+ protected final Class<T> clazz;
+ protected ListenerRegistration<DataChangeListener> listenerRegistration;
+
+ public AbstractDataChangeListener(final InventoryAndReadAdapter adapter, DataBroker db, final Class<T> clazz) {
+ this.adapter = Preconditions.checkNotNull(adapter, "InventoryAndReadAdapter can not be null!");
+ this.clazz = Preconditions.checkNotNull(clazz, "Class can not be null!");
+ Preconditions.checkNotNull(db, "DataBroker can not be null!");
+ registrationListener(db, 5);
+ }
+
+ @Override
+ public void onDataChanged(AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changeEvent) {
+ Preconditions.checkNotNull(changeEvent,"Async ChangeEvent can not be null!");
+ /* All DataObjects for create */
+ final Map<InstanceIdentifier<?>, DataObject> createdData = changeEvent.getCreatedData() != null
+ ? changeEvent.getCreatedData() : Collections.<InstanceIdentifier<?>, DataObject> emptyMap();
+ /* All DataObjects for remove */
+ final Set<InstanceIdentifier<?>> removeData = changeEvent.getRemovedPaths() != null
+ ? changeEvent.getRemovedPaths() : Collections.<InstanceIdentifier<?>> emptySet();
+ /* All DataObjects for updates */
+ final Map<InstanceIdentifier<?>, DataObject> updateData = changeEvent.getUpdatedData() != null
+ ? changeEvent.getUpdatedData() : Collections.<InstanceIdentifier<?>, DataObject> emptyMap();
+ /* All Original DataObjects */
+ final Map<InstanceIdentifier<?>, DataObject> originalData = changeEvent.getOriginalData() != null
+ ? changeEvent.getOriginalData() : Collections.<InstanceIdentifier<?>, DataObject> emptyMap();
+ this.createData(createdData);
+ this.updateData(updateData, originalData);
+ this.removeData(removeData, originalData);
+ }
+
+ @SuppressWarnings("unchecked")
+ private void createData(final Map<InstanceIdentifier<?>, DataObject> createdData) {
+ final Set<InstanceIdentifier<?>> keys = createdData.keySet() != null
+ ? createdData.keySet() : Collections.<InstanceIdentifier<?>> emptySet();
+ for (InstanceIdentifier<?> key : keys) {
+ if (clazz.equals(key.getTargetType())) {
+ InstanceIdentifier<T> createKeyIdent = key.firstIdentifierOf(clazz);
+ final Optional<DataObject> value = Optional.of(createdData.get(key));
+ if (value.isPresent()) {
+ this.add(createKeyIdent, (T)value.get());
+ }
+ }
+ }
+ }
+
+ abstract protected void add(InstanceIdentifier<T> createKeyIdent, T node);
+
+ @SuppressWarnings("unchecked")
+ private void updateData(final Map<InstanceIdentifier<?>, DataObject> updateData, final Map<InstanceIdentifier<?>, DataObject> originalData) {
+
+ final Set<InstanceIdentifier<?>> keys = updateData.keySet() != null
+ ? updateData.keySet() : Collections.<InstanceIdentifier<?>> emptySet();
+ for (InstanceIdentifier<?> key : keys) {
+ if (clazz.equals(key.getTargetType())) {
+ InstanceIdentifier<T> updateKeyIdent = key.firstIdentifierOf(clazz);
+ final Optional<DataObject> value = Optional.of(updateData.get(key));
+ final Optional<DataObject> original = Optional.of(originalData.get(key));
+ if (value.isPresent() && original.isPresent()) {
+ this.update(updateKeyIdent, (T)original.get(), (T)value.get());
+ }
+ }
+ }
+ }
+
+ abstract protected void update(InstanceIdentifier<T> updateKeyIdent, T node,
+ T node2);
+
+ @SuppressWarnings("unchecked")
+ private void removeData(final Set<InstanceIdentifier<?>> removeData, final Map<InstanceIdentifier<?>, DataObject> originalData) {
+
+ for (InstanceIdentifier<?> key : removeData) {
+ if (clazz.equals(key.getTargetType())) {
+ final InstanceIdentifier<T> ident = key.firstIdentifierOf(clazz);
+ final DataObject removeValue = originalData.get(key);
+ this.remove(ident, (T)removeValue);
+ }
+ }
+ }
+
+ abstract protected void remove(InstanceIdentifier<T> ident, T removeValue);
+
+ protected void registrationListener(final DataBroker db, int i) {
+ try {
+ listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ getWildCardPath(), this, DataChangeScope.BASE);
+ } catch (final Exception e) {
+ if (i >= 1) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e1) {
+ LOG.error("Thread interrupted '{}'", e1);
+ Thread.currentThread().interrupt();
+ }
+ registrationListener(db, --i);
+ } else {
+ LOG.error("AbstractDataChangeListener registration fail!", e);
+ throw new IllegalStateException("AbstractDataChangeListener registration Listener fail! System needs restart.", e);
+ }
+ }
+ }
+
+ protected abstract InstanceIdentifier<?> getWildCardPath();
+
+ @Override
+ public void close() {
+ if (listenerRegistration != null) {
+ try {
+ listenerRegistration.close();
+ } catch (final Exception e) {
+ LOG.error("Error by stop AbstractDataChangeListener.", e);
+ }
+ listenerRegistration = null;
+ }
+ }
+}
\ No newline at end of file
*/
package org.opendaylight.controller.sal.compatibility;
-import java.util.Dictionary;
-import java.util.Hashtable;
-
+import com.google.common.base.Preconditions;
import org.apache.felix.dm.Component;
import org.opendaylight.controller.clustering.services.IClusterGlobalServices;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
import org.opendaylight.controller.sal.utils.INodeFactory;
import org.osgi.framework.BundleContext;
-import com.google.common.base.Preconditions;
+import java.util.Dictionary;
+import java.util.Hashtable;
public class ComponentActivator extends ComponentActivatorAbstractBase {
private final INodeConnectorFactory nodeConnectorFactory = new MDSalNodeConnectorFactory();
@Override
public void start(final BundleContext context) {
- super.start(context);
this.context = Preconditions.checkNotNull(context);
+ super.start(context);
}
public ProviderContext setBroker(final BindingAwareBroker broker) {
@Override
protected Object[] getGlobalImplementations() {
return new Object[] {
+ this, // Used for setBroker callback
flow,
inventory,
dataPacket,
nodeFactory,
nodeConnectorFactory,
topology,
- tpProvider,
- this // Used for setBroker callback
+ tpProvider
};
}
protected Object[] getImplementations() {
return new Object[] {
dataPacketService,
+ inventory,
};
}
_instanceConfigure((ComponentActivator)imp, c, containerName);
} else if (imp instanceof DataPacketServiceAdapter) {
_instanceConfigure((DataPacketServiceAdapter)imp, c, containerName);
+ } else if (imp instanceof InventoryAndReadAdapter) {
+ _instanceConfigure((InventoryAndReadAdapter)imp, c, containerName);
} else {
throw new IllegalArgumentException(String.format("Unhandled implementation class %s", imp.getClass()));
}
.setService(IDiscoveryService.class)
.setCallbacks("setDiscoveryPublisher", "setDiscoveryPublisher")
.setRequired(false));
+ it.add(createServiceDependency()
+ .setService(BindingAwareBroker.class)
+ .setRequired(true));
+ }
+
+ private void _instanceConfigure(final InventoryAndReadAdapter imp, final Component it, String containerName) {
+ it.setInterface(new String[] {
+ IPluginInInventoryService.class.getName(),
+ IPluginInReadService.class.getName(),
+ }, properties());
+
+ it.add(createServiceDependency()
+ .setService(IPluginOutReadService.class)
+ .setCallbacks("setReadPublisher", "unsetReadPublisher")
+ .setRequired(false));
+ it.add(createServiceDependency()
+ .setService(IPluginOutInventoryService.class)
+ .setCallbacks("setInventoryPublisher", "unsetInventoryPublisher")
+ .setRequired(false));
+ it.add(createServiceDependency()
+ .setService(BindingAwareBroker.class)
+ .setRequired(true));
}
private void _configure(final TopologyAdapter imp, final Component it) {
*/
package org.opendaylight.controller.sal.compatibility;
+import com.google.common.base.Optional;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Iterables;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-
import org.opendaylight.controller.md.sal.binding.util.TypeSafeDataReader;
import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdate;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdate;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.statistics.FlowTableStatistics;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.FlowTopologyDiscoveryService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.Link;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.OpendaylightInventoryListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.statistics.types.rev130925.GenericStatistics;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.common.RpcResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class InventoryAndReadAdapter implements IPluginInReadService, IPluginInInventoryService, OpendaylightInventoryListener, OpendaylightFlowStatisticsListener, OpendaylightFlowTableStatisticsListener, OpendaylightPortStatisticsListener {
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+public class InventoryAndReadAdapter implements IPluginInReadService, IPluginInInventoryService, OpendaylightFlowStatisticsListener, OpendaylightFlowTableStatisticsListener, OpendaylightPortStatisticsListener {
private static final Logger LOG = LoggerFactory.getLogger(InventoryAndReadAdapter.class);
private static final short OPENFLOWV10_TABLE_ID = 0;
+ private static final int SLEEP_FOR_NOTIFICATIONS_MILLIS = 500;
private final InventoryNotificationProvider inventoryNotificationProvider = new InventoryNotificationProvider();
private final Map<PathArgument,List<PathArgument>> nodeToNodeConnectorsMap = new ConcurrentHashMap<>();
private List<IPluginOutInventoryService> inventoryPublisher = new CopyOnWriteArrayList<>();
private List<IPluginOutReadService> statisticsPublisher = new CopyOnWriteArrayList<>();
+ private Cache<String, TransactionNotificationList<? extends TransactionAware>> txCache;
private OpendaylightFlowTableStatisticsService flowTableStatisticsService;
private OpendaylightPortStatisticsService nodeConnectorStatisticsService;
public void startAdapter() {
inventoryNotificationProvider.setDataProviderService(getDataProviderService());
inventoryNotificationProvider.setInventoryPublisher(getInventoryPublisher());
+ txCache = CacheBuilder.newBuilder().expireAfterWrite(60L, TimeUnit.SECONDS).maximumSize(10000).build();
// inventoryNotificationProvider.start();
}
* @param id Table id
* @return Table contents, or null if not present
*/
- private Table readConfigTable(final Node node, final short id) {
+ private Table readOperationalTable(final Node node, final short id) {
final InstanceIdentifier<Table> tableRef = InstanceIdentifier.builder(Nodes.class)
- .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, InventoryMapping.toNodeKey(node))
+ .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, NodeMapping.toNodeKey(node))
.augmentation(FlowCapableNode.class)
.child(Table.class, new TableKey(id))
.build();
- return (Table) startChange().readConfigurationData(tableRef);
+ return (Table) startChange().readOperationalData(tableRef);
}
@Override
public List<FlowOnNode> readAllFlow(final Node node, final boolean cached) {
- final ArrayList<FlowOnNode> output = new ArrayList<>();
- final Table table = readConfigTable(node, OPENFLOWV10_TABLE_ID);
- if (table != null) {
- final List<Flow> flows = table.getFlow();
- LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
+ final ArrayList<FlowOnNode> ret= new ArrayList<>();
+ if (cached) {
+ final Table table = readOperationalTable(node, OPENFLOWV10_TABLE_ID);
+ if (table != null) {
+ final List<Flow> flows = table.getFlow();
+ LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
+
+ for (final Flow flow : flows) {
+ final FlowStatisticsData statsFromDataStore = flow.getAugmentation(FlowStatisticsData.class);
+ if (statsFromDataStore != null) {
+ final FlowOnNode it = new FlowOnNode(ToSalConversionsUtils.toFlow(flow, node));
+ ret.add(addFlowStats(it, statsFromDataStore.getFlowStatistics()));
+ }
+ }
+ }
+ } else {
+ LOG.debug("readAllFlow cached:{}", cached);
+ GetAllFlowStatisticsFromFlowTableInput input =
+ new GetAllFlowStatisticsFromFlowTableInputBuilder()
+ .setNode(NodeMapping.toNodeRef(node))
+ .setTableId(new TableId(OPENFLOWV10_TABLE_ID))
+ .build();
+
+ Future<RpcResult<GetAllFlowStatisticsFromFlowTableOutput>> future =
+ getFlowStatisticsService().getAllFlowStatisticsFromFlowTable(input);
+
+ RpcResult<GetAllFlowStatisticsFromFlowTableOutput> result = null;
+ try {
+ // having a blocking call is fine here, as we need to join
+ // the notifications and return the result
+ result = future.get();
+ } catch (Exception e) {
+ LOG.error("Exception in getAllFlowStatisticsFromFlowTable ", e);
+ return ret;
+ }
+
+ GetAllFlowStatisticsFromFlowTableOutput output = result.getResult();
+ if (output == null) {
+ return ret;
+ }
+
+ TransactionId transactionId = output.getTransactionId();
+ String cacheKey = buildCacheKey(transactionId, NodeMapping.toNodeId(node));
+ LOG.info("readAllFlow transactionId:{} cacheKey:{}", transactionId, cacheKey);
- for (final Flow flow : flows) {
- final FlowStatisticsData statsFromDataStore = flow.getAugmentation(FlowStatisticsData.class);
- if (statsFromDataStore != null) {
- final FlowOnNode it = new FlowOnNode(ToSalConversionsUtils.toFlow(flow, node));
- output.add(addFlowStats(it, statsFromDataStore.getFlowStatistics()));
+ // insert an entry in tempcache, will get updated when notification is received
+ txCache.put(cacheKey, new TransactionNotificationList<FlowsStatisticsUpdate>(
+ transactionId, node.getNodeIDString()));
+
+ TransactionNotificationList<FlowsStatisticsUpdate> txnList =
+ (TransactionNotificationList<FlowsStatisticsUpdate>) txCache.getIfPresent(cacheKey);
+
+ // this loop would not be infinite as the cache will remove an entry
+ // after defined time if not written to
+ while (txnList != null && !txnList.areAllNotificationsGathered()) {
+ LOG.debug("readAllFlow waiting for notification...");
+ waitForNotification();
+ txnList = (TransactionNotificationList<FlowsStatisticsUpdate>) txCache.getIfPresent(cacheKey);
+ }
+
+ if (txnList == null) {
+ return ret;
+ }
+
+ List<FlowsStatisticsUpdate> notifications = txnList.getNotifications();
+ for (FlowsStatisticsUpdate flowsStatisticsUpdate : notifications) {
+ List<FlowAndStatisticsMapList> flowAndStatisticsMapList = flowsStatisticsUpdate.getFlowAndStatisticsMapList();
+ if (flowAndStatisticsMapList != null) {
+ for (FlowAndStatisticsMapList flowAndStatistics : flowAndStatisticsMapList) {
+ final FlowOnNode it = new FlowOnNode(ToSalConversionsUtils.toFlow(flowAndStatistics, node));
+ ret.add(addFlowStats(it, flowAndStatistics));
+ }
}
}
}
+ return ret;
+ }
- // TODO (main): Shall we send request to the switch? It will make async request to the switch.
- // Once the plugin receives a response, it will let the adaptor know through onFlowStatisticsUpdate()
- // If we assume that md-sal statistics manager will always be running, then it is not required
- // But if not, then sending request will collect the latest data for adaptor at least.
- getFlowStatisticsService().getAllFlowsStatisticsFromAllFlowTables(
- new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder().setNode(NodeMapping.toNodeRef(node)).build());
- return output;
+ private String buildCacheKey(final TransactionId id, final NodeId nodeId) {
+ return String.valueOf(id.getValue()) + "-" + nodeId.getValue();
+ }
+
+ private void waitForNotification() {
+ try {
+ // going for a simple sleep approach,as wait-notify on a monitor would require
+ // us to maintain monitors per txn-node combo
+ Thread.sleep(SLEEP_FOR_NOTIFICATIONS_MILLIS);
+ LOG.trace("statCollector is waking up from a wait stat Response sleep");
+ } catch (final InterruptedException e) {
+ LOG.warn("statCollector has been interrupted waiting stat Response sleep", e);
+ }
}
@Override
@Override
public FlowOnNode readFlow(final Node node, final org.opendaylight.controller.sal.flowprogrammer.Flow targetFlow, final boolean cached) {
FlowOnNode ret = null;
- final Table table = readConfigTable(node, OPENFLOWV10_TABLE_ID);
+ final Table table = readOperationalTable(node, OPENFLOWV10_TABLE_ID);
if (table != null) {
final List<Flow> flows = table.getFlow();
InventoryAndReadAdapter.LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
@Override
public NodeTableStatistics readNodeTable(final NodeTable nodeTable, final boolean cached) {
NodeTableStatistics nodeStats = null;
- final Table table = readConfigTable(nodeTable.getNode(), (short) nodeTable.getID());
+ final Table table = readOperationalTable(nodeTable.getNode(), (short) nodeTable.getID());
if (table != null) {
final FlowTableStatisticsData tableStats = table.getAugmentation(FlowTableStatisticsData.class);
if (tableStats != null) {
return nodeStats;
}
- @Override
- public void onNodeConnectorRemoved(final NodeConnectorRemoved update) {
+ public void onNodeConnectorRemovedInternal(final NodeConnectorRemoved update) {
// Never received
}
- @Override
- public void onNodeRemoved(final NodeRemoved notification) {
+ public void onNodeRemovedInternal(final NodeRemoved notification) {
this.removeNodeConnectors(notification.getNodeRef().getValue());
try {
final Node aDNode = NodeMapping.toADNode(notification.getNodeRef());
}
}
- @Override
- public void onNodeConnectorUpdated(final NodeConnectorUpdated update) {
+ public void onNodeConnectorUpdatedInternal(final NodeConnectorUpdated update) {
final NodeConnectorRef ref = update.getNodeConnectorRef();
final UpdateType updateType;
if (!this.isKnownNodeConnector(ref.getValue())) {
}
}
- @Override
- public void onNodeUpdated(final NodeUpdated notification) {
+ public void onNodeUpdatedInternal(final NodeUpdated notification) {
final NodeRef ref = notification.getNodeRef();
final UpdateType updateType;
for (final IPluginOutReadService statsPublisher : getStatisticsPublisher()) {
statsPublisher.nodeFlowStatisticsUpdated(aDNode, adsalFlowsStatistics);
}
+
+ updateTransactionCache(notification, notification.getId(), !notification.isMoreReplies());
}
/**
private List<PathArgument> removeNodeConnectors(final InstanceIdentifier<? extends Object> nodeIdentifier) {
return this.nodeToNodeConnectorsMap.remove(Iterables.get(nodeIdentifier.getPathArguments(), 1));
}
+
+ private <T extends TransactionAware> void updateTransactionCache(T notification, NodeId nodeId, boolean lastNotification) {
+
+ String cacheKey = buildCacheKey(notification.getTransactionId(), nodeId);
+ TransactionNotificationList<T> txnList = (TransactionNotificationList<T>) txCache.getIfPresent(cacheKey);
+ final Optional<TransactionNotificationList<T>> optional = Optional.<TransactionNotificationList<T>>fromNullable(txnList);
+ if (optional.isPresent()) {
+ LOG.info("updateTransactionCache cacheKey:{}, lastNotification:{}, txnList-present:{}", cacheKey, lastNotification, optional.isPresent());
+ TransactionNotificationList<T> txn = optional.get();
+ txn.addNotification(notification);
+ txn.setAllNotificationsGathered(lastNotification);
+ }
+ }
+
+ private class TransactionNotificationList<T extends TransactionAware> {
+ private TransactionId id;
+ private String nId;
+ private List<T> notifications;
+ private boolean allNotificationsGathered;
+
+ public TransactionNotificationList(TransactionId id, String nId) {
+ this.nId = nId;
+ this.id = id;
+ notifications = new ArrayList<T>();
+ }
+
+ public void addNotification(T notification) {
+ notifications.add(notification);
+ }
+
+ public void setAllNotificationsGathered(boolean allNotificationsGathered) {
+ this.allNotificationsGathered = allNotificationsGathered;
+ }
+
+ public boolean areAllNotificationsGathered() {
+ return allNotificationsGathered;
+ }
+
+ public List<T> getNotifications() {
+ return notifications;
+ }
+
+ }
+
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class NCDataChangeListener extends AbstractDataChangeListener<NodeConnector> {
+ private static final Logger LOG = LoggerFactory.getLogger(NodeDataChangeListener.class);
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+ public NCDataChangeListener (final InventoryAndReadAdapter adapter, final DataBroker db) {
+ super(adapter,db,NodeConnector.class);
+ }
+
+ @Override
+ protected void add(InstanceIdentifier<NodeConnector> createKeyIdent, NodeConnector node) {
+ FlowCapableNodeConnector fcnc = node.getAugmentation(FlowCapableNodeConnector.class);
+ if(fcnc != null) {
+ FlowCapableNodeConnectorUpdatedBuilder fcncub = new FlowCapableNodeConnectorUpdatedBuilder(fcnc);
+ NodeConnectorUpdatedBuilder builder = new NodeConnectorUpdatedBuilder();
+ builder.setId(node.getId());
+ builder.setNodeConnectorRef(new NodeConnectorRef(createKeyIdent));
+ builder.addAugmentation(FlowCapableNodeConnectorUpdated.class, fcncub.build());
+ adapter.onNodeConnectorUpdatedInternal(builder.build());
+ }
+ }
+
+ @Override
+ protected void update(InstanceIdentifier<NodeConnector> updateKeyIdent, NodeConnector original,
+ NodeConnector update) {
+ add(updateKeyIdent,update);
+ }
+
+ @Override
+ protected void remove(InstanceIdentifier<NodeConnector> ident, NodeConnector removeValue) {
+ NodeConnectorRemovedBuilder builder = new NodeConnectorRemovedBuilder();
+ builder.setNodeConnectorRef(new NodeConnectorRef(ident));
+ adapter.onNodeConnectorRemovedInternal(builder.build());
+ }
+
+ protected InstanceIdentifier<NodeConnector> getWildCardPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class).child(NodeConnector.class);
+ }
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class NodeDataChangeListener extends AbstractDataChangeListener<Node> {
+ private static final Logger LOG = LoggerFactory.getLogger(NodeDataChangeListener.class);
+
+
+ public NodeDataChangeListener (final InventoryAndReadAdapter adapter, final DataBroker db) {
+ super(adapter,db,Node.class);
+ }
+
+ protected void add(InstanceIdentifier<Node> createKeyIdent, Node node) {
+ FlowCapableNode fcn = node.getAugmentation(FlowCapableNode.class);
+ if(fcn != null) {
+ FlowCapableNodeUpdatedBuilder fcbnu = new FlowCapableNodeUpdatedBuilder(fcn);
+ NodeUpdatedBuilder builder = new NodeUpdatedBuilder();
+ builder.setId(node.getId());
+ builder.setNodeRef(new NodeRef(createKeyIdent));
+ builder.setNodeConnector(node.getNodeConnector());
+ builder.addAugmentation(FlowCapableNodeUpdated.class, fcbnu.build());
+ adapter.onNodeUpdatedInternal(builder.build());
+ }
+ }
+
+ protected void update(InstanceIdentifier<Node> updateKeyIdent, Node original,
+ Node update) {
+ this.add(updateKeyIdent, update);
+ }
+
+ protected void remove(InstanceIdentifier<Node> ident, Node removeValue) {
+ NodeRemovedBuilder builder = new NodeRemovedBuilder();
+ builder.setNodeRef(new NodeRef(ident));
+ adapter.onNodeRemovedInternal(builder.build());
+ }
+
+ protected InstanceIdentifier<Node> getWildCardPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class);
+ }
+
+}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
-import java.math.BigInteger;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.List;
-import java.util.regex.Pattern;
import org.opendaylight.controller.sal.common.util.Arguments;
import org.opendaylight.controller.sal.core.AdvertisedBandwidth;
import org.opendaylight.controller.sal.core.Bandwidth;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.math.BigInteger;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.List;
+import java.util.regex.Pattern;
+
public final class NodeMapping {
private static final Logger LOG = LoggerFactory
* @param aDNode
* @return
*/
- private static NodeId toNodeId(org.opendaylight.controller.sal.core.Node aDNode) {
- return new NodeId(aDNode.getType() + ":" + String.valueOf(aDNode.getID()));
+ public static NodeId toNodeId(org.opendaylight.controller.sal.core.Node aDNode) {
+ String targetPrefix = null;
+ if (NodeIDType.OPENFLOW.equals(aDNode.getType())) {
+ targetPrefix = OPENFLOW_ID_PREFIX;
+ } else {
+ targetPrefix = aDNode.getType() + ":";
+ }
+
+ return new NodeId(targetPrefix + String.valueOf(aDNode.getID()));
+ }
+
+ /**
+ * @param aDNode
+ * @return md-sal {@link NodeKey}
+ */
+ public static NodeKey toNodeKey(org.opendaylight.controller.sal.core.Node aDNode) {
+ return new NodeKey(toNodeId(aDNode));
}
public static String toNodeConnectorType(final NodeConnectorId ncId, final NodeId nodeId) {
import java.util.Collection;
import java.util.Collections;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
inv.setNodeConnectorStatisticsService(session.getRpcService(OpendaylightPortStatisticsService.class));
inv.setTopologyDiscovery(session.getRpcService(FlowTopologyDiscoveryService.class));
inv.setDataProviderService(session.getSALService(DataProviderService.class));
+
+ final NodeDataChangeListener ndcl = new NodeDataChangeListener(inv,session.getSALService(DataBroker.class));
+ final NCDataChangeListener ncdcl = new NCDataChangeListener(inv,session.getSALService(DataBroker.class));
+
// FIXME: remember registration for clean shutdown
subscribe.registerNotificationListener(inv);
*/
package org.opendaylight.controller.sal.compatibility.topology;
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CopyOnWriteArrayList;
-
+import com.google.common.base.Function;
+import com.google.common.collect.FluentIterable;
import org.opendaylight.controller.md.sal.binding.util.TypeSafeDataReader;
import org.opendaylight.controller.sal.compatibility.NodeMapping;
import org.opendaylight.controller.sal.core.ConstructionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Function;
-import com.google.common.collect.FluentIterable;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.regex.Pattern;
+
+import static com.google.common.base.Preconditions.checkNotNull;
public final class TopologyMapping {
private static final Logger LOG = LoggerFactory.getLogger(TopologyMapping.class);
+ private final static Pattern NUMBERS_ONLY = Pattern.compile("[0-9]+");
private TopologyMapping() {
throw new UnsupportedOperationException("Utility class. Instantiation is not allowed.");
public static NodeConnector toADNodeConnector(final TpId source, final NodeId nodeId) throws ConstructionException {
checkNotNull(source);
- return new NodeConnector(NodeConnectorIDType.OPENFLOW, Short.valueOf(toADNodeConnectorId(source)), toADNode(nodeId));
+ String nodeConnectorIdStripped = toADNodeConnectorId(source);
+ if (NUMBERS_ONLY.matcher(nodeConnectorIdStripped).matches()) {
+ return new NodeConnector(NodeConnectorIDType.OPENFLOW, Short.valueOf(nodeConnectorIdStripped), toADNode(nodeId));
+ }
+ LOG.debug("NodeConnectorId does not match openflow id type, using " + NodeMapping.MD_SAL_TYPE + "instead");
+ NodeConnectorIDType.registerIDType(NodeMapping.MD_SAL_TYPE, String.class, NodeMapping.MD_SAL_TYPE);
+ return new NodeConnector(NodeMapping.MD_SAL_TYPE, nodeConnectorIdStripped, toADNode(nodeId));
}
public static String toADNodeConnectorId(final TpId nodeConnectorId) {
public static Node toADNode(final NodeId nodeId) throws ConstructionException {
checkNotNull(nodeId);
- return new Node(NodeIDType.OPENFLOW, Long.valueOf(toADNodeId(nodeId)));
+ String nodeIdStripped = toADNodeId(nodeId);
+ if (NUMBERS_ONLY.matcher(nodeIdStripped).matches()) {
+ return new Node(NodeIDType.OPENFLOW, Long.valueOf(nodeIdStripped));
+ }
+ LOG.debug("NodeId does not match openflow id type, using " + NodeMapping.MD_SAL_TYPE + "instead");
+ NodeIDType.registerIDType(NodeMapping.MD_SAL_TYPE, String.class);
+ return new Node(NodeMapping.MD_SAL_TYPE, nodeId.getValue());
}
}
Assert.assertEquals(0xCC4E241C4A000000L, NodeMapping.openflowFullNodeIdToLong("14721743935839928320").longValue());
}
+ /**
+ * Test method for
+ * {@link org.opendaylight.controller.sal.compatibility.NodeMapping#toNodeKey(org.opendaylight.controller.sal.core.Node)}
+ * .
+ * @throws ConstructionException
+ */
+ @Test
+ public void testToNodeKey() throws ConstructionException {
+ org.opendaylight.controller.sal.core.Node aDNode = new org.opendaylight.controller.sal.core.Node(NodeIDType.OPENFLOW, 42L);
+ NodeKey nodeKey = NodeMapping.toNodeKey(aDNode);
+ Assert.assertEquals("openflow:42", nodeKey.getId().getValue());
+ }
+
/**
* @param nodeId
* @param portId
Assert.assertEquals("OF|00:00:00:00:00:00:00:01", observedNode.toString());
}
+ /**
+ * Test method for {@link org.opendaylight.controller.sal.compatibility.topology.TopologyMapping#toADNodeConnector(org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId, org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId)}.
+ * @throws ConstructionException
+ */
+ @Test
+ public void bug1309ToADNodeConnector() throws ConstructionException {
+ NodeId nodeId = new NodeId("some_unknown_node");
+ TpId source = new TpId("192.168.0.1");
+ NodeConnector observedNodeConnector = TopologyMapping.toADNodeConnector(source, nodeId);
+
+ Assert.assertEquals("MD_SAL_DEPRECATED|192.168.0.1@MD_SAL_DEPRECATED|some_unknown_node", observedNodeConnector.toString());
+ }
+
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller.md</groupId>
<artifactId>forwardingrules-manager</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller.md</groupId>
<artifactId>inventory-manager</artifactId>
*/
package org.opendaylight.controller.md.inventory.manager;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
+import java.util.ArrayList;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
+
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Preconditions;
+
class FlowCapableInventoryProvider implements AutoCloseable, Runnable, TransactionChainListener {
private static final Logger LOG = LoggerFactory.getLogger(FlowCapableInventoryProvider.class);
private static final int QUEUE_DEPTH = 500;
final NodeChangeCommiter changeCommiter = new NodeChangeCommiter(FlowCapableInventoryProvider.this);
this.listenerRegistration = this.notificationService.registerNotificationListener(changeCommiter);
- this.txChain = dataBroker.createTransactionChain(this);
+ this.txChain = (dataBroker.createTransactionChain(this));
thread = new Thread(this);
thread.setDaemon(true);
thread.setName("FlowCapableInventoryProvider");
void enqueue(final InventoryOperation op) {
try {
queue.put(op);
- } catch (InterruptedException e) {
+ } catch (final InterruptedException e) {
LOG.warn("Failed to enqueue operation {}", op, e);
}
}
- @Override
- public void close() throws InterruptedException {
- LOG.info("Flow Capable Inventory Provider stopped.");
- if (this.listenerRegistration != null) {
- try {
- this.listenerRegistration.close();
- } catch (Exception e) {
- LOG.error("Failed to stop inventory provider", e);
- }
- listenerRegistration = null;
- }
-
- if (thread != null) {
- thread.interrupt();
- thread.join();
- thread = null;
- }
- if (txChain != null) {
- txChain.close();
- txChain = null;
- }
-
-
- }
-
@Override
public void run() {
try {
for (; ; ) {
InventoryOperation op = queue.take();
-
- final ReadWriteTransaction tx = txChain.newReadWriteTransaction();
- LOG.debug("New operations available, starting transaction {}", tx.getIdentifier());
-
int ops = 0;
+ final ArrayList<InventoryOperation> opsToApply = new ArrayList<>(MAX_BATCH);
do {
- op.applyOperation(tx);
-
+ opsToApply.add(op);
ops++;
if (ops < MAX_BATCH) {
op = queue.poll();
op = null;
}
} while (op != null);
-
- LOG.debug("Processed {} operations, submitting transaction {}", ops, tx.getIdentifier());
-
- final CheckedFuture<Void, TransactionCommitFailedException> result = tx.submit();
- Futures.addCallback(result, new FutureCallback<Void>() {
- @Override
- public void onSuccess(final Void aVoid) {
- //NOOP
- }
-
- @Override
- public void onFailure(final Throwable throwable) {
- LOG.error("Transaction {} failed.", tx.getIdentifier(), throwable);
- }
- });
+ submitOperations(opsToApply);
}
- } catch (InterruptedException e) {
+ } catch (final InterruptedException e) {
LOG.info("Processing interrupted, terminating", e);
}
}
}
+ /**
+ * Starts new empty transaction, custimizes it with submitted operations
+ * and submit it to data broker.
+ *
+ * If transaction chain failed during customization of transaction
+ * it allocates new chain and empty transaction and customizes it
+ * with submitted operations.
+ *
+ * This does not retry failed transaction. It only retries it when
+ * chain failed during customization of transaction chain.
+ *
+ * @param opsToApply
+ */
+ private void submitOperations(final ArrayList<InventoryOperation> opsToApply) {
+ final ReadWriteTransaction tx = createCustomizedTransaction(opsToApply);
+ LOG.debug("Processed {} operations, submitting transaction {}", opsToApply.size(), tx.getIdentifier());
+ try {
+ tx.submit();
+ } catch (final IllegalStateException e) {
+ /*
+ * Transaction chain failed during doing batch, so we need to null
+ * tx chain and continue processing queue.
+ *
+ * We fail current txChain which was allocated with createTransaction.
+ */
+ failCurrentChain(txChain);
+ /*
+ * We will retry transaction once in order to not loose any data.
+ *
+ */
+ final ReadWriteTransaction retryTx = createCustomizedTransaction(opsToApply);
+ retryTx.submit();
+ }
+ }
+
+ /**
+ * Creates new empty ReadWriteTransaction. If transaction chain
+ * was failed, it will allocate new transaction chain
+ * and assign it with this Operation Executor.
+ *
+ * This call is synchronized to prevent reace with {@link #failCurrentChain(TransactionChain)}.
+ *
+ * @return New Empty ReadWrite transaction, which continues this chain or starts new transaction
+ * chain.
+ */
+ private synchronized ReadWriteTransaction newEmptyTransaction() {
+ try {
+ if(txChain == null) {
+ // Chain was broken so we need to replace it.
+ txChain = dataBroker.createTransactionChain(this);
+ }
+ return txChain.newReadWriteTransaction();
+ } catch (final IllegalStateException e) {
+ LOG.debug("Chain is broken, need to allocate new transaction chain.",e);
+ /*
+ * Chain was broken by previous transaction,
+ * but there was race between this.
+ * Chain will be closed by #onTransactionChainFailed method.
+ */
+ txChain = dataBroker.createTransactionChain(this);
+ return txChain.newReadWriteTransaction();
+ }
+ }
+
+ /**
+ * Creates customized not-submitted transaction, which is ready to be submitted.
+ *
+ * @param opsToApply Operations which are used to customize transaction.
+ * @return Non-empty transaction.
+ */
+ private ReadWriteTransaction createCustomizedTransaction(final ArrayList<InventoryOperation> opsToApply) {
+ final ReadWriteTransaction tx = newEmptyTransaction();
+ for(final InventoryOperation op : opsToApply) {
+ op.applyOperation(tx);
+ }
+ return tx;
+ }
+
+ private synchronized void failCurrentChain(final TransactionChain<?, ?> chain) {
+ if(txChain == chain) {
+ txChain = null;
+ }
+ }
+
@Override
public void onTransactionChainFailed(final TransactionChain<?, ?> chain, final AsyncTransaction<?, ?> transaction,
final Throwable cause) {
LOG.error("Failed to export Flow Capable Inventory, Transaction {} failed.", transaction.getIdentifier(), cause);
-
+ chain.close();
+ if(txChain == chain) {
+ // Current chain is broken, so we will null it, in order to not use it.
+ failCurrentChain(chain);
+ }
}
@Override
public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
// NOOP
}
+
+ @Override
+ public void close() throws InterruptedException {
+ LOG.info("Flow Capable Inventory Provider stopped.");
+ if (this.listenerRegistration != null) {
+ try {
+ this.listenerRegistration.close();
+ } catch (final Exception e) {
+ LOG.error("Failed to stop inventory provider", e);
+ }
+ listenerRegistration = null;
+ }
+
+ if (thread != null) {
+ thread.interrupt();
+ thread.join();
+ thread = null;
+ }
+ if (txChain != null) {
+ try {
+ txChain.close();
+ } catch (final IllegalStateException e) {
+ // It is possible chain failed and was closed by #onTransactionChainFailed
+ LOG.debug("Chain was already closed.");
+ }
+ txChain = null;
+ }
+ }
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>md-sal-config</artifactId>
<description>Configuration files for md-sal</description>
<parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>model-flow-base</artifactId>
<packaging>bundle</packaging>
type uint32;
}
- leaf perc_level {
+ leaf prec_level {
description "Number of drop precedence level to add";
type uint8;
}
<parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>model-flow-service</artifactId>
<packaging>bundle</packaging>
}
augment "/inv:nodes/inv:node/table" {
- ext:augment-identifier "flow-cookie-mapping";
- list flow-cookie-map {
- key "cookie";
- leaf cookie {
- type flow:flow-cookie;
+ ext:augment-identifier "flow-hash-id-mapping";
+ description "Flow is identified by match and priority on device. So Operational/DS
+ has to simulate that behavior and contract between FlowId and match+priority
+ identification should represent Flow hashCode. Flow has to contain only
+ match priority and flowCookie for create a hashCode";
+ list flow-hash-id-map {
+ key "hash";
+ leaf hash {
+ type string;
}
- leaf-list flow-ids {
+ leaf flow-id {
type flow-id;
}
}
--- /dev/null
+module node-config {
+ namespace "urn:opendaylight:module:config";
+ prefix node-config;
+
+ import flow-capable-transaction {prefix tr;}
+ import opendaylight-inventory {prefix inv;revision-date "2013-08-19";}
+
+ revision "2014-10-15" {
+ description "Initial revision of node configuration service";
+ }
+
+ grouping node-ref {
+ uses "inv:node-context-ref";
+ }
+
+
+
+ /** Base configuration structure **/
+ grouping node-config {
+ leaf flag {
+ type string;
+ description "Switch config flag. Expected values FRAGNORMAL, OFPCFRAGDROP, OFPCFRAGREASM, OFPCFRAGMASK";
+ }
+ leaf miss-search-length{
+ type uint16;
+ }
+ }
+
+ rpc set-config {
+ input {
+ uses node-config;
+ uses tr:transaction-aware;
+ uses node-ref;
+ }
+ output {
+ uses tr:transaction-aware;
+ }
+ }
+}
<parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>model-flow-statistics</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>model-inventory</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>model-topology</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller.model</groupId>
<artifactId>model-parent</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
<modules>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-akka-raft</artifactId>
<packaging>bundle</packaging>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-commons</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
import akka.japi.Creator;
import com.google.common.base.Optional;
import com.google.protobuf.ByteString;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.example.messages.KeyValue;
import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
import org.opendaylight.controller.cluster.example.messages.PrintRole;
public class ExampleActor extends RaftActor {
private final Map<String, String> state = new HashMap();
+ private final DataPersistenceProvider dataPersistenceProvider;
private long persistIdentifier = 1;
public ExampleActor(String id, Map<String, String> peerAddresses,
Optional<ConfigParams> configParams) {
super(id, peerAddresses, configParams);
+ this.dataPersistenceProvider = new PersistentDataProvider();
}
public static Props props(final String id, final Map<String, String> peerAddresses,
});
}
- @Override public void onReceiveCommand(Object message){
+ @Override public void onReceiveCommand(Object message) throws Exception{
if(message instanceof KeyValue){
if(isLeader()) {
String persistId = Long.toString(persistIdentifier++);
try {
bs = fromObject(state);
} catch (Exception e) {
- LOG.error("Exception in creating snapshot", e);
+ LOG.error(e, "Exception in creating snapshot");
}
getSelf().tell(new CaptureSnapshotReply(bs), null);
}
try {
state.putAll((HashMap) toObject(snapshot));
} catch (Exception e) {
- LOG.error("Exception in applying snapshot", e);
+ LOG.error(e, "Exception in applying snapshot");
}
if(LOG.isDebugEnabled()) {
- LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+ LOG.debug("Snapshot applied to state : {}", ((HashMap) state).size());
}
}
}
- @Override public void onReceiveRecover(Object message) {
+ @Override
+ protected DataPersistenceProvider persistence() {
+ return dataPersistenceProvider;
+ }
+
+ @Override public void onReceiveRecover(Object message)throws Exception {
super.onReceiveRecover(message);
}
@Override public String persistenceId() {
return getId();
}
+
+ @Override
+ protected void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ protected void appendRecoveredLogEntry(Payload data) {
+ }
+
+ @Override
+ protected void applyCurrentLogRecoveryBatch() {
+ }
+
+ @Override
+ protected void onRecoveryComplete() {
+ }
+
+ @Override
+ protected void applyRecoverySnapshot(ByteString snapshot) {
+ }
}
*/
public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
- protected List<ReplicatedLogEntry> journal;
+ // We define this as ArrayList so we can use ensureCapacity.
+ protected ArrayList<ReplicatedLogEntry> journal;
protected ByteString snapshot;
protected long snapshotIndex = -1;
protected long snapshotTerm = -1;
// to be used for rollback during save snapshot failure
- protected List<ReplicatedLogEntry> snapshottedJournal;
+ protected ArrayList<ReplicatedLogEntry> snapshottedJournal;
protected ByteString previousSnapshot;
protected long previousSnapshotIndex = -1;
protected long previousSnapshotTerm = -1;
journal.add(replicatedLogEntry);
}
+ @Override
+ public void increaseJournalLogCapacity(int amount) {
+ journal.ensureCapacity(journal.size() + amount);
+ }
+
@Override
public List<ReplicatedLogEntry> getFrom(long logEntryIndex) {
return getFrom(logEntryIndex, journal.size());
@Override
public void snapshotCommit() {
- snapshottedJournal.clear();
snapshottedJournal = null;
previousSnapshotIndex = -1;
previousSnapshotTerm = -1;
@Override
public void snapshotRollback() {
snapshottedJournal.addAll(journal);
- journal.clear();
journal = snapshottedJournal;
snapshottedJournal = null;
*
* @return long
*/
- public long getSnapshotBatchCount();
+ long getSnapshotBatchCount();
/**
* The interval at which a heart beat message will be sent to the remote
*
* @return FiniteDuration
*/
- public FiniteDuration getHeartBeatInterval();
+ FiniteDuration getHeartBeatInterval();
/**
* The interval in which a new election would get triggered if no leader is found
*
* @return FiniteDuration
*/
- public FiniteDuration getElectionTimeOutInterval();
+ FiniteDuration getElectionTimeOutInterval();
/**
* The maximum election time variance. The election is scheduled using both
*
* @return int
*/
- public int getElectionTimeVariance();
+ int getElectionTimeVariance();
/**
* The size (in bytes) of the snapshot chunk sent from Leader
*/
- public int getSnapshotChunkSize();
+ int getSnapshotChunkSize();
+
+ /**
+ * The number of journal log entries to batch on recovery before applying.
+ */
+ int getJournalRecoveryLogBatchSize();
}
private static final int SNAPSHOT_BATCH_COUNT = 20000;
+ private static final int JOURNAL_RECOVERY_LOG_BATCH_SIZE = 1000;
+
/**
* The maximum election time variance
*/
private static final int ELECTION_TIME_MAX_VARIANCE = 100;
- private final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
+ private static final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
/**
new FiniteDuration(100, TimeUnit.MILLISECONDS);
+ private FiniteDuration heartBeatInterval = HEART_BEAT_INTERVAL;
+ private long snapshotBatchCount = SNAPSHOT_BATCH_COUNT;
+ private int journalRecoveryLogBatchSize = JOURNAL_RECOVERY_LOG_BATCH_SIZE;
+
+ public void setHeartBeatInterval(FiniteDuration heartBeatInterval) {
+ this.heartBeatInterval = heartBeatInterval;
+ }
+
+ public void setSnapshotBatchCount(long snapshotBatchCount) {
+ this.snapshotBatchCount = snapshotBatchCount;
+ }
+
+ public void setJournalRecoveryLogBatchSize(int journalRecoveryLogBatchSize) {
+ this.journalRecoveryLogBatchSize = journalRecoveryLogBatchSize;
+ }
+
@Override
public long getSnapshotBatchCount() {
- return SNAPSHOT_BATCH_COUNT;
+ return snapshotBatchCount;
}
@Override
public FiniteDuration getHeartBeatInterval() {
- return HEART_BEAT_INTERVAL;
+ return heartBeatInterval;
}
-
@Override
public FiniteDuration getElectionTimeOutInterval() {
// returns 2 times the heart beat interval
public int getSnapshotChunkSize() {
return SNAPSHOT_CHUNK_SIZE;
}
+
+ @Override
+ public int getJournalRecoveryLogBatchSize() {
+ return journalRecoveryLogBatchSize;
+ }
}
import akka.persistence.SaveSnapshotSuccess;
import akka.persistence.SnapshotOffer;
import akka.persistence.SnapshotSelectionCriteria;
-import akka.persistence.UntypedPersistentActor;
import com.google.common.base.Optional;
+import com.google.common.base.Stopwatch;
import com.google.protobuf.ByteString;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
-import org.opendaylight.controller.cluster.raft.behaviors.Candidate;
import org.opendaylight.controller.cluster.raft.behaviors.Follower;
-import org.opendaylight.controller.cluster.raft.behaviors.Leader;
import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
* <li> when a snapshot should be saved </li>
* </ul>
*/
-public abstract class RaftActor extends UntypedPersistentActor {
+public abstract class RaftActor extends AbstractUntypedPersistentActor {
protected final LoggingAdapter LOG =
Logging.getLogger(getContext().system(), this);
* This context should NOT be passed directly to any other actor it is
* only to be consumed by the RaftActorBehaviors
*/
- protected RaftActorContext context;
+ private final RaftActorContext context;
/**
* The in-memory journal
private volatile boolean hasSnapshotCaptureInitiated = false;
+ private Stopwatch recoveryTimer;
+
+ private int currentRecoveryBatchCount;
+
public RaftActor(String id, Map<String, String> peerAddresses) {
this(id, peerAddresses, Optional.<ConfigParams>absent());
}
LOG);
}
- @Override public void onReceiveRecover(Object message) {
- if (message instanceof SnapshotOffer) {
- LOG.info("SnapshotOffer called..");
- SnapshotOffer offer = (SnapshotOffer) message;
- Snapshot snapshot = (Snapshot) offer.snapshot();
+ private void initRecoveryTimer() {
+ if(recoveryTimer == null) {
+ recoveryTimer = new Stopwatch();
+ recoveryTimer.start();
+ }
+ }
- // Create a replicated log with the snapshot information
- // The replicated log can be used later on to retrieve this snapshot
- // when we need to install it on a peer
- replicatedLog = new ReplicatedLogImpl(snapshot);
+ @Override
+ public void preStart() throws Exception {
+ LOG.info("Starting recovery for {} with journal batch size {}", persistenceId(),
+ context.getConfigParams().getJournalRecoveryLogBatchSize());
- context.setReplicatedLog(replicatedLog);
- context.setLastApplied(snapshot.getLastAppliedIndex());
- context.setCommitIndex(snapshot.getLastAppliedIndex());
+ super.preStart();
+ }
- LOG.info("Applied snapshot to replicatedLog. " +
- "snapshotIndex={}, snapshotTerm={}, journal-size={}",
- replicatedLog.snapshotIndex, replicatedLog.snapshotTerm,
- replicatedLog.size()
- );
+ @Override
+ public void handleRecover(Object message) {
+ if(persistence().isRecoveryApplicable()) {
+ if (message instanceof SnapshotOffer) {
+ onRecoveredSnapshot((SnapshotOffer) message);
+ } else if (message instanceof ReplicatedLogEntry) {
+ onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
+ } else if (message instanceof ApplyLogEntries) {
+ onRecoveredApplyLogEntries((ApplyLogEntries) message);
+ } else if (message instanceof DeleteEntries) {
+ replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+ } else if (message instanceof UpdateElectionTerm) {
+ context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+ ((UpdateElectionTerm) message).getVotedFor());
+ } else if (message instanceof RecoveryCompleted) {
+ onRecoveryCompletedMessage();
+ }
+ } else {
+ if (message instanceof RecoveryCompleted) {
+ // Delete all the messages from the akka journal so that we do not end up with consistency issues
+ // Note I am not using the dataPersistenceProvider and directly using the akka api here
+ deleteMessages(lastSequenceNr());
- // Apply the snapshot to the actors state
- applySnapshot(ByteString.copyFrom(snapshot.getState()));
+ // Delete all the akka snapshots as they will not be needed
+ deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), scala.Long.MaxValue()));
+
+ onRecoveryComplete();
+ currentBehavior = new Follower(context);
+ onStateChanged();
+ }
+ }
+ }
- } else if (message instanceof ReplicatedLogEntry) {
- ReplicatedLogEntry logEntry = (ReplicatedLogEntry) message;
- LOG.info("Received ReplicatedLogEntry for recovery:{}", logEntry.getIndex());
- replicatedLog.append(logEntry);
+ private void onRecoveredSnapshot(SnapshotOffer offer) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("SnapshotOffer called..");
+ }
- } else if (message instanceof ApplyLogEntries) {
- ApplyLogEntries ale = (ApplyLogEntries) message;
+ initRecoveryTimer();
- LOG.info("Received ApplyLogEntries for recovery, applying to state:{} to {}",
- context.getLastApplied() + 1, ale.getToIndex());
+ Snapshot snapshot = (Snapshot) offer.snapshot();
- for (long i = context.getLastApplied() + 1; i <= ale.getToIndex(); i++) {
- applyState(null, "recovery", replicatedLog.get(i).getData());
- }
- context.setLastApplied(ale.getToIndex());
- context.setCommitIndex(ale.getToIndex());
+ // Create a replicated log with the snapshot information
+ // The replicated log can be used later on to retrieve this snapshot
+ // when we need to install it on a peer
+ replicatedLog = new ReplicatedLogImpl(snapshot);
+
+ context.setReplicatedLog(replicatedLog);
+ context.setLastApplied(snapshot.getLastAppliedIndex());
+ context.setCommitIndex(snapshot.getLastAppliedIndex());
+
+ Stopwatch timer = new Stopwatch();
+ timer.start();
+
+ // Apply the snapshot to the actors state
+ applyRecoverySnapshot(ByteString.copyFrom(snapshot.getState()));
+
+ timer.stop();
+ LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
+ replicatedLog.size(), persistenceId(), timer.toString(),
+ replicatedLog.snapshotIndex, replicatedLog.snapshotTerm);
+ }
- } else if (message instanceof DeleteEntries) {
- replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+ private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received ReplicatedLogEntry for recovery: {}", logEntry.getIndex());
+ }
+
+ replicatedLog.append(logEntry);
+ }
+
+ private void onRecoveredApplyLogEntries(ApplyLogEntries ale) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received ApplyLogEntries for recovery, applying to state: {} to {}",
+ context.getLastApplied() + 1, ale.getToIndex());
+ }
+
+ for (long i = context.getLastApplied() + 1; i <= ale.getToIndex(); i++) {
+ batchRecoveredLogEntry(replicatedLog.get(i));
+ }
+
+ context.setLastApplied(ale.getToIndex());
+ context.setCommitIndex(ale.getToIndex());
+ }
+
+ private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+ initRecoveryTimer();
+
+ int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
+ if(currentRecoveryBatchCount == 0) {
+ startLogRecoveryBatch(batchSize);
+ }
- } else if (message instanceof UpdateElectionTerm) {
- context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
- ((UpdateElectionTerm) message).getVotedFor());
+ appendRecoveredLogEntry(logEntry.getData());
- } else if (message instanceof RecoveryCompleted) {
- LOG.info(
- "RecoveryCompleted - Switching actor to Follower - " +
- "Persistence Id = " + persistenceId() +
- " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
- "journal-size={}",
- replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
- replicatedLog.snapshotTerm, replicatedLog.size());
- currentBehavior = switchBehavior(RaftState.Follower);
- onStateChanged();
+ if(++currentRecoveryBatchCount >= batchSize) {
+ endCurrentLogRecoveryBatch();
}
}
- @Override public void onReceiveCommand(Object message) {
+ private void endCurrentLogRecoveryBatch() {
+ applyCurrentLogRecoveryBatch();
+ currentRecoveryBatchCount = 0;
+ }
+
+ private void onRecoveryCompletedMessage() {
+ if(currentRecoveryBatchCount > 0) {
+ endCurrentLogRecoveryBatch();
+ }
+
+ onRecoveryComplete();
+
+ String recoveryTime = "";
+ if(recoveryTimer != null) {
+ recoveryTimer.stop();
+ recoveryTime = " in " + recoveryTimer.toString();
+ recoveryTimer = null;
+ }
+
+ LOG.info(
+ "Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
+ "Persistence Id = " + persistenceId() +
+ " Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
+ "journal-size={}",
+ replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
+ replicatedLog.snapshotTerm, replicatedLog.size());
+
+ currentBehavior = new Follower(context);
+ onStateChanged();
+ }
+
+ @Override public void handleCommand(Object message) {
if (message instanceof ApplyState){
ApplyState applyState = (ApplyState) message;
} else if (message instanceof ApplyLogEntries){
ApplyLogEntries ale = (ApplyLogEntries) message;
- LOG.info("Persisting ApplyLogEntries with index={}", ale.getToIndex());
- persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Persisting ApplyLogEntries with index={}", ale.getToIndex());
+ }
+ persistence().persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
@Override
public void apply(ApplyLogEntries param) throws Exception {
}
SaveSnapshotSuccess success = (SaveSnapshotSuccess) message;
LOG.info("SaveSnapshotSuccess received for snapshot");
- context.getReplicatedLog().snapshotCommit();
+ long sequenceNumber = success.metadata().sequenceNr();
- // TODO: Not sure if we want to be this aggressive with trimming stuff
- trimPersistentData(success.metadata().sequenceNr());
+ commitSnapshot(sequenceNumber);
} else if (message instanceof SaveSnapshotFailure) {
SaveSnapshotFailure saveSnapshotFailure = (SaveSnapshotFailure) message;
if (!(message instanceof AppendEntriesMessages.AppendEntries)
&& !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
if(LOG.isDebugEnabled()) {
- LOG.debug("onReceiveCommand: message:" + message.getClass());
+ LOG.debug("onReceiveCommand: message: {}", message.getClass());
}
}
- RaftState state =
- currentBehavior.handleMessage(getSender(), message);
RaftActorBehavior oldBehavior = currentBehavior;
- currentBehavior = switchBehavior(state);
+ currentBehavior = currentBehavior.handleMessage(getSender(), message);
+
if(oldBehavior != currentBehavior){
onStateChanged();
}
return context.getLastApplied();
}
+ protected RaftActorContext getRaftActorContext() {
+ return context;
+ }
+
/**
* setPeerAddress sets the address of a known peer at a later time.
* <p>
context.setPeerAddress(peerId, peerAddress);
}
+ protected void commitSnapshot(long sequenceNumber) {
+ context.getReplicatedLog().snapshotCommit();
+ // TODO: Not sure if we want to be this aggressive with trimming stuff
+ trimPersistentData(sequenceNumber);
+ }
/**
* The applyState method will be called by the RaftActor when some data
protected abstract void applyState(ActorRef clientActor, String identifier,
Object data);
+ /**
+ * This method is called during recovery at the start of a batch of state entries. Derived
+ * classes should perform any initialization needed to start a batch.
+ */
+ protected abstract void startLogRecoveryBatch(int maxBatchSize);
+
+ /**
+ * This method is called during recovery to append state data to the current batch. This method
+ * is called 1 or more times after {@link #startLogRecoveryBatch}.
+ *
+ * @param data the state data
+ */
+ protected abstract void appendRecoveredLogEntry(Payload data);
+
+ /**
+ * This method is called during recovery to reconstruct the state of the actor.
+ *
+ * @param snapshot A snapshot of the state of the actor
+ */
+ protected abstract void applyRecoverySnapshot(ByteString snapshot);
+
+ /**
+ * This method is called during recovery at the end of a batch to apply the current batched
+ * log entries. This method is called after {@link #appendRecoveredLogEntry}.
+ */
+ protected abstract void applyCurrentLogRecoveryBatch();
+
+ /**
+ * This method is called when recovery is complete.
+ */
+ protected abstract void onRecoveryComplete();
+
/**
* This method will be called by the RaftActor when a snapshot needs to be
* created. The derived actor should respond with its current state.
protected abstract void createSnapshot();
/**
- * This method will be called by the RaftActor during recovery to
- * reconstruct the state of the actor.
- * <p/>
- * This method may also be called at any other point during normal
+ * This method can be called at any other point during normal
* operations when the derived actor is out of sync with it's peers
* and the only way to bring it in sync is by applying a snapshot
*
*/
protected abstract void onStateChanged();
- protected void onLeaderChanged(String oldLeader, String newLeader){};
-
- private RaftActorBehavior switchBehavior(RaftState state) {
- if (currentBehavior != null) {
- if (currentBehavior.state() == state) {
- return currentBehavior;
- }
- LOG.info("Switching from state " + currentBehavior.state() + " to "
- + state);
-
- try {
- currentBehavior.close();
- } catch (Exception e) {
- LOG.error(e,
- "Failed to close behavior : " + currentBehavior.state());
- }
-
- } else {
- LOG.info("Switching behavior to " + state);
- }
- RaftActorBehavior behavior = null;
- if (state == RaftState.Candidate) {
- behavior = new Candidate(context);
- } else if (state == RaftState.Follower) {
- behavior = new Follower(context);
- } else {
- behavior = new Leader(context);
- }
-
+ protected abstract DataPersistenceProvider persistence();
-
- return behavior;
- }
+ protected void onLeaderChanged(String oldLeader, String newLeader){};
private void trimPersistentData(long sequenceNumber) {
// Trim akka snapshots
// FIXME : Not sure how exactly the SnapshotSelectionCriteria is applied
// For now guessing that it is ANDed.
- deleteSnapshots(new SnapshotSelectionCriteria(
+ persistence().deleteSnapshots(new SnapshotSelectionCriteria(
sequenceNumber - context.getConfigParams().getSnapshotBatchCount(), 43200000));
// Trim akka journal
- deleteMessages(sequenceNumber);
+ persistence().deleteMessages(sequenceNumber);
}
private String getLeaderAddress(){
}
String peerAddress = context.getPeerAddress(leaderId);
if(LOG.isDebugEnabled()) {
- LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
- + peerAddress);
+ LOG.debug("getLeaderAddress leaderId = {} peerAddress = {}",
+ leaderId, peerAddress);
}
return peerAddress;
captureSnapshot.getLastIndex(), captureSnapshot.getLastTerm(),
captureSnapshot.getLastAppliedIndex(), captureSnapshot.getLastAppliedTerm());
- saveSnapshot(sn);
+ persistence().saveSnapshot(sn);
LOG.info("Persisting of snapshot done:{}", sn.getLogMessage());
// FIXME: Maybe this should be done after the command is saved
journal.subList(adjustedIndex , journal.size()).clear();
- persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
+ persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>(){
@Override public void apply(DeleteEntries param)
throws Exception {
public void appendAndPersist(final ActorRef clientActor,
final String identifier,
final ReplicatedLogEntry replicatedLogEntry) {
- context.getLogger().debug(
- "Append log entry and persist {} ", replicatedLogEntry);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Append log entry and persist {} ", replicatedLogEntry);
+ }
+
// FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
journal.add(replicatedLogEntry);
// persist call and the execution(s) of the associated event
// handler. This also holds for multiple persist calls in context
// of a single command.
- persist(replicatedLogEntry,
+ persistence().persist(replicatedLogEntry,
new Procedure<ReplicatedLogEntry>() {
+ @Override
public void apply(ReplicatedLogEntry evt) throws Exception {
// when a snaphsot is being taken, captureSnapshot != null
if (hasSnapshotCaptureInitiated == false &&
}
- private static class DeleteEntries implements Serializable {
+ static class DeleteEntries implements Serializable {
private final int fromIndex;
private long currentTerm = 0;
private String votedFor = null;
+ @Override
public long getCurrentTerm() {
return currentTerm;
}
+ @Override
public String getVotedFor() {
return votedFor;
}
public void updateAndPersist(long currentTerm, String votedFor){
update(currentTerm, votedFor);
// FIXME : Maybe first persist then update the state
- persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
+ persistence().persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
@Override public void apply(UpdateElectionTerm param)
throws Exception {
}
}
- private static class UpdateElectionTerm implements Serializable {
+ static class UpdateElectionTerm implements Serializable {
private final long currentTerm;
private final String votedFor;
}
}
+ protected class NonPersistentRaftDataProvider extends NonPersistentDataProvider {
+
+ public NonPersistentRaftDataProvider(){
+
+ }
+
+ /**
+ * The way snapshotting works is,
+ * <ol>
+ * <li> RaftActor calls createSnapshot on the Shard
+ * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
+ * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save the snapshot.
+ * The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the RaftActor gets SaveSnapshot
+ * success it commits the snapshot to the in-memory journal. This commitSnapshot is mimicking what is done
+ * in SaveSnapshotSuccess.
+ * </ol>
+ * @param o
+ */
+ @Override
+ public void saveSnapshot(Object o) {
+ // Make saving Snapshot successful
+ commitSnapshot(-1L);
+ }
+ }
+
}
this.LOG = logger;
}
+ @Override
public ActorRef actorOf(Props props){
return context.actorOf(props);
}
+ @Override
public ActorSelection actorSelection(String path){
return context.actorSelection(path);
}
+ @Override
public String getId() {
return id;
}
+ @Override
public ActorRef getActor() {
return actor;
}
+ @Override
public ElectionTerm getTermInformation() {
return termInformation;
}
+ @Override
public long getCommitIndex() {
return commitIndex;
}
this.commitIndex = commitIndex;
}
+ @Override
public long getLastApplied() {
return lastApplied;
}
*/
void append(ReplicatedLogEntry replicatedLogEntry);
+ /**
+ * Optimization method to increase the capacity of the journal log prior to appending entries.
+ *
+ * @param amount the amount to increase by
+ */
+ void increaseJournalLogCapacity(int amount);
+
/**
*
* @param replicatedLogEntry
import akka.actor.ActorRef;
import akka.actor.Cancellable;
+import akka.event.LoggingAdapter;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
-import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
*/
protected final RaftActorContext context;
+ /**
+ *
+ */
+ protected final LoggingAdapter LOG;
+
/**
*
*/
protected AbstractRaftActorBehavior(RaftActorContext context) {
this.context = context;
+ this.LOG = context.getLogger();
}
/**
* @param appendEntries The AppendEntries message
* @return
*/
- protected abstract RaftState handleAppendEntries(ActorRef sender,
+ protected abstract RaftActorBehavior handleAppendEntries(ActorRef sender,
AppendEntries appendEntries);
* @param appendEntries
* @return
*/
- protected RaftState appendEntries(ActorRef sender,
+ protected RaftActorBehavior appendEntries(ActorRef sender,
AppendEntries appendEntries) {
// 1. Reply false if term < currentTerm (§5.1)
if (appendEntries.getTerm() < currentTerm()) {
- context.getLogger().debug(
- "Cannot append entries because sender term " + appendEntries
- .getTerm() + " is less than " + currentTerm());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Cannot append entries because sender term {} is less than {}",
+ appendEntries.getTerm(), currentTerm());
+ }
+
sender.tell(
new AppendEntriesReply(context.getId(), currentTerm(), false,
lastIndex(), lastTerm()), actor()
);
- return state();
+ return this;
}
* @param appendEntriesReply The AppendEntriesReply message
* @return
*/
- protected abstract RaftState handleAppendEntriesReply(ActorRef sender,
+ protected abstract RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
AppendEntriesReply appendEntriesReply);
/**
* @param requestVote
* @return
*/
- protected RaftState requestVote(ActorRef sender,
+ protected RaftActorBehavior requestVote(ActorRef sender,
RequestVote requestVote) {
-
- context.getLogger().debug(requestVote.toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(requestVote.toString());
+ }
boolean grantVote = false;
sender.tell(new RequestVoteReply(currentTerm(), grantVote), actor());
- return state();
+ return this;
}
/**
* @param requestVoteReply The RequestVoteReply message
* @return
*/
- protected abstract RaftState handleRequestVoteReply(ActorRef sender,
+ protected abstract RaftActorBehavior handleRequestVoteReply(ActorRef sender,
RequestVoteReply requestVoteReply);
/**
} else {
//if one index is not present in the log, no point in looping
// around as the rest wont be present either
- context.getLogger().warning(
- "Missing index {} from log. Cannot apply state. Ignoring {} to {}", i, i, index );
+ LOG.warning(
+ "Missing index {} from log. Cannot apply state. Ignoring {} to {}", i, i, index);
break;
}
}
- context.getLogger().debug("Setting last applied to {}", newLastApplied);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Setting last applied to {}", newLastApplied);
+ }
context.setLastApplied(newLastApplied);
// send a message to persist a ApplyLogEntries marker message into akka's persistent journal
}
@Override
- public RaftState handleMessage(ActorRef sender, Object message) {
+ public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
if (message instanceof AppendEntries) {
return appendEntries(sender, (AppendEntries) message);
} else if (message instanceof AppendEntriesReply) {
} else if (message instanceof RequestVoteReply) {
return handleRequestVoteReply(sender, (RequestVoteReply) message);
}
- return state();
+ return this;
}
@Override public String getLeaderId() {
return leaderId;
}
+
+ protected RaftActorBehavior switchBehavior(RaftActorBehavior behavior) {
+ LOG.info("Switching from behavior {} to {}", this.state(), behavior.state());
+ try {
+ close();
+ } catch (Exception e) {
+ LOG.error(e, "Failed to close behavior : {}", this.state());
+ }
+
+ return behavior;
+ }
}
peers = context.getPeerAddresses().keySet();
- context.getLogger().debug("Election:Candidate has following peers:"+ peers);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Election:Candidate has following peers: {}", peers);
+ }
if(peers.size() > 0) {
// Votes are required from a majority of the peers including self.
scheduleElection(electionDuration());
}
- @Override protected RaftState handleAppendEntries(ActorRef sender,
+ @Override protected RaftActorBehavior handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
- context.getLogger().debug(appendEntries.toString());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(appendEntries.toString());
+ }
- return state();
+ return this;
}
- @Override protected RaftState handleAppendEntriesReply(ActorRef sender,
+ @Override protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
AppendEntriesReply appendEntriesReply) {
- return state();
+ return this;
}
- @Override protected RaftState handleRequestVoteReply(ActorRef sender,
+ @Override protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
RequestVoteReply requestVoteReply) {
if (requestVoteReply.isVoteGranted()) {
}
if (voteCount >= votesRequired) {
- return RaftState.Leader;
+ return switchBehavior(new Leader(context));
}
- return state();
+ return this;
}
@Override public RaftState state() {
}
@Override
- public RaftState handleMessage(ActorRef sender, Object originalMessage) {
+ public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
Object message = fromSerializableMessage(originalMessage);
RaftRPC rpc = (RaftRPC) message;
- context.getLogger().debug("RaftRPC message received {} my term is {}", rpc.toString(), context.getTermInformation().getCurrentTerm());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("RaftRPC message received {} my term is {}", rpc, context.getTermInformation().getCurrentTerm());
+ }
// If RPC request or response contains term T > currentTerm:
// set currentTerm = T, convert to follower (§5.1)
// This applies to all RPC messages and responses
if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
- return RaftState.Follower;
+
+ return switchBehavior(new Follower(context));
}
}
// ourselves the leader. This gives enough time for a leader
// who we do not know about (as a peer)
// to send a message to the candidate
- return RaftState.Leader;
+
+ return switchBehavior(new Leader(context));
}
startNewTerm();
scheduleElection(electionDuration());
- return state();
+ return this;
}
return super.handleMessage(sender, message);
context.getTermInformation().updateAndPersist(currentTerm + 1,
context.getId());
- context.getLogger().debug("Starting new term " + (currentTerm + 1));
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Starting new term {}", (currentTerm + 1));
+ }
// Request for a vote
// TODO: Retry request for vote if replies do not arrive in a reasonable
package org.opendaylight.controller.cluster.raft.behaviors;
import akka.actor.ActorRef;
-import akka.event.LoggingAdapter;
import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftState;
public class Follower extends AbstractRaftActorBehavior {
private ByteString snapshotChunksCollected = ByteString.EMPTY;
- private final LoggingAdapter LOG;
-
public Follower(RaftActorContext context) {
super(context);
- LOG = context.getLogger();
-
scheduleElection(electionDuration());
}
- @Override protected RaftState handleAppendEntries(ActorRef sender,
+ @Override protected RaftActorBehavior handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
new AppendEntriesReply(context.getId(), currentTerm(), false,
lastIndex(), lastTerm()), actor()
);
- return state();
+ return this;
}
if (appendEntries.getEntries() != null
&& appendEntries.getEntries().size() > 0) {
if(LOG.isDebugEnabled()) {
LOG.debug(
- "Number of entries to be appended = " + appendEntries
- .getEntries().size()
+ "Number of entries to be appended = {}", appendEntries.getEntries().size()
);
}
if(LOG.isDebugEnabled()) {
LOG.debug(
- "Removing entries from log starting at "
- + matchEntry.getIndex()
+ "Removing entries from log starting at {}", matchEntry.getIndex()
);
}
}
if(LOG.isDebugEnabled()) {
- context.getLogger().debug(
- "After cleanup entries to be added from = " + (addEntriesFrom
- + lastIndex())
+ LOG.debug("After cleanup entries to be added from = {}", (addEntriesFrom + lastIndex())
);
}
for (int i = addEntriesFrom;
i < appendEntries.getEntries().size(); i++) {
- context.getLogger().info(
- "Append entry to log " + appendEntries.getEntries().get(
- i).getData()
- .toString()
- );
- context.getReplicatedLog()
- .appendAndPersist(appendEntries.getEntries().get(i));
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Append entry to log {}", appendEntries.getEntries().get(i).getData());
+ }
+ context.getReplicatedLog().appendAndPersist(appendEntries.getEntries().get(i));
}
if(LOG.isDebugEnabled()) {
- LOG.debug("Log size is now " + context.getReplicatedLog().size());
+ LOG.debug("Log size is now {}", context.getReplicatedLog().size());
}
}
if (prevCommitIndex != context.getCommitIndex()) {
if(LOG.isDebugEnabled()) {
- LOG.debug("Commit index set to " + context.getCommitIndex());
+ LOG.debug("Commit index set to {}", context.getCommitIndex());
}
}
sender.tell(new AppendEntriesReply(context.getId(), currentTerm(), true,
lastIndex(), lastTerm()), actor());
- return state();
+ return this;
}
- @Override protected RaftState handleAppendEntriesReply(ActorRef sender,
+ @Override protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
AppendEntriesReply appendEntriesReply) {
- return state();
+ return this;
}
- @Override protected RaftState handleRequestVoteReply(ActorRef sender,
+ @Override protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
RequestVoteReply requestVoteReply) {
- return state();
+ return this;
}
@Override public RaftState state() {
return RaftState.Follower;
}
- @Override public RaftState handleMessage(ActorRef sender, Object originalMessage) {
+ @Override public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
Object message = fromSerializableMessage(originalMessage);
}
if (message instanceof ElectionTimeout) {
- return RaftState.Candidate;
+ return switchBehavior(new Candidate(context));
} else if (message instanceof InstallSnapshot) {
InstallSnapshot installSnapshot = (InstallSnapshot) message;
// this is the last chunk, create a snapshot object and apply
snapshotChunksCollected = snapshotChunksCollected.concat(installSnapshot.getData());
- context.getLogger().debug("Last chunk received: snapshotChunksCollected.size:{}",
- snapshotChunksCollected.size());
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Last chunk received: snapshotChunksCollected.size:{}",
+ snapshotChunksCollected.size());
+ }
Snapshot snapshot = Snapshot.create(snapshotChunksCollected.toByteArray(),
new ArrayList<ReplicatedLogEntry>(),
true), actor());
} catch (Exception e) {
- context.getLogger().error("Exception in InstallSnapshot of follower", e);
+ LOG.error(e, "Exception in InstallSnapshot of follower:");
//send reply with success as false. The chunk will be sent again on failure
sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
installSnapshot.getChunkIndex(), false), actor());
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.Cancellable;
-import akka.event.LoggingAdapter;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
private final int minReplicationCount;
- private final LoggingAdapter LOG;
-
public Leader(RaftActorContext context) {
super(context);
- LOG = context.getLogger();
-
- if (lastIndex() >= 0) {
- context.setCommitIndex(lastIndex());
- }
-
followers = context.getPeerAddresses().keySet();
for (String followerId : followers) {
FollowerLogInformation followerLogInformation =
new FollowerLogInformationImpl(followerId,
- new AtomicLong(lastIndex()),
+ new AtomicLong(context.getCommitIndex()),
new AtomicLong(-1));
followerToLog.put(followerId, followerLogInformation);
}
if(LOG.isDebugEnabled()) {
- LOG.debug("Election:Leader has following peers:" + followers);
+ LOG.debug("Election:Leader has following peers: {}", followers);
}
if (followers.size() > 0) {
}
- @Override protected RaftState handleAppendEntries(ActorRef sender,
+ @Override protected RaftActorBehavior handleAppendEntries(ActorRef sender,
AppendEntries appendEntries) {
if(LOG.isDebugEnabled()) {
LOG.debug(appendEntries.toString());
}
- return state();
+ return this;
}
- @Override protected RaftState handleAppendEntriesReply(ActorRef sender,
+ @Override protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender,
AppendEntriesReply appendEntriesReply) {
if(! appendEntriesReply.isSuccess()) {
if(followerLogInformation == null){
LOG.error("Unknown follower {}", followerId);
- return state();
+ return this;
}
if (appendEntriesReply.isSuccess()) {
applyLogToStateMachine(context.getCommitIndex());
}
- return state();
+ return this;
}
protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
return null;
}
- @Override protected RaftState handleRequestVoteReply(ActorRef sender,
+ @Override protected RaftActorBehavior handleRequestVoteReply(ActorRef sender,
RequestVoteReply requestVoteReply) {
- return state();
+ return this;
}
@Override public RaftState state() {
return RaftState.Leader;
}
- @Override public RaftState handleMessage(ActorRef sender, Object originalMessage) {
+ @Override public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
Preconditions.checkNotNull(sender, "sender should not be null");
Object message = fromSerializableMessage(originalMessage);
// This applies to all RPC messages and responses
if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
- return RaftState.Follower;
+
+ return switchBehavior(new Follower(context));
}
}
try {
if (message instanceof SendHeartBeat) {
- return sendHeartBeat();
+ sendHeartBeat();
+ return this;
} else if(message instanceof SendInstallSnapshot) {
installSnapshotIfNeeded();
} else if (message instanceof Replicate) {
long logIndex = replicate.getReplicatedLogEntry().getIndex();
if(LOG.isDebugEnabled()) {
- LOG.debug("Replicate message " + logIndex);
+ LOG.debug("Replicate message {}", logIndex);
}
// Create a tracker entry we will use this later to notify the
followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(),
mapFollowerToSnapshot.get(followerId).getTotalChunks());
} catch (IOException e) {
- LOG.error("InstallSnapshot failed for Leader.", e);
+ LOG.error(e, "InstallSnapshot failed for Leader.");
}
}
return nextChunk;
}
- private RaftState sendHeartBeat() {
+ private void sendHeartBeat() {
if (followers.size() > 0) {
sendAppendEntries();
}
- return state();
}
private void stopHeartBeat() {
* differently.
*/
public interface RaftActorBehavior extends AutoCloseable{
+
/**
* Handle a message. If the processing of the message warrants a state
- * change then a new state should be returned otherwise this method should
- * return the state for the current behavior.
+ * change then a new behavior should be returned otherwise this method should
+ * return the current behavior.
*
* @param sender The sender of the message
* @param message A message that needs to be processed
*
- * @return The new state or self (this)
+ * @return The new behavior or current behavior
*/
- RaftState handleMessage(ActorRef sender, Object message);
+ RaftActorBehavior handleMessage(ActorRef sender, Object message);
/**
* The state associated with a given behavior
public static class MockPayload extends Payload implements Serializable {
private String value = "";
+ public MockPayload(){
+
+ }
+
public MockPayload(String s) {
this.value = s;
}
return index;
}
}
+
+ public static class MockReplicatedLogBuilder {
+ private ReplicatedLog mockLog = new SimpleReplicatedLog();
+
+ public MockReplicatedLogBuilder createEntries(int start, int end, int term) {
+ for (int i=start; i<end; i++) {
+ this.mockLog.append(new ReplicatedLogImplEntry(i, term, new MockRaftActorContext.MockPayload("foo" + i)));
+ }
+ return this;
+ }
+
+ public MockReplicatedLogBuilder addEntry(int index, int term, MockPayload payload) {
+ this.mockLog.append(new ReplicatedLogImplEntry(index, term, payload));
+ return this;
+ }
+
+ public ReplicatedLog build() {
+ return this.mockLog;
+ }
+ }
}
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
+import akka.actor.Terminated;
import akka.event.Logging;
import akka.japi.Creator;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SaveSnapshotSuccess;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotOffer;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
+import com.google.common.base.Optional;
+import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
import org.junit.After;
import org.junit.Test;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.datastore.DataPersistenceProviderMonitor;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal;
import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
+import scala.concurrent.duration.FiniteDuration;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.TestCase.assertEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.mockito.Mockito.mock;
public class RaftActorTest extends AbstractActorTest {
public static class MockRaftActor extends RaftActor {
- private boolean applySnapshotCalled = false;
- private List<Object> state;
+ private final DataPersistenceProvider dataPersistenceProvider;
- public MockRaftActor(String id,
- Map<String, String> peerAddresses) {
- super(id, peerAddresses);
+ public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
+ private final Map<String, String> peerAddresses;
+ private final String id;
+ private final Optional<ConfigParams> config;
+ private final DataPersistenceProvider dataPersistenceProvider;
+
+ private MockRaftActorCreator(Map<String, String> peerAddresses, String id,
+ Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider) {
+ this.peerAddresses = peerAddresses;
+ this.id = id;
+ this.config = config;
+ this.dataPersistenceProvider = dataPersistenceProvider;
+ }
+
+ @Override
+ public MockRaftActor create() throws Exception {
+ return new MockRaftActor(id, peerAddresses, config, dataPersistenceProvider);
+ }
+ }
+
+ private final CountDownLatch recoveryComplete = new CountDownLatch(1);
+ private final CountDownLatch applyRecoverySnapshot = new CountDownLatch(1);
+ private final CountDownLatch applyStateLatch = new CountDownLatch(1);
+
+ private final List<Object> state;
+
+ public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider) {
+ super(id, peerAddresses, config);
state = new ArrayList<>();
+ if(dataPersistenceProvider == null){
+ this.dataPersistenceProvider = new PersistentDataProvider();
+ } else {
+ this.dataPersistenceProvider = dataPersistenceProvider;
+ }
}
- public RaftActorContext getRaftActorContext() {
- return context;
+ public void waitForRecoveryComplete() {
+ try {
+ assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
}
- public boolean isApplySnapshotCalled() {
- return applySnapshotCalled;
+ public CountDownLatch getApplyRecoverySnapshotLatch(){
+ return applyRecoverySnapshot;
}
public List<Object> getState() {
return state;
}
- public static Props props(final String id, final Map<String, String> peerAddresses){
- return Props.create(new Creator<MockRaftActor>(){
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, null));
+ }
- @Override public MockRaftActor create() throws Exception {
- return new MockRaftActor(id, peerAddresses);
- }
- });
+ public static Props props(final String id, final Map<String, String> peerAddresses,
+ Optional<ConfigParams> config, DataPersistenceProvider dataPersistenceProvider){
+ return Props.create(new MockRaftActorCreator(peerAddresses, id, config, dataPersistenceProvider));
}
+
@Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
+ applyStateLatch.countDown();
+ }
+
+ @Override
+ protected void startLogRecoveryBatch(int maxBatchSize) {
+ }
+
+ @Override
+ protected void appendRecoveredLogEntry(Payload data) {
state.add(data);
}
- @Override protected void createSnapshot() {
- throw new UnsupportedOperationException("createSnapshot");
+ @Override
+ protected void applyCurrentLogRecoveryBatch() {
}
- @Override protected void applySnapshot(ByteString snapshot) {
- applySnapshotCalled = true;
+ @Override
+ protected void onRecoveryComplete() {
+ recoveryComplete.countDown();
+ }
+
+ @Override
+ protected void applyRecoverySnapshot(ByteString snapshot) {
+ applyRecoverySnapshot.countDown();
try {
Object data = toObject(snapshot);
+ System.out.println("!!!!!applyRecoverySnapshot: "+data);
if (data instanceof List) {
state.addAll((List) data);
}
- } catch (ClassNotFoundException e) {
- e.printStackTrace();
- } catch (IOException e) {
+ } catch (Exception e) {
e.printStackTrace();
}
}
+ @Override protected void createSnapshot() {
+ }
+
+ @Override protected void applySnapshot(ByteString snapshot) {
+ }
+
@Override protected void onStateChanged() {
}
+ @Override
+ protected DataPersistenceProvider persistence() {
+ return this.dataPersistenceProvider;
+ }
+
@Override public String persistenceId() {
return this.getId();
}
return obj;
}
+ public ReplicatedLog getReplicatedLog(){
+ return this.getRaftActorContext().getReplicatedLog();
+ }
}
public RaftActorTestKit(ActorSystem actorSystem, String actorName) {
super(actorSystem);
- raftActor = this.getSystem()
- .actorOf(MockRaftActor.props(actorName,
- Collections.EMPTY_MAP), actorName);
+ raftActor = this.getSystem().actorOf(MockRaftActor.props(actorName,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>absent()), actorName);
}
return
new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
) {
+ @Override
protected Boolean run() {
return true;
}
}.from(raftActor.path().toString())
- .message("Switching from state Candidate to Leader")
+ .message("Switching from behavior Candidate to Leader")
.occurrences(1).exec();
}
public void findLeader(final String expectedLeader){
+ raftActor.tell(new FindLeader(), getRef());
-
- new Within(duration("1 seconds")) {
- protected void run() {
-
- raftActor.tell(new FindLeader(), getRef());
-
- String s = new ExpectMsg<String>(duration("1 seconds"),
- "findLeader") {
- // do not put code outside this method, will run afterwards
- protected String match(Object in) {
- if (in instanceof FindLeaderReply) {
- return ((FindLeaderReply) in).getLeaderActor();
- } else {
- throw noMatch();
- }
- }
- }.get();// this extracts the received message
-
- assertEquals(expectedLeader, s);
-
- }
-
-
- };
+ FindLeaderReply reply = expectMsgClass(duration("5 seconds"), FindLeaderReply.class);
+ assertEquals("getLeaderActor", expectedLeader, reply.getLeaderActor());
}
public ActorRef getRaftActor() {
return raftActor;
}
-
}
}
@Test
- public void testRaftActorRecovery() {
+ public void testRaftActorRecovery() throws Exception {
new JavaTestKit(getSystem()) {{
- new Within(duration("1 seconds")) {
- protected void run() {
-
- String persistenceId = "follower10";
-
- ActorRef followerActor = getSystem().actorOf(
- MockRaftActor.props(persistenceId, Collections.EMPTY_MAP), persistenceId);
-
- List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
- ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("E"));
- snapshotUnappliedEntries.add(entry1);
-
- int lastAppliedDuringSnapshotCapture = 3;
- int lastIndexDuringSnapshotCapture = 4;
-
- ByteString snapshotBytes = null;
- try {
- // 4 messages as part of snapshot, which are applied to state
- snapshotBytes = fromObject(Arrays.asList(new MockRaftActorContext.MockPayload("A"),
- new MockRaftActorContext.MockPayload("B"),
- new MockRaftActorContext.MockPayload("C"),
- new MockRaftActorContext.MockPayload("D")));
- } catch (Exception e) {
- e.printStackTrace();
- }
- Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
- snapshotUnappliedEntries, lastIndexDuringSnapshotCapture, 1 ,
- lastAppliedDuringSnapshotCapture, 1);
- MockSnapshotStore.setMockSnapshot(snapshot);
- MockSnapshotStore.setPersistenceId(persistenceId);
-
- // add more entries after snapshot is taken
- List<ReplicatedLogEntry> entries = new ArrayList<>();
- ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5, new MockRaftActorContext.MockPayload("F"));
- ReplicatedLogEntry entry3 = new MockRaftActorContext.MockReplicatedLogEntry(1, 6, new MockRaftActorContext.MockPayload("G"));
- ReplicatedLogEntry entry4 = new MockRaftActorContext.MockReplicatedLogEntry(1, 7, new MockRaftActorContext.MockPayload("H"));
- entries.add(entry2);
- entries.add(entry3);
- entries.add(entry4);
-
- int lastAppliedToState = 5;
- int lastIndex = 7;
-
- MockAkkaJournal.addToJournal(5, entry2);
- // 2 entries are applied to state besides the 4 entries in snapshot
- MockAkkaJournal.addToJournal(6, new ApplyLogEntries(lastAppliedToState));
- MockAkkaJournal.addToJournal(7, entry3);
- MockAkkaJournal.addToJournal(8, entry4);
-
- // kill the actor
- followerActor.tell(PoisonPill.getInstance(), null);
-
- try {
- // give some time for actor to die
- Thread.sleep(200);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
+ String persistenceId = "follower10";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+ // Set the heartbeat interval high to essentially disable election otherwise the test
+ // may fail if the actor is switched to Leader and the commitIndex is set to the last
+ // log entry.
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ ActorRef followerActor = getSystem().actorOf(MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config)), persistenceId);
+
+ watch(followerActor);
+
+ List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
+ ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4,
+ new MockRaftActorContext.MockPayload("E"));
+ snapshotUnappliedEntries.add(entry1);
+
+ int lastAppliedDuringSnapshotCapture = 3;
+ int lastIndexDuringSnapshotCapture = 4;
+
+ // 4 messages as part of snapshot, which are applied to state
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
+ snapshotUnappliedEntries, lastIndexDuringSnapshotCapture, 1 ,
+ lastAppliedDuringSnapshotCapture, 1);
+ MockSnapshotStore.setMockSnapshot(snapshot);
+ MockSnapshotStore.setPersistenceId(persistenceId);
+
+ // add more entries after snapshot is taken
+ List<ReplicatedLogEntry> entries = new ArrayList<>();
+ ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
+ new MockRaftActorContext.MockPayload("F"));
+ ReplicatedLogEntry entry3 = new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
+ new MockRaftActorContext.MockPayload("G"));
+ ReplicatedLogEntry entry4 = new MockRaftActorContext.MockReplicatedLogEntry(1, 7,
+ new MockRaftActorContext.MockPayload("H"));
+ entries.add(entry2);
+ entries.add(entry3);
+ entries.add(entry4);
+
+ int lastAppliedToState = 5;
+ int lastIndex = 7;
+
+ MockAkkaJournal.addToJournal(5, entry2);
+ // 2 entries are applied to state besides the 4 entries in snapshot
+ MockAkkaJournal.addToJournal(6, new ApplyLogEntries(lastAppliedToState));
+ MockAkkaJournal.addToJournal(7, entry3);
+ MockAkkaJournal.addToJournal(8, entry4);
+
+ // kill the actor
+ followerActor.tell(PoisonPill.getInstance(), null);
+ expectMsgClass(duration("5 seconds"), Terminated.class);
+
+ unwatch(followerActor);
+
+ //reinstate the actor
+ TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(),
+ MockRaftActor.props(persistenceId, Collections.EMPTY_MAP,
+ Optional.<ConfigParams>of(config)));
+
+ ref.underlyingActor().waitForRecoveryComplete();
+
+ RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+ assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
+ context.getReplicatedLog().size());
+ assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
+ assertEquals("Last applied", lastAppliedToState, context.getLastApplied());
+ assertEquals("Commit index", lastAppliedToState, context.getCommitIndex());
+ assertEquals("Recovered state size", 6, ref.underlyingActor().getState().size());
+ }};
+ }
- //reinstate the actor
- TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(),
- MockRaftActor.props(persistenceId, Collections.EMPTY_MAP));
+ /**
+ * This test verifies that when recovery is applicable (typically when persistence is true) the RaftActor does
+ * process recovery messages
+ *
+ * @throws Exception
+ */
- try {
- //give some time for snapshot offer to get called.
- Thread.sleep(200);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
+ @Test
+ public void testHandleRecoveryWhenDataPersistenceRecoveryApplicable() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testHandleRecoveryWhenDataPersistenceRecoveryApplicable";
- RaftActorContext context = ref.underlyingActor().getRaftActorContext();
- assertEquals(snapshotUnappliedEntries.size() + entries.size(), context.getReplicatedLog().size());
- assertEquals(lastIndex, context.getReplicatedLog().lastIndex());
- assertEquals(lastAppliedToState, context.getLastApplied());
- assertEquals(lastAppliedToState, context.getCommitIndex());
- assertTrue(ref.underlyingActor().isApplySnapshotCalled());
- assertEquals(6, ref.underlyingActor().getState().size());
- }
- };
- }};
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config)), persistenceId);
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
+ Lists.<ReplicatedLogEntry>newArrayList(), 3, 1 ,3, 1);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
+
+ CountDownLatch applyRecoverySnapshotLatch = mockRaftActor.getApplyRecoverySnapshotLatch();
+
+ assertEquals("apply recovery snapshot", true, applyRecoverySnapshotLatch.await(5, TimeUnit.SECONDS));
+
+ mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
+
+ ReplicatedLog replicatedLog = mockRaftActor.getReplicatedLog();
+
+ assertEquals("add replicated log entry", 1, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(1, 1, new MockRaftActorContext.MockPayload("A")));
+
+ assertEquals("add replicated log entry", 2, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new ApplyLogEntries(1));
+
+ assertEquals("commit index 1", 1, mockRaftActor.getRaftActorContext().getCommitIndex());
+
+ // The snapshot had 4 items + we added 2 more items during the test
+ // We start removing from 5 and we should get 1 item in the replicated log
+ mockRaftActor.onReceiveRecover(new RaftActor.DeleteEntries(5));
+
+ assertEquals("remove log entries", 1, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
+
+ assertEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
+ assertEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
+
+ mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
+
+ mockRaftActor.waitForRecoveryComplete();
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }};
+ }
+
+ /**
+ * This test verifies that when recovery is not applicable (typically when persistence is false) the RaftActor does
+ * not process recovery messages
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testHandleRecoveryWhenDataPersistenceRecoveryNotApplicable() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testHandleRecoveryWhenDataPersistenceRecoveryNotApplicable";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config), new DataPersistenceProviderMonitor()), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
+ Lists.<ReplicatedLogEntry>newArrayList(), 3, 1 ,3, 1);
+
+ mockRaftActor.onReceiveRecover(new SnapshotOffer(new SnapshotMetadata(persistenceId, 100, 100), snapshot));
+
+ CountDownLatch applyRecoverySnapshotLatch = mockRaftActor.getApplyRecoverySnapshotLatch();
+
+ assertEquals("apply recovery snapshot", false, applyRecoverySnapshotLatch.await(1, TimeUnit.SECONDS));
+
+ mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(0, 1, new MockRaftActorContext.MockPayload("A")));
+
+ ReplicatedLog replicatedLog = mockRaftActor.getReplicatedLog();
+
+ assertEquals("add replicated log entry", 0, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new ReplicatedLogImplEntry(1, 1, new MockRaftActorContext.MockPayload("A")));
+
+ assertEquals("add replicated log entry", 0, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new ApplyLogEntries(1));
+
+ assertEquals("commit index -1", -1, mockRaftActor.getRaftActorContext().getCommitIndex());
+ mockRaftActor.onReceiveRecover(new RaftActor.DeleteEntries(2));
+
+ assertEquals("remove log entries", 0, replicatedLog.size());
+
+ mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
+
+ assertNotEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
+ assertNotEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
+
+ mockRaftActor.onReceiveRecover(mock(RecoveryCompleted.class));
+
+ mockRaftActor.waitForRecoveryComplete();
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+ }};
+ }
+
+
+ @Test
+ public void testUpdatingElectionTermCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testUpdatingElectionTermCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ CountDownLatch persistLatch = new CountDownLatch(1);
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.getRaftActorContext().getTermInformation().updateAndPersist(10, "foobar");
+
+ assertEquals("Persist called", true, persistLatch.await(5, TimeUnit.SECONDS));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testAddingReplicatedLogEntryCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testAddingReplicatedLogEntryCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ CountDownLatch persistLatch = new CountDownLatch(1);
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.getRaftActorContext().getReplicatedLog().appendAndPersist(new MockRaftActorContext.MockReplicatedLogEntry(10, 10, mock(Payload.class)));
+
+ assertEquals("Persist called", true, persistLatch.await(5, TimeUnit.SECONDS));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testRemovingReplicatedLogEntryCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testRemovingReplicatedLogEntryCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ CountDownLatch persistLatch = new CountDownLatch(2);
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.getReplicatedLog().appendAndPersist(new MockRaftActorContext.MockReplicatedLogEntry(1, 0, mock(Payload.class)));
+
+ mockRaftActor.getRaftActorContext().getReplicatedLog().removeFromAndPersist(0);
+
+ assertEquals("Persist called", true, persistLatch.await(5, TimeUnit.SECONDS));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testApplyLogEntriesCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testApplyLogEntriesCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ CountDownLatch persistLatch = new CountDownLatch(1);
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ mockRaftActor.onReceiveCommand(new ApplyLogEntries(10));
+
+ assertEquals("Persist called", true, persistLatch.await(5, TimeUnit.SECONDS));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testCaptureSnapshotReplyCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testCaptureSnapshotReplyCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ CountDownLatch persistLatch = new CountDownLatch(1);
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setSaveSnapshotLatch(persistLatch);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1,1,-1,1));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes));
+
+ assertEquals("Save Snapshot called", true, persistLatch.await(5, TimeUnit.SECONDS));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
+ }
+
+ @Test
+ public void testSaveSnapshotSuccessCallsDataPersistence() throws Exception {
+ new JavaTestKit(getSystem()) {
+ {
+ String persistenceId = "testSaveSnapshotSuccessCallsDataPersistence";
+
+ DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+
+ config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+ CountDownLatch deleteMessagesLatch = new CountDownLatch(1);
+ CountDownLatch deleteSnapshotsLatch = new CountDownLatch(1);
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setDeleteMessagesLatch(deleteMessagesLatch);
+ dataPersistenceProviderMonitor.setDeleteSnapshotsLatch(deleteSnapshotsLatch);
+
+ TestActorRef<MockRaftActor> mockActorRef = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId,
+ Collections.EMPTY_MAP, Optional.<ConfigParams>of(config), dataPersistenceProviderMonitor), persistenceId);
+
+ MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
+
+ ByteString snapshotBytes = fromObject(Arrays.asList(
+ new MockRaftActorContext.MockPayload("A"),
+ new MockRaftActorContext.MockPayload("B"),
+ new MockRaftActorContext.MockPayload("C"),
+ new MockRaftActorContext.MockPayload("D")));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshot(-1,1,-1,1));
+
+ mockRaftActor.onReceiveCommand(new CaptureSnapshotReply(snapshotBytes));
+
+ mockRaftActor.onReceiveCommand(new SaveSnapshotSuccess(new SnapshotMetadata("foo", 100, 100)));
+
+ assertEquals("Delete Messages called", true, deleteMessagesLatch.await(5, TimeUnit.SECONDS));
+
+ assertEquals("Delete Snapshots called", true, deleteSnapshotsLatch.await(5, TimeUnit.SECONDS));
+
+ mockActorRef.tell(PoisonPill.getInstance(), getRef());
+
+ }
+ };
}
private ByteString fromObject(Object snapshot) throws Exception {
import org.opendaylight.controller.cluster.raft.AbstractActorTest;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
-import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import java.util.List;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
public abstract class AbstractRaftActorBehaviorTest extends AbstractActorTest {
RaftActorBehavior behavior = createBehavior(context);
// Send an unknown message so that the state of the RaftActor remains unchanged
- RaftState expected = behavior.handleMessage(getRef(), "unknown");
+ RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
behavior.handleMessage(getRef(), appendEntries);
- assertEquals(expected, raftState);
+ assertEquals(expected, raftBehavior);
// Also expect an AppendEntriesReply to be sent where success is false
final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
}
// Send an unknown message so that the state of the RaftActor remains unchanged
- RaftState expected = behavior.handleMessage(getRef(), "unknown");
+ RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
behavior.handleMessage(getRef(), appendEntries);
- assertEquals(expected, raftState);
+ assertEquals(expected, raftBehavior);
assertEquals(1, log.size());
RaftActorBehavior behavior = createBehavior(
createActorContext(behaviorActor));
- RaftState raftState = behavior.handleMessage(getTestActor(),
+ RaftActorBehavior raftBehavior = behavior.handleMessage(getTestActor(),
new RequestVote(1000, "test", 10000, 999));
- if(behavior.state() != RaftState.Follower){
- assertEquals(RaftState.Follower, raftState);
+ if(!(behavior instanceof Follower)){
+ assertTrue(raftBehavior instanceof Follower);
} else {
final Boolean out =
RaftActorBehavior behavior = createBehavior(actorContext);
- RaftState raftState = behavior.handleMessage(getTestActor(),
+ RaftActorBehavior raftBehavior = behavior.handleMessage(getTestActor(),
new RequestVote(1000, "test", 10000, 999));
- if(behavior.state() != RaftState.Follower){
- assertEquals(RaftState.Follower, raftState);
+ if(!(behavior instanceof Follower)){
+ assertTrue(raftBehavior instanceof Follower);
} else {
final Boolean out =
new ExpectMsg<Boolean>(duration("1 seconds"),
setLastLogEntry(
(MockRaftActorContext) actorContext, 0, 0, p);
- RaftState raftState = createBehavior(actorContext)
+ RaftActorBehavior raftBehavior = createBehavior(actorContext)
.handleMessage(actorRef, rpc);
- assertEquals(RaftState.Follower, raftState);
+ assertTrue(raftBehavior instanceof Follower);
}
protected MockRaftActorContext.SimpleReplicatedLog setLastLogEntry(
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
-import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
Candidate candidate =
new Candidate(raftActorContext);
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
candidate.handleMessage(candidateActor, new ElectionTimeout());
- Assert.assertEquals(RaftState.Leader, raftState);
+ Assert.assertTrue(raftBehavior instanceof Leader);
}
@Test
Candidate candidate =
new Candidate(raftActorContext);
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
candidate.handleMessage(candidateActor, new ElectionTimeout());
- Assert.assertEquals(RaftState.Candidate, raftState);
+ Assert.assertTrue(raftBehavior instanceof Candidate);
}
@Test
Candidate candidate =
new Candidate(raftActorContext);
- RaftState stateOnFirstVote = candidate.handleMessage(peerActor1, new RequestVoteReply(0, true));
+ RaftActorBehavior behaviorOnFirstVote = candidate.handleMessage(peerActor1, new RequestVoteReply(0, true));
- Assert.assertEquals(RaftState.Leader, stateOnFirstVote);
+ Assert.assertTrue(behaviorOnFirstVote instanceof Leader);
}
Candidate candidate =
new Candidate(raftActorContext);
- RaftState stateOnFirstVote = candidate.handleMessage(peerActor1, new RequestVoteReply(0, true));
+ RaftActorBehavior behaviorOnFirstVote = candidate.handleMessage(peerActor1, new RequestVoteReply(0, true));
- RaftState stateOnSecondVote = candidate.handleMessage(peerActor2, new RequestVoteReply(0, true));
+ RaftActorBehavior behaviorOnSecondVote = candidate.handleMessage(peerActor2, new RequestVoteReply(0, true));
- Assert.assertEquals(RaftState.Candidate, stateOnFirstVote);
- Assert.assertEquals(RaftState.Leader, stateOnSecondVote);
+ Assert.assertTrue(behaviorOnFirstVote instanceof Candidate);
+ Assert.assertTrue(behaviorOnSecondVote instanceof Leader);
}
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
-import akka.util.Timeout;
import com.google.protobuf.ByteString;
import junit.framework.Assert;
import org.junit.Test;
import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
-import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.Duration;
-import scala.concurrent.duration.FiniteDuration;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import static akka.pattern.Patterns.ask;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
Follower follower =
new Follower(raftActorContext);
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
follower.handleMessage(followerActor, new ElectionTimeout());
- Assert.assertEquals(RaftState.Candidate, raftState);
+ Assert.assertTrue(raftBehavior instanceof Candidate);
}
@Test
AppendEntries appendEntries =
new AppendEntries(2, "leader-1", 100, 1, entries, 101);
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
createBehavior(context).handleMessage(getRef(), appendEntries);
assertEquals(101L, context.getLastApplied());
RaftActorBehavior behavior = createBehavior(context);
// Send an unknown message so that the state of the RaftActor remains unchanged
- RaftState expected = behavior.handleMessage(getRef(), "unknown");
+ RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
behavior.handleMessage(getRef(), appendEntries);
- assertEquals(expected, raftState);
+ assertEquals(expected, raftBehavior);
// Also expect an AppendEntriesReply to be sent where success is false
final Boolean out = new ExpectMsg<Boolean>(duration("1 seconds"),
RaftActorBehavior behavior = createBehavior(context);
// Send an unknown message so that the state of the RaftActor remains unchanged
- RaftState expected = behavior.handleMessage(getRef(), "unknown");
+ RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
behavior.handleMessage(getRef(), appendEntries);
- assertEquals(expected, raftState);
+ assertEquals(expected, raftBehavior);
assertEquals(5, log.last().getIndex() + 1);
assertNotNull(log.get(3));
assertNotNull(log.get(4));
RaftActorBehavior behavior = createBehavior(context);
// Send an unknown message so that the state of the RaftActor remains unchanged
- RaftState expected = behavior.handleMessage(getRef(), "unknown");
+ RaftActorBehavior expected = behavior.handleMessage(getRef(), "unknown");
- RaftState raftState =
+ RaftActorBehavior raftBehavior =
behavior.handleMessage(getRef(), appendEntries);
- assertEquals(expected, raftState);
+ assertEquals(expected, raftBehavior);
// The entry at index 2 will be found out-of-sync with the leader
// and will be removed
}
public Object executeLocalOperation(ActorRef actor, Object message) throws Exception {
- FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
- Timeout operationTimeout = new Timeout(operationDuration);
- Future<Object> future = ask(actor, message, operationTimeout);
-
- try {
- return Await.result(future, operationDuration);
- } catch (Exception e) {
- throw e;
- }
+ return MessageCollectorActor.getAllMessages(actor);
}
public ByteString getNextChunk (ByteString bs, int offset){
import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
import org.opendaylight.controller.cluster.raft.RaftActorContext;
-import org.opendaylight.controller.cluster.raft.RaftState;
import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
import org.opendaylight.controller.cluster.raft.SerializationUtils;
import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
import java.io.ByteArrayOutputStream;
// handle message should return the Leader state when it receives an
// unknown message
- RaftState state = leader.handleMessage(senderActor, "foo");
- Assert.assertEquals(RaftState.Leader, state);
+ RaftActorBehavior behavior = leader.handleMessage(senderActor, "foo");
+ Assert.assertTrue(behavior instanceof Leader);
}};
}
actorContext.setPeerAddresses(peerAddresses);
Leader leader = new Leader(actorContext);
- RaftState raftState = leader
+ RaftActorBehavior raftBehavior = leader
.handleMessage(senderActor, new Replicate(null, null,
new MockRaftActorContext.MockReplicatedLogEntry(1,
100,
));
// State should not change
- assertEquals(RaftState.Leader, raftState);
+ assertTrue(raftBehavior instanceof Leader);
final String out =
new ExpectMsg<String>(duration("1 seconds"), "match hint") {
actorContext.getReplicatedLog().removeFrom(0);
- actorContext.getReplicatedLog().append(new ReplicatedLogImplEntry(0, 1,
- new MockRaftActorContext.MockPayload("foo")));
-
- ReplicatedLogImplEntry entry =
- new ReplicatedLogImplEntry(1, 1,
- new MockRaftActorContext.MockPayload("foo"));
-
- actorContext.getReplicatedLog().append(entry);
+ actorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1)
+ .build());
Leader leader = new Leader(actorContext);
- RaftState raftState = leader
- .handleMessage(senderActor, new Replicate(null, "state-id",entry));
+ RaftActorBehavior raftBehavior = leader
+ .handleMessage(senderActor, new Replicate(null, "state-id",actorContext.getReplicatedLog().get(1)));
// State should not change
- assertEquals(RaftState.Leader, raftState);
+ assertTrue(raftBehavior instanceof Leader);
assertEquals(1, actorContext.getCommitIndex());
new MockRaftActorContext.MockPayload("D"));
// this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
- RaftState raftState = leader.handleMessage(
+ RaftActorBehavior raftBehavior = leader.handleMessage(
senderActor, new Replicate(null, "state-id", entry));
- assertEquals(RaftState.Leader, raftState);
+ assertTrue(raftBehavior instanceof Leader);
// we might receive some heartbeat messages, so wait till we SendInstallSnapshot
Boolean[] matches = new ReceiveWhile<Boolean>(Boolean.class, duration("2 seconds")) {
new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
new MockRaftActorContext.MockPayload("D"));
+ RaftActorBehavior raftBehavior = leader.handleMessage(senderActor, new SendInstallSnapshot());
- RaftState raftState = leader.handleMessage(senderActor, new SendInstallSnapshot());
-
- assertEquals(RaftState.Leader, raftState);
+ assertTrue(raftBehavior instanceof Leader);
// check if installsnapshot gets called with the correct values.
final String out =
//clears leaders log
actorContext.getReplicatedLog().removeFrom(0);
- RaftState raftState = leader.handleMessage(senderActor,
+ RaftActorBehavior raftBehavior = leader.handleMessage(senderActor,
new InstallSnapshotReply(currentTerm, followerActor.path().toString(),
leader.getFollowerToSnapshot().getChunkIndex(), true));
- assertEquals(RaftState.Leader, raftState);
+ assertTrue(raftBehavior instanceof Leader);
assertEquals(leader.mapFollowerToSnapshot.size(), 0);
assertEquals(leader.followerToLog.size(), 1);
return null;
}
+ public static class ForwardMessageToBehaviorActor extends MessageCollectorActor {
+ private static AbstractRaftActorBehavior behavior;
+
+ public ForwardMessageToBehaviorActor(){
+
+ }
+
+ @Override public void onReceive(Object message) throws Exception {
+ super.onReceive(message);
+ behavior.handleMessage(sender(), message);
+ }
+
+ public static void setBehavior(AbstractRaftActorBehavior behavior){
+ ForwardMessageToBehaviorActor.behavior = behavior;
+ }
+ }
+
+ @Test
+ public void testLeaderCreatedWithCommitIndexLessThanLastIndex() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ ActorRef followerActor = getSystem().actorOf(Props.create(ForwardMessageToBehaviorActor.class));
+
+ MockRaftActorContext followerActorContext =
+ new MockRaftActorContext("follower", getSystem(), followerActor);
+
+ Follower follower = new Follower(followerActorContext);
+
+ ForwardMessageToBehaviorActor.setBehavior(follower);
+
+ Map<String, String> peerAddresses = new HashMap();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ leaderActorContext.getReplicatedLog().removeFrom(0);
+
+ //create 3 entries
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ leaderActorContext.setCommitIndex(1);
+
+ followerActorContext.getReplicatedLog().removeFrom(0);
+
+ // follower too has the exact same log entries and has the same commit index
+ followerActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ followerActorContext.setCommitIndex(1);
+
+ Leader leader = new Leader(leaderActorContext);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
+ AppendEntriesMessages.AppendEntries appendEntries =
+ (AppendEntriesMessages.AppendEntries) MessageCollectorActor
+ .getFirstMatching(followerActor, AppendEntriesMessages.AppendEntries.class);
+
+ assertNotNull(appendEntries);
+
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(1, appendEntries.getLogEntries(0).getIndex());
+ assertEquals(0, appendEntries.getPrevLogIndex());
+
+ AppendEntriesReply appendEntriesReply =
+ (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
+ leaderActor, AppendEntriesReply.class);
+
+ assertNotNull(appendEntriesReply);
+
+ // follower returns its next index
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
+
+ }};
+ }
+
+
+ @Test
+ public void testLeaderCreatedWithCommitIndexLessThanFollowersCommitIndex() throws Exception {
+ new JavaTestKit(getSystem()) {{
+
+ ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+ MockRaftActorContext leaderActorContext =
+ new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+ ActorRef followerActor = getSystem().actorOf(
+ Props.create(ForwardMessageToBehaviorActor.class));
+
+ MockRaftActorContext followerActorContext =
+ new MockRaftActorContext("follower", getSystem(), followerActor);
+
+ Follower follower = new Follower(followerActorContext);
+
+ ForwardMessageToBehaviorActor.setBehavior(follower);
+
+ Map<String, String> peerAddresses = new HashMap();
+ peerAddresses.put(followerActor.path().toString(),
+ followerActor.path().toString());
+
+ leaderActorContext.setPeerAddresses(peerAddresses);
+
+ leaderActorContext.getReplicatedLog().removeFrom(0);
+
+ leaderActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ leaderActorContext.setCommitIndex(1);
+
+ followerActorContext.getReplicatedLog().removeFrom(0);
+
+ followerActorContext.setReplicatedLog(
+ new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+ // follower has the same log entries but its commit index > leaders commit index
+ followerActorContext.setCommitIndex(2);
+
+ Leader leader = new Leader(leaderActorContext);
+
+ leader.handleMessage(leaderActor, new SendHeartBeat());
+
+ AppendEntriesMessages.AppendEntries appendEntries =
+ (AppendEntriesMessages.AppendEntries) MessageCollectorActor
+ .getFirstMatching(followerActor, AppendEntriesMessages.AppendEntries.class);
+
+ assertNotNull(appendEntries);
+
+ assertEquals(1, appendEntries.getLeaderCommit());
+ assertEquals(1, appendEntries.getLogEntries(0).getIndex());
+ assertEquals(0, appendEntries.getPrevLogIndex());
+
+ AppendEntriesReply appendEntriesReply =
+ (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
+ leaderActor, AppendEntriesReply.class);
+
+ assertNotNull(appendEntriesReply);
+
+ assertEquals(2, appendEntriesReply.getLogLastIndex());
+ assertEquals(1, appendEntriesReply.getLogLastTerm());
+
+ }};
+ }
+
private static class LeaderTestKit extends JavaTestKit {
private LeaderTestKit(ActorSystem actorSystem) {
package org.opendaylight.controller.cluster.raft.utils;
+import akka.actor.ActorRef;
import akka.actor.UntypedActor;
+import akka.pattern.Patterns;
+import akka.util.Timeout;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.TimeUnit;
public class MessageCollectorActor extends UntypedActor {
messages.add(message);
}
}
+
+ public static List<Object> getAllMessages(ActorRef actor) throws Exception {
+ FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
+ Timeout operationTimeout = new Timeout(operationDuration);
+ Future<Object> future = Patterns.ask(actor, "get-all-messages", operationTimeout);
+
+ try {
+ return (List<Object>) Await.result(future, operationDuration);
+ } catch (Exception e) {
+ throw e;
+ }
+ }
+
+ /**
+ * Get the first message that matches the specified class
+ * @param actor
+ * @param clazz
+ * @return
+ */
+ public static Object getFirstMatching(ActorRef actor, Class clazz) throws Exception {
+ List<Object> allMessages = getAllMessages(actor);
+
+ for(Object message : allMessages){
+ if(message.getClass().equals(clazz)){
+ return message;
+ }
+ }
+
+ return null;
+ }
+
}
}
serialization-bindings {
+ "org.opendaylight.controller.cluster.common.actor.Monitor" = java
"org.opendaylight.controller.cluster.raft.client.messages.FindLeader" = java
"org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry" = java
"com.google.protobuf.Message" = proto
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-api</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-broker-impl</artifactId>
<packaging>bundle</packaging>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<artifactId>binding-data-codec</artifactId>
- <version>0.6.2-SNAPSHOT</version>
+ <version>0.7.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.yangtools</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-config</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-dom-it</artifactId>
<packaging>jar</packaging>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-test-model</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
public class ListProcessingAndOrderingTest extends AbstractDataServiceTest {
private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier DOM_UNORDERED_LIST_PATH = org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier
- .builder(Lists.QNAME).node(UnorderedContainer.QNAME).node(UnorderedList.QNAME).build();
+ .builder().node(Lists.QNAME).node(UnorderedContainer.QNAME).node(UnorderedList.QNAME).build();
private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier DOM_ORDERED_LIST_PATH = org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier
- .builder(Lists.QNAME).node(OrderedContainer.QNAME).node(OrderedList.QNAME).build();
+ .builder().node(Lists.QNAME).node(OrderedContainer.QNAME).node(OrderedList.QNAME).build();
private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier DOM_UNKEYED_LIST_PATH = org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier
- .builder(Lists.QNAME).node(UnkeyedContainer.QNAME).node(UnkeyedList.QNAME).build();
+ .builder().node(Lists.QNAME).node(UnkeyedContainer.QNAME).node(UnkeyedList.QNAME).build();
private static final InstanceIdentifier<UnorderedContainer> UNORDERED_CONTAINER_PATH = InstanceIdentifier.builder(Lists.class).child(UnorderedContainer.class).build();
private static final InstanceIdentifier<OrderedContainer> ORDERED_CONTAINER_PATH = InstanceIdentifier.builder(Lists.class).child(OrderedContainer.class).build();
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-it</artifactId>
mavenBundle("org.apache.sshd", "sshd-core").versionAsInProject(), //
mavenBundle("org.openexi", "nagasena").versionAsInProject(), //
mavenBundle("org.openexi", "nagasena-rta").versionAsInProject(), //
- mavenBundle(CONTROLLER + ".thirdparty", "ganymed").versionAsInProject(), //
mavenBundle(CONTROLLER, "netconf-mapping-api").versionAsInProject(), //
mavenBundle(CONTROLLER, "config-persister-impl").versionAsInProject(), //
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-binding-util</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-clustering-commons</artifactId>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+
+/**
+ * DataPersistenceProvider provides methods to persist data and is an abstraction of the akka-persistence persistence
+ * API.
+ */
+public interface DataPersistenceProvider {
+ /**
+ * @return false if recovery is not applicable. In that case the provider is not persistent and may not have
+ * anything to be recovered
+ */
+ boolean isRecoveryApplicable();
+
+ /**
+ * Persist a journal entry.
+ *
+ * @param o
+ * @param procedure
+ * @param <T>
+ */
+ <T> void persist(T o, Procedure<T> procedure);
+
+ /**
+ * Save a snapshot
+ *
+ * @param o
+ */
+ void saveSnapshot(Object o);
+
+ /**
+ * Delete snapshots based on the criteria
+ *
+ * @param criteria
+ */
+ void deleteSnapshots(SnapshotSelectionCriteria criteria);
+
+ /**
+ * Delete journal entries up to the sequence number
+ *
+ * @param sequenceNumber
+ */
+ void deleteMessages(long sequenceNumber);
+
+}
*/
public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor {
+ //this is used in the metric name. Some transient actors do not have defined names
+ private String actorNameOverride;
+
public AbstractUntypedActorWithMetering() {
if (isMetricsCaptureEnabled())
getContext().become(new MeteringBehavior(this));
}
+ public AbstractUntypedActorWithMetering(String actorNameOverride){
+ this.actorNameOverride = actorNameOverride;
+ if (isMetricsCaptureEnabled())
+ getContext().become(new MeteringBehavior(this));
+ }
+
private boolean isMetricsCaptureEnabled(){
CommonConfig config = new CommonConfig(getContext().system().settings().config());
return config.isMetricCaptureEnabled();
}
+
+ public String getActorNameOverride() {
+ return actorNameOverride;
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.UntypedPersistentActor;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+
+public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor {
+
+ protected final LoggingAdapter LOG =
+ Logging.getLogger(getContext().system(), this);
+
+ public AbstractUntypedPersistentActor() {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Actor created {}", getSelf());
+ }
+ getContext().
+ system().
+ actorSelection("user/termination-monitor").
+ tell(new Monitor(getSelf()), getSelf());
+
+ }
+
+
+ @Override public void onReceiveCommand(Object message) throws Exception {
+ final String messageType = message.getClass().getSimpleName();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received message {}", messageType);
+ }
+ handleCommand(message);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Done handling message {}", messageType);
+ }
+
+ }
+
+ @Override public void onReceiveRecover(Object message) throws Exception {
+ final String messageType = message.getClass().getSimpleName();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received message {}", messageType);
+ }
+ handleRecover(message);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Done handling message {}", messageType);
+ }
+
+ }
+
+ protected abstract void handleRecover(Object message) throws Exception;
+
+ protected abstract void handleCommand(Object message) throws Exception;
+
+ protected void ignoreMessage(Object message) {
+ LOG.debug("Unhandled message {} ", message);
+ }
+
+ protected void unknownMessage(Object message) throws Exception {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Received unhandled message {}", message);
+ }
+ unhandled(message);
+ }
+
+ protected class PersistentDataProvider implements DataPersistenceProvider {
+
+ public PersistentDataProvider(){
+
+ }
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return true;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ AbstractUntypedPersistentActor.this.persist(o, procedure);
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ AbstractUntypedPersistentActor.this.saveSnapshot(o);
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ AbstractUntypedPersistentActor.this.deleteSnapshots(criteria);
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ AbstractUntypedPersistentActor.this.deleteMessages(sequenceNumber);
+ }
+ }
+
+ protected class NonPersistentDataProvider implements DataPersistenceProvider {
+
+ public NonPersistentDataProvider(){
+
+ }
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return false;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ try {
+ procedure.apply(o);
+ } catch (Exception e) {
+ LOG.error(e, "An unexpected error occurred");
+ }
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+/**
+ * Actor with its behaviour metered. Metering is enabled by configuration.
+ */
+public abstract class AbstractUntypedPersistentActorWithMetering extends AbstractUntypedPersistentActor {
+
+ public AbstractUntypedPersistentActorWithMetering() {
+ if (isMetricsCaptureEnabled())
+ getContext().become(new MeteringBehavior(this));
+ }
+
+ private boolean isMetricsCaptureEnabled(){
+ CommonConfig config = new CommonConfig(getContext().system().settings().config());
+ return config.isMetricCaptureEnabled();
+ }
+}
private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance().getMetricsRegistry();
private final String MSG_PROCESSING_RATE = "msg-rate";
- private String actorName;
+ private String actorQualifiedName;
private Timer msgProcessingTimer;
/**
*
* @param actor whose behaviour needs to be metered
*/
- public MeteringBehavior(UntypedActor actor){
+ public MeteringBehavior(AbstractUntypedActorWithMetering actor){
Preconditions.checkArgument(actor != null, "actor must not be null");
+ this.meteredActor = actor;
+ String actorName = actor.getActorNameOverride() != null ? actor.getActorNameOverride()
+ : actor.getSelf().path().name();
+ init(actorName);
+ }
+
+ public MeteringBehavior(UntypedActor actor){
+ Preconditions.checkArgument(actor != null, "actor must not be null");
this.meteredActor = actor;
- actorName = meteredActor.getSelf().path().toStringWithoutAddress();
- final String msgProcessingTime = MetricRegistry.name(actorName, MSG_PROCESSING_RATE);
+
+ String actorName = actor.getSelf().path().name();
+ init(actorName);
+ }
+
+ private void init(String actorName){
+ actorQualifiedName = new StringBuilder(meteredActor.getSelf().path().parent().toStringWithoutAddress()).
+ append("/").append(actorName).toString();
+
+ final String msgProcessingTime = MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE);
msgProcessingTimer = METRICREGISTRY.timer(msgProcessingTime);
}
final String messageType = message.getClass().getSimpleName();
final String msgProcessingTimeByMsgType =
- MetricRegistry.name(actorName, MSG_PROCESSING_RATE, messageType);
+ MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE, messageType);
final Timer msgProcessingTimerByMsgType = METRICREGISTRY.timer(msgProcessingTimeByMsgType);
import akka.actor.ActorRef;
-public class Monitor {
+import java.io.Serializable;
+
+public class Monitor implements Serializable {
private final ActorRef actorRef;
public Monitor(ActorRef actorRef){
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * This class is intended for testing purposes. It just triggers CountDownLatch's in each method.
+ * This class really should be under src/test/java but it was problematic trying to uses it in other projects.
+ */
+public class DataPersistenceProviderMonitor implements DataPersistenceProvider {
+
+ private CountDownLatch persistLatch = new CountDownLatch(1);
+ private CountDownLatch saveSnapshotLatch = new CountDownLatch(1);
+ private CountDownLatch deleteSnapshotsLatch = new CountDownLatch(1);;
+ private CountDownLatch deleteMessagesLatch = new CountDownLatch(1);;
+
+ @Override
+ public boolean isRecoveryApplicable() {
+ return false;
+ }
+
+ @Override
+ public <T> void persist(T o, Procedure<T> procedure) {
+ persistLatch.countDown();
+ }
+
+ @Override
+ public void saveSnapshot(Object o) {
+ saveSnapshotLatch.countDown();
+ }
+
+ @Override
+ public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+ deleteSnapshotsLatch.countDown();
+ }
+
+ @Override
+ public void deleteMessages(long sequenceNumber) {
+ deleteMessagesLatch.countDown();
+ }
+
+ public void setPersistLatch(CountDownLatch persistLatch) {
+ this.persistLatch = persistLatch;
+ }
+
+ public void setSaveSnapshotLatch(CountDownLatch saveSnapshotLatch) {
+ this.saveSnapshotLatch = saveSnapshotLatch;
+ }
+
+ public void setDeleteSnapshotsLatch(CountDownLatch deleteSnapshotsLatch) {
+ this.deleteSnapshotsLatch = deleteSnapshotsLatch;
+ }
+
+ public void setDeleteMessagesLatch(CountDownLatch deleteMessagesLatch) {
+ this.deleteMessagesLatch = deleteMessagesLatch;
+ }
+}
+++ /dev/null
-/*
- *
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- */
-
-package org.opendaylight.controller.cluster.datastore.node;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import org.opendaylight.controller.cluster.datastore.node.utils.NodeIdentifierFactory;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.Node;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeContainerBuilder;
-import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
-import org.opendaylight.yangtools.yang.model.api.AugmentationTarget;
-import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static com.google.common.base.Preconditions.checkArgument;
-
-/**
- * NormalizedNodeBuilder is a builder that walks through a tree like structure and constructs a
- * NormalizedNode from it.
- * <p/>
- * A large part of this code has been copied over from a similar class in sal-common-impl which was
- * originally supposed to convert a CompositeNode to NormalizedNode
- *
- * @param <T>
- */
-public abstract class NodeToNormalizedNodeBuilder<T extends PathArgument>
- implements Identifiable<T> {
-
- private final T identifier;
-
- protected static final Logger logger = LoggerFactory
- .getLogger(NodeToNormalizedNodeBuilder.class);
-
- @Override
- public T getIdentifier() {
- return identifier;
- }
-
- ;
-
- protected NodeToNormalizedNodeBuilder(final T identifier) {
- super();
- this.identifier = identifier;
-
- }
-
- /**
- * @return Should return true if the node that this operation corresponds to is a mixin
- */
- public boolean isMixin() {
- return false;
- }
-
-
- /**
- * @return Should return true if the node that this operation corresponds to has a 'key'
- * associated with it. This is typically true for a list-item or leaf-list entry in yang
- */
- public boolean isKeyedEntry() {
- return false;
- }
-
- protected Set<QName> getQNameIdentifiers() {
- return Collections.singleton(identifier.getNodeType());
- }
-
- public abstract NodeToNormalizedNodeBuilder<?> getChild(
- final PathArgument child);
-
- public abstract NodeToNormalizedNodeBuilder<?> getChild(QName child);
-
- public abstract NormalizedNode<?, ?> normalize(QName nodeType, Node node);
-
-
-
- private static abstract class SimpleTypeNormalization<T extends PathArgument>
- extends NodeToNormalizedNodeBuilder<T> {
-
- protected SimpleTypeNormalization(final T identifier) {
- super(identifier);
- }
-
- @Override
- public NormalizedNode<?, ?> normalize(final QName nodeType,
- final Node node) {
- checkArgument(node != null);
- return normalizeImpl(nodeType, node);
- }
-
- protected abstract NormalizedNode<?, ?> normalizeImpl(QName nodeType,
- Node node);
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(
- final PathArgument child) {
- return null;
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(final QName child) {
- return null;
- }
-
- @Override
- public NormalizedNode<?, ?> createDefault(
- final PathArgument currentArg) {
- // TODO Auto-generated method stub
- return null;
- }
-
- }
-
-
- private static final class LeafNormalization extends
- SimpleTypeNormalization<NodeIdentifier> {
-
- private final LeafSchemaNode schema;
-
- protected LeafNormalization(final LeafSchemaNode schema, final NodeIdentifier identifier) {
- super(identifier);
- this.schema = schema;
- }
-
- @Override
- protected NormalizedNode<?, ?> normalizeImpl(final QName nodeType,
- final Node node) {
- Object value = NodeValueCodec.toTypeSafeValue(this.schema, this.schema.getType(), node);
- return ImmutableNodes.leafNode(nodeType, value);
-
- }
-
- }
-
-
- private static final class LeafListEntryNormalization extends
- SimpleTypeNormalization<NodeWithValue> {
-
- private final LeafListSchemaNode schema;
-
- public LeafListEntryNormalization(final LeafListSchemaNode potential) {
- super(new NodeWithValue(potential.getQName(), null));
- this.schema = potential;
- }
-
- @Override
- protected NormalizedNode<?, ?> normalizeImpl(final QName nodeType,
- final Node node) {
- final Object data = node.getValue();
- if (data == null) {
- Preconditions.checkArgument(false,
- "No data available in leaf list entry for " + nodeType);
- }
-
- Object value = NodeValueCodec.toTypeSafeValue(this.schema, this.schema.getType(), node);
-
- NodeWithValue nodeId = new NodeWithValue(nodeType, value);
- return Builders.leafSetEntryBuilder().withNodeIdentifier(nodeId)
- .withValue(value).build();
- }
-
-
- @Override
- public boolean isKeyedEntry() {
- return true;
- }
- }
-
-
- private static abstract class NodeToNormalizationNodeOperation<T extends PathArgument>
- extends NodeToNormalizedNodeBuilder<T> {
-
- protected NodeToNormalizationNodeOperation(final T identifier) {
- super(identifier);
- }
-
- @SuppressWarnings({"rawtypes", "unchecked"})
- @Override
- public final NormalizedNodeContainer<?, ?, ?> normalize(
- final QName nodeType, final Node node) {
- checkArgument(node != null);
-
- if (!node.getType().equals(AugmentationNode.class.getSimpleName())
- && !node.getType().equals(ContainerNode.class.getSimpleName())
- && !node.getType().equals(MapNode.class.getSimpleName())) {
- checkArgument(nodeType != null);
- }
-
- NormalizedNodeContainerBuilder builder = createBuilder(node);
-
- Set<NodeToNormalizedNodeBuilder<?>> usedMixins = new HashSet<>();
-
- logNode(node);
-
- if (node.getChildCount() == 0 && (
- node.getType().equals(LeafSetEntryNode.class.getSimpleName())
- || node.getType().equals(LeafNode.class.getSimpleName()))) {
- PathArgument childPathArgument =
- NodeIdentifierFactory.getArgument(node.getPath());
-
- final NormalizedNode child;
- if (childPathArgument instanceof NodeWithValue) {
- final NodeWithValue nodeWithValue =
- new NodeWithValue(childPathArgument.getNodeType(),
- node.getValue());
- child =
- Builders.leafSetEntryBuilder()
- .withNodeIdentifier(nodeWithValue)
- .withValue(node.getValue()).build();
- } else {
- child =
- ImmutableNodes.leafNode(childPathArgument.getNodeType(),
- node.getValue());
- }
- builder.addChild(child);
- }
-
- final List<Node> children = node.getChildList();
- for (Node nodeChild : children) {
-
- PathArgument childPathArgument =
- NodeIdentifierFactory.getArgument(nodeChild.getPath());
-
- QName childNodeType = null;
- NodeToNormalizedNodeBuilder childOp = null;
-
- if (childPathArgument instanceof AugmentationIdentifier) {
- childOp = getChild(childPathArgument);
- checkArgument(childOp instanceof AugmentationNormalization, childPathArgument);
- } else {
- childNodeType = childPathArgument.getNodeType();
- childOp = getChild(childNodeType);
- }
- // We skip unknown nodes if this node is mixin since
- // it's nodes and parent nodes are interleaved
- if (childOp == null && isMixin()) {
- continue;
- } else if (childOp == null) {
- logger.error(
- "childOp is null and this operation is not a mixin : this = {}",
- this.toString());
- }
-
- checkArgument(childOp != null,
- "Node %s is not allowed inside %s",
- childNodeType, getIdentifier());
-
- if (childOp.isMixin()) {
- if (usedMixins.contains(childOp)) {
- // We already run / processed that mixin, so to avoid
- // duplicate we are
- // skipping next nodes.
- continue;
- }
- // builder.addChild(childOp.normalize(nodeType, treeCacheNode));
- final NormalizedNode childNode =
- childOp.normalize(childNodeType, nodeChild);
- if (childNode != null)
- builder.addChild(childNode);
- usedMixins.add(childOp);
- } else {
- final NormalizedNode childNode =
- childOp.normalize(childNodeType, nodeChild);
- if (childNode != null)
- builder.addChild(childNode);
- }
- }
-
-
- try {
- return (NormalizedNodeContainer<?, ?, ?>) builder.build();
- } catch (Exception e) {
- return null;
- }
-
- }
-
- private void logNode(Node node) {
- //let us find out the type of the node
- logger.debug("We got a {} , with identifier {} with {} children",
- node.getType(), node.getPath(),
- node.getChildList());
- }
-
- @SuppressWarnings("rawtypes")
- protected abstract NormalizedNodeContainerBuilder createBuilder(
- final Node node);
-
- }
-
-
- private static abstract class DataContainerNormalizationOperation<T extends PathArgument>
- extends NodeToNormalizationNodeOperation<T> {
-
- private final DataNodeContainer schema;
- private final Map<QName, NodeToNormalizedNodeBuilder<?>> byQName;
- private final Map<PathArgument, NodeToNormalizedNodeBuilder<?>> byArg;
-
- protected DataContainerNormalizationOperation(final T identifier,
- final DataNodeContainer schema) {
- super(identifier);
- this.schema = schema;
- this.byArg = new ConcurrentHashMap<>();
- this.byQName = new ConcurrentHashMap<>();
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(
- final PathArgument child) {
- NodeToNormalizedNodeBuilder<?> potential = byArg.get(child);
- if (potential != null) {
- return potential;
- }
- potential = fromSchema(schema, child);
- return register(potential);
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(final QName child) {
- if (child == null) {
- return null;
- }
-
- NodeToNormalizedNodeBuilder<?> potential = byQName.get(child);
- if (potential != null) {
- return potential;
- }
- potential = fromSchemaAndPathArgument(schema, child);
- return register(potential);
- }
-
- private NodeToNormalizedNodeBuilder<?> register(
- final NodeToNormalizedNodeBuilder<?> potential) {
- if (potential != null) {
- byArg.put(potential.getIdentifier(), potential);
- for (QName qName : potential.getQNameIdentifiers()) {
- byQName.put(qName, potential);
- }
- }
- return potential;
- }
-
- }
-
-
- private static final class ListItemNormalization extends
- DataContainerNormalizationOperation<NodeIdentifierWithPredicates> {
-
- private final List<QName> keyDefinition;
- private final ListSchemaNode schemaNode;
-
- protected ListItemNormalization(
- final NodeIdentifierWithPredicates identifier,
- final ListSchemaNode schema) {
- super(identifier, schema);
- this.schemaNode = schema;
- keyDefinition = schema.getKeyDefinition();
- }
-
- @Override
- protected NormalizedNodeContainerBuilder createBuilder(
- final Node node) {
- NodeIdentifierWithPredicates nodeIdentifierWithPredicates =
- (NodeIdentifierWithPredicates) NodeIdentifierFactory
- .createPathArgument(node
- .getPath(), schemaNode);
- return Builders.mapEntryBuilder()
- .withNodeIdentifier(
- nodeIdentifierWithPredicates
- );
- }
-
- @Override
- public NormalizedNode<?, ?> createDefault(
- final PathArgument currentArg) {
- DataContainerNodeAttrBuilder<NodeIdentifierWithPredicates, MapEntryNode>
- builder =
- Builders.mapEntryBuilder().withNodeIdentifier(
- (NodeIdentifierWithPredicates) currentArg);
- for (Entry<QName, Object> keyValue : ((NodeIdentifierWithPredicates) currentArg)
- .getKeyValues().entrySet()) {
- if (keyValue.getValue() == null) {
- throw new NullPointerException(
- "Null value found for path : "
- + currentArg);
- }
- builder.addChild(Builders.leafBuilder()
- //
- .withNodeIdentifier(new NodeIdentifier(keyValue.getKey()))
- .withValue(keyValue.getValue()).build());
- }
- return builder.build();
- }
-
-
- @Override
- public boolean isKeyedEntry() {
- return true;
- }
- }
-
-
- private static final class ContainerNormalization extends
- DataContainerNormalizationOperation<NodeIdentifier> {
-
- protected ContainerNormalization(final ContainerSchemaNode schema) {
- super(new NodeIdentifier(schema.getQName()), schema);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder createBuilder(
- final Node node) {
- return Builders.containerBuilder()
- .withNodeIdentifier(getIdentifier());
- }
-
- @Override
- public NormalizedNode<?, ?> createDefault(
- final PathArgument currentArg) {
- return Builders.containerBuilder()
- .withNodeIdentifier((NodeIdentifier) currentArg).build();
- }
-
- }
-
-
- private static abstract class MixinNormalizationOp<T extends PathArgument>
- extends NodeToNormalizationNodeOperation<T> {
-
- protected MixinNormalizationOp(final T identifier) {
- super(identifier);
- }
-
- @Override
- public final boolean isMixin() {
- return true;
- }
-
- }
-
-
- private static final class LeafListMixinNormalization extends
- MixinNormalizationOp<NodeIdentifier> {
-
- private final NodeToNormalizedNodeBuilder<?> innerOp;
-
- public LeafListMixinNormalization(final LeafListSchemaNode potential) {
- super(new NodeIdentifier(potential.getQName()));
- innerOp = new LeafListEntryNormalization(potential);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder createBuilder(
- final Node node) {
- return Builders.leafSetBuilder()
- .withNodeIdentifier(getIdentifier());
- }
-
- @Override
- public NormalizedNode<?, ?> createDefault(
- final PathArgument currentArg) {
- return Builders.leafSetBuilder().withNodeIdentifier(getIdentifier())
- .build();
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(
- final PathArgument child) {
- if (child instanceof NodeWithValue) {
- return innerOp;
- }
- return null;
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(final QName child) {
- if (getIdentifier().getNodeType().equals(child)) {
- return innerOp;
- }
- return null;
- }
-
- }
-
-
- private static final class AugmentationNormalization extends
- MixinNormalizationOp<AugmentationIdentifier> {
-
- private final Map<QName, NodeToNormalizedNodeBuilder<?>> byQName;
- private final Map<PathArgument, NodeToNormalizedNodeBuilder<?>> byArg;
-
- public AugmentationNormalization(final AugmentationSchema augmentation,
- final DataNodeContainer schema) {
- super(augmentationIdentifierFrom(augmentation));
-
- ImmutableMap.Builder<QName, NodeToNormalizedNodeBuilder<?>>
- byQNameBuilder =
- ImmutableMap.builder();
- ImmutableMap.Builder<PathArgument, NodeToNormalizedNodeBuilder<?>>
- byArgBuilder =
- ImmutableMap.builder();
-
- for (DataSchemaNode augNode : augmentation.getChildNodes()) {
- DataSchemaNode resolvedNode =
- schema.getDataChildByName(augNode.getQName());
- NodeToNormalizedNodeBuilder<?> resolvedOp =
- fromDataSchemaNode(resolvedNode);
- byArgBuilder.put(resolvedOp.getIdentifier(), resolvedOp);
- for (QName resQName : resolvedOp.getQNameIdentifiers()) {
- byQNameBuilder.put(resQName, resolvedOp);
- }
- }
- byQName = byQNameBuilder.build();
- byArg = byArgBuilder.build();
-
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(
- final PathArgument child) {
- return byArg.get(child);
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(final QName child) {
- return byQName.get(child);
- }
-
- @Override
- protected Set<QName> getQNameIdentifiers() {
- return getIdentifier().getPossibleChildNames();
- }
-
- @SuppressWarnings("rawtypes")
- @Override
- protected NormalizedNodeContainerBuilder createBuilder(
- final Node node) {
- return Builders.augmentationBuilder()
- .withNodeIdentifier(getIdentifier());
- }
-
- @Override
- public NormalizedNode<?, ?> createDefault(
- final PathArgument currentArg) {
- return Builders.augmentationBuilder()
- .withNodeIdentifier(getIdentifier())
- .build();
- }
-
- }
-
-
- private static final class ListMixinNormalization extends
- MixinNormalizationOp<NodeIdentifier> {
-
- private final ListItemNormalization innerNode;
-
- public ListMixinNormalization(final ListSchemaNode list) {
- super(new NodeIdentifier(list.getQName()));
- this.innerNode =
- new ListItemNormalization(new NodeIdentifierWithPredicates(
- list.getQName(), Collections.<QName, Object>emptyMap()),
- list);
- }
-
- @SuppressWarnings("rawtypes")
- @Override
- protected NormalizedNodeContainerBuilder createBuilder(
- final Node node) {
- return Builders.mapBuilder().withNodeIdentifier(getIdentifier());
- }
-
- @Override
- public NormalizedNode<?, ?> createDefault(
- final PathArgument currentArg) {
- return Builders.mapBuilder().withNodeIdentifier(getIdentifier())
- .build();
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(
- final PathArgument child) {
- if (child.getNodeType().equals(getIdentifier().getNodeType())) {
- return innerNode;
- }
- return null;
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(final QName child) {
- if (getIdentifier().getNodeType().equals(child)) {
- return innerNode;
- }
- return null;
- }
-
- }
-
-
- private static class ChoiceNodeNormalization extends
- MixinNormalizationOp<NodeIdentifier> {
-
- private final ImmutableMap<QName, NodeToNormalizedNodeBuilder<?>>
- byQName;
- private final ImmutableMap<PathArgument, NodeToNormalizedNodeBuilder<?>>
- byArg;
-
- protected ChoiceNodeNormalization(
- final org.opendaylight.yangtools.yang.model.api.ChoiceNode schema) {
- super(new NodeIdentifier(schema.getQName()));
- ImmutableMap.Builder<QName, NodeToNormalizedNodeBuilder<?>>
- byQNameBuilder =
- ImmutableMap.builder();
- ImmutableMap.Builder<PathArgument, NodeToNormalizedNodeBuilder<?>>
- byArgBuilder =
- ImmutableMap.builder();
-
- for (ChoiceCaseNode caze : schema.getCases()) {
- for (DataSchemaNode cazeChild : caze.getChildNodes()) {
- NodeToNormalizedNodeBuilder<?> childOp =
- fromDataSchemaNode(cazeChild);
- byArgBuilder.put(childOp.getIdentifier(), childOp);
- for (QName qname : childOp.getQNameIdentifiers()) {
- byQNameBuilder.put(qname, childOp);
- }
- }
- }
- byQName = byQNameBuilder.build();
- byArg = byArgBuilder.build();
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(
- final PathArgument child) {
- return byArg.get(child);
- }
-
- @Override
- public NodeToNormalizedNodeBuilder<?> getChild(final QName child) {
- return byQName.get(child);
- }
-
- @Override
- protected NormalizedNodeContainerBuilder createBuilder(
- final Node node) {
- return Builders.choiceBuilder().withNodeIdentifier(getIdentifier());
- }
-
- @Override
- public NormalizedNode<?, ?> createDefault(
- final PathArgument currentArg) {
- return Builders.choiceBuilder().withNodeIdentifier(getIdentifier())
- .build();
- }
- }
-
- /**
- * Find an appropriate NormalizedNodeBuilder using both the schema and the
- * Path Argument
- *
- * @param schema
- * @param child
- * @return
- */
- public static NodeToNormalizedNodeBuilder<?> fromSchemaAndPathArgument(
- final DataNodeContainer schema, final QName child) {
- DataSchemaNode potential = schema.getDataChildByName(child);
- if (potential == null) {
- Iterable<org.opendaylight.yangtools.yang.model.api.ChoiceNode>
- choices =
- FluentIterable.from(schema.getChildNodes()).filter(
- org.opendaylight.yangtools.yang.model.api.ChoiceNode.class);
- potential = findChoice(choices, child);
- }
- if (potential == null) {
- if (logger.isTraceEnabled()) {
- logger.trace("BAD CHILD = {}", child.toString());
- }
- }
-
- checkArgument(potential != null,
- "Supplied QName %s is not valid according to schema %s", child,
- schema);
-
- // If the schema in an instance of DataSchemaNode and the potential
- // is augmenting something then there is a chance that this may be
- // and augmentation node
- if ((schema instanceof DataSchemaNode)
- && potential.isAugmenting()) {
-
- AugmentationNormalization augmentation =
- fromAugmentation(schema, (AugmentationTarget) schema,
- potential);
-
- // If an augmentation normalization (builder) is not found then
- // we fall through to the regular processing
- if(augmentation != null){
- return augmentation;
- }
- }
- return fromDataSchemaNode(potential);
- }
-
- /**
- * Given a bunch of choice nodes and a the name of child find a choice node for that child which
- * has a non-null value
- *
- * @param choices
- * @param child
- * @return
- */
- private static org.opendaylight.yangtools.yang.model.api.ChoiceNode findChoice(
- final Iterable<org.opendaylight.yangtools.yang.model.api.ChoiceNode> choices,
- final QName child) {
- org.opendaylight.yangtools.yang.model.api.ChoiceNode foundChoice = null;
- choiceLoop:
- for (org.opendaylight.yangtools.yang.model.api.ChoiceNode choice : choices) {
- for (ChoiceCaseNode caze : choice.getCases()) {
- if (caze.getDataChildByName(child) != null) {
- foundChoice = choice;
- break choiceLoop;
- }
- }
- }
- return foundChoice;
- }
-
-
- /**
- * Create an AugmentationIdentifier based on the AugmentationSchema
- *
- * @param augmentation
- * @return
- */
- public static AugmentationIdentifier augmentationIdentifierFrom(
- final AugmentationSchema augmentation) {
- ImmutableSet.Builder<QName> potentialChildren = ImmutableSet.builder();
- for (DataSchemaNode child : augmentation.getChildNodes()) {
- potentialChildren.add(child.getQName());
- }
- return new AugmentationIdentifier(potentialChildren.build());
- }
-
- /**
- * Create an AugmentationNormalization based on the schema of the DataContainer, the
- * AugmentationTarget and the potential schema node
- *
- * @param schema
- * @param augments
- * @param potential
- * @return
- */
- private static AugmentationNormalization fromAugmentation(
- final DataNodeContainer schema, final AugmentationTarget augments,
- final DataSchemaNode potential) {
- AugmentationSchema augmentation = null;
- for (AugmentationSchema aug : augments.getAvailableAugmentations()) {
- DataSchemaNode child = aug.getDataChildByName(potential.getQName());
- if (child != null) {
- augmentation = aug;
- break;
- }
-
- }
- if (augmentation != null) {
- return new AugmentationNormalization(augmentation, schema);
- } else {
- return null;
- }
- }
-
- /**
- * @param schema
- * @param child
- * @return
- */
- private static NodeToNormalizedNodeBuilder<?> fromSchema(
- final DataNodeContainer schema, final PathArgument child) {
- if (child instanceof AugmentationIdentifier) {
- QName childQName = ((AugmentationIdentifier) child)
- .getPossibleChildNames().iterator().next();
-
- return fromSchemaAndPathArgument(schema, childQName);
- }
- return fromSchemaAndPathArgument(schema, child.getNodeType());
- }
-
- public static NodeToNormalizedNodeBuilder<?> fromDataSchemaNode(
- final DataSchemaNode potential) {
- if (potential instanceof ContainerSchemaNode) {
- return new ContainerNormalization((ContainerSchemaNode) potential);
- } else if (potential instanceof ListSchemaNode) {
- return new ListMixinNormalization((ListSchemaNode) potential);
- } else if (potential instanceof LeafSchemaNode) {
- return new LeafNormalization((LeafSchemaNode) potential,
- new NodeIdentifier(potential.getQName()));
- } else if (potential instanceof org.opendaylight.yangtools.yang.model.api.ChoiceNode) {
- return new ChoiceNodeNormalization(
- (org.opendaylight.yangtools.yang.model.api.ChoiceNode) potential);
- } else if (potential instanceof LeafListSchemaNode) {
- return new LeafListMixinNormalization(
- (LeafListSchemaNode) potential);
- }
- return null;
- }
-
- public static NodeToNormalizedNodeBuilder<?> from(final SchemaContext ctx) {
- return new ContainerNormalization(ctx);
- }
-
- public abstract NormalizedNode<?, ?> createDefault(PathArgument currentArg);
-
-}
+++ /dev/null
-/*
- *
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- */
-
-package org.opendaylight.controller.cluster.datastore.node;
-
-import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
-import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.yangtools.yang.data.api.codec.BitsCodec;
-import org.opendaylight.yangtools.yang.data.impl.codec.TypeDefinitionAwareCodec;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
-import org.opendaylight.yangtools.yang.model.util.IdentityrefType;
-import org.opendaylight.yangtools.yang.model.util.InstanceIdentifierType;
-import org.opendaylight.yangtools.yang.model.util.Leafref;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class NodeValueCodec {
- protected static final Logger logger = LoggerFactory
- .getLogger(NodeValueCodec.class);
-
- public static Object toTypeSafeValue(DataSchemaNode schema, TypeDefinition type, NormalizedNodeMessages.Node node){
-
- String value = node.getValue();
-
- if(schema != null && value != null){
- TypeDefinition<?> baseType = type;
-
- while (baseType.getBaseType() != null) {
- baseType = baseType.getBaseType();
- }
-
- TypeDefinitionAwareCodec<Object, ? extends TypeDefinition<?>> codec =
- TypeDefinitionAwareCodec.from(type);
-
- if(codec instanceof BitsCodec){
- if(value.contains("[]")){
- value = "";
- } else {
- value = value.replace("[", "");
- value = value.replace("]", "");
- value = value.replace(",", " ");
- }
- }
-
- if (codec != null) {
- return codec.deserialize(value);
- } else if(baseType instanceof Leafref) {
- return value;
- } else if(baseType instanceof IdentityrefType) {
- return QNameFactory.create(value);
- } else if(baseType instanceof InstanceIdentifierType) {
- return InstanceIdentifierUtils.fromSerializable(node.getInstanceIdentifierValue());
- } else {
- logger.error("Could not figure out how to transform value " + value + " for schemaType " + type);
- }
- }
-
- return value;
- }
-}
package org.opendaylight.controller.cluster.datastore.node;
-import org.opendaylight.controller.cluster.datastore.node.utils.PathUtils;
import org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeSerializer;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeSerializer.DeSerializer;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeSerializer.Serializer;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.Container;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class NormalizedNodeToNodeCodec {
+ public interface Encoded {
+ NormalizedNodeMessages.Container getEncodedNode();
+
+ NormalizedNodeMessages.InstanceIdentifier getEncodedPath();
+ }
+
+ public interface Decoded {
+ NormalizedNode<?,?> getDecodedNode();
+
+ YangInstanceIdentifier getDecodedPath();
+ }
+
private final SchemaContext ctx;
- private static final Logger logger = LoggerFactory.getLogger(NormalizedNodeToNodeCodec.class);
public NormalizedNodeToNodeCodec(final SchemaContext ctx){
this.ctx = ctx;
+ }
+ public NormalizedNodeMessages.Container encode(NormalizedNode<?,?> node){
+ return encode(null, node).getEncodedNode();
}
- public NormalizedNodeMessages.Container encode(YangInstanceIdentifier id, NormalizedNode node){
+ public Encoded encode(YangInstanceIdentifier path, NormalizedNode<?,?> node) {
+
+ NormalizedNodeMessages.InstanceIdentifier serializedPath = null;
NormalizedNodeMessages.Container.Builder builder = NormalizedNodeMessages.Container.newBuilder();
- String parentPath = "";
- if(id != null){
- parentPath = PathUtils.getParentPath(PathUtils.toString(id));
- }
+ // Note: parent path is no longer used
+ builder.setParentPath("");
- builder.setParentPath(parentPath);
if(node != null) {
- builder.setNormalizedNode(NormalizedNodeSerializer.serialize(node));
+ if(path == null) {
+ builder.setNormalizedNode(NormalizedNodeSerializer.serialize(node));
+ } else {
+ Serializer serializer = NormalizedNodeSerializer.newSerializer(node);
+ builder.setNormalizedNode(serializer.serialize(path));
+ serializedPath = serializer.getSerializedPath();
+ }
}
- return builder.build();
+ return new EncodedImpl(builder.build(), serializedPath);
+ }
+
+
+ public NormalizedNode<?,?> decode(NormalizedNodeMessages.Node node){
+ return decode(null, node).getDecodedNode();
}
- public NormalizedNode<?,?> decode(YangInstanceIdentifier id, NormalizedNodeMessages.Node node){
+ public Decoded decode(NormalizedNodeMessages.InstanceIdentifier path,
+ NormalizedNodeMessages.Node node) {
if(node.getIntType() < 0 || node.getSerializedSize() == 0){
- return null;
+ return new DecodedImpl(null, null);
}
- return NormalizedNodeSerializer.deSerialize(node);
+
+ DeSerializer deSerializer = NormalizedNodeSerializer.newDeSerializer(path, node);
+ NormalizedNode<?,?> decodedNode = deSerializer.deSerialize();
+ return new DecodedImpl(decodedNode, deSerializer.getDeserializedPath());
}
+ private static class DecodedImpl implements Decoded {
+
+ private final NormalizedNode<?, ?> decodedNode;
+ private final YangInstanceIdentifier decodedPath;
+ public DecodedImpl(NormalizedNode<?, ?> decodedNode, YangInstanceIdentifier decodedPath) {
+ this.decodedNode = decodedNode;
+ this.decodedPath = decodedPath;
+ }
+
+ @Override
+ public NormalizedNode<?, ?> getDecodedNode() {
+ return decodedNode;
+ }
+
+ @Override
+ public YangInstanceIdentifier getDecodedPath() {
+ return decodedPath;
+ }
+ }
+
+ private static class EncodedImpl implements Encoded {
+
+ private final Container encodedNode;
+ private final InstanceIdentifier encodedPath;
+
+ EncodedImpl(Container encodedNode, InstanceIdentifier encodedPath) {
+ this.encodedNode = encodedNode;
+ this.encodedPath = encodedPath;
+ }
+
+ @Override
+ public Container getEncodedNode() {
+ return encodedNode;
+ }
+
+ @Override
+ public InstanceIdentifier getEncodedPath() {
+ return encodedPath;
+ }
+ }
}
+++ /dev/null
-/*
- *
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- */
-
-package org.opendaylight.controller.cluster.datastore.node;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.Node;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MixinNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
-
-import java.util.Map;
-
-/**
- * NormalizedNodeToProtocolBufferNode walks the NormalizedNode tree converting it to the
- * NormalizedMessage.Node
- * <p/>
- * {@link org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode } is a tree like structure that provides a generic structure for a yang data
- * model
- */
-public class NormalizedNodeToProtocolBufferNode {
-
-
- private final Node.Builder builderRoot;
- private NormalizedNodeMessages.Container container;
-
- public NormalizedNodeToProtocolBufferNode() {
-
- builderRoot = Node.newBuilder();
- }
-
- public void encode(String parentPath, NormalizedNode<?, ?> normalizedNode) {
- if (parentPath == null) {
- parentPath = "";
- }
-
- NormalizedNodeMessages.Container.Builder containerBuilder =
- NormalizedNodeMessages.Container.newBuilder();
-
- if (normalizedNode != null) {
-
- navigateNormalizedNode(0, parentPath, normalizedNode, builderRoot);
- // here we need to put back the Node Tree in Container
-
- container =
- containerBuilder.setParentPath(parentPath).setNormalizedNode(
- builderRoot.build()).build();
- } else {
- //this can happen when an attempt was made to read from datastore and normalized node was null.
- container = containerBuilder.setParentPath(parentPath).build();
-
- }
-
- }
-
-
- private void navigateDataContainerNode(int level, final String parentPath,
- final DataContainerNode<?> dataContainerNode,
- Node.Builder builderParent) {
-
- String newParentPath =
- parentPath + "/" + dataContainerNode.getIdentifier().toString();
- String type = getDataContainerType(dataContainerNode).getSimpleName();
- builderParent.setPath(dataContainerNode.getIdentifier().toString())
- .setType(type);
-
- final Iterable<DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>>
- value =
- dataContainerNode.getValue();
- for (NormalizedNode<?, ?> node : value) {
- Node.Builder builderChild = Node.newBuilder();
- if (node instanceof MixinNode
- && node instanceof NormalizedNodeContainer) {
-
- navigateNormalizedNodeContainerMixin(level, newParentPath,
- (NormalizedNodeContainer<?, ?, ?>) node, builderChild);
- } else {
- navigateNormalizedNode(level, newParentPath, node,
- builderChild);
- }
- builderParent.addChild(builderChild);
- }
- }
-
- private Class getDataContainerType(
- NormalizedNodeContainer<?, ?, ?> dataContainerNode) {
- if (dataContainerNode instanceof ChoiceNode) {
- return ChoiceNode.class;
- } else if (dataContainerNode instanceof AugmentationNode) {
- return AugmentationNode.class;
- } else if (dataContainerNode instanceof ContainerNode) {
- return ContainerNode.class;
- } else if (dataContainerNode instanceof MapEntryNode) {
- return MapEntryNode.class;
- } else if (dataContainerNode instanceof UnkeyedListEntryNode) {
- return UnkeyedListEntryNode.class;
- } else if (dataContainerNode instanceof MapNode) {
- return MapNode.class;
- } else if (dataContainerNode instanceof LeafSetNode) {
- return LeafSetNode.class;
- }
- throw new IllegalArgumentException(
- "could not find the data container node type "
- + dataContainerNode.toString()
- );
- }
-
- private void navigateNormalizedNodeContainerMixin(int level,
- final String parentPath,
- NormalizedNodeContainer<?, ?, ?> node, Node.Builder builderParent) {
- String newParentPath =
- parentPath + "/" + node.getIdentifier().toString();
-
- builderParent.setPath(node.getIdentifier().toString()).setType(
- this.getDataContainerType(node).getSimpleName());
- final Iterable<? extends NormalizedNode<?, ?>> value = node.getValue();
- for (NormalizedNode normalizedNode : value) {
- // child node builder
- Node.Builder builderChild = Node.newBuilder();
- if (normalizedNode instanceof MixinNode
- && normalizedNode instanceof NormalizedNodeContainer) {
- navigateNormalizedNodeContainerMixin(level + 1, newParentPath,
- (NormalizedNodeContainer) normalizedNode, builderChild);
- } else {
- navigateNormalizedNode(level, newParentPath, normalizedNode,
- builderChild);
- }
- builderParent.addChild(builderChild);
-
- }
-
-
-
- }
-
-
- private void navigateNormalizedNode(int level,
- String parentPath, NormalizedNode<?, ?> normalizedNode,
- Node.Builder builderParent) {
-
- if (normalizedNode instanceof DataContainerNode) {
-
- final DataContainerNode<?> dataContainerNode =
- (DataContainerNode) normalizedNode;
-
- navigateDataContainerNode(level + 1, parentPath, dataContainerNode,
- builderParent);
- } else if (normalizedNode instanceof MixinNode
- && normalizedNode instanceof NormalizedNodeContainer) {
-
- navigateNormalizedNodeContainerMixin(level, parentPath,
- (NormalizedNodeContainer<?, ?, ?>) normalizedNode,
- builderParent);
- } else {
- if (normalizedNode instanceof LeafNode) {
- buildLeafNode(parentPath, normalizedNode, builderParent);
- } else if (normalizedNode instanceof LeafSetEntryNode) {
- buildLeafSetEntryNode(parentPath, normalizedNode,
- builderParent);
- }
-
- }
-
- }
-
- private void buildLeafSetEntryNode(String parentPath,
- NormalizedNode<?, ?> normalizedNode,
- Node.Builder builderParent) {
- String path =
- parentPath + "/" + normalizedNode.getIdentifier().toString();
- LeafSetEntryNode leafSetEntryNode = (LeafSetEntryNode) normalizedNode;
- Map<QName, String> attributes = leafSetEntryNode.getAttributes();
- if (!attributes.isEmpty()) {
- NormalizedNodeMessages.Attribute.Builder builder = null;
- for (Map.Entry<QName, String> attribute : attributes.entrySet()) {
- builder = NormalizedNodeMessages.Attribute.newBuilder();
-
- builder
- .setName(attribute.getKey().toString())
- .setValue(normalizedNode.getValue().toString());
-
- builderParent.addAttributes(builder.build());
- }
- }
- buildNodeValue(normalizedNode, builderParent);
- }
-
- private void buildLeafNode(String parentPath,
- NormalizedNode<?, ?> normalizedNode,
- Node.Builder builderParent) {
- Preconditions.checkNotNull(parentPath);
- Preconditions.checkNotNull(normalizedNode);
- String path =
- parentPath + "/" + normalizedNode.getIdentifier().toString();
- LeafNode leafNode = (LeafNode) normalizedNode;
- Map<QName, String> attributes = leafNode.getAttributes();
- if (!attributes.isEmpty()) {
- NormalizedNodeMessages.Attribute.Builder builder = null;
- for (Map.Entry<QName, String> attribute : attributes.entrySet()) {
- builder = NormalizedNodeMessages.Attribute.newBuilder();
- builder
- .setName(attribute.getKey().toString())
- .setValue(attribute.getValue().toString());
-
- builderParent.addAttributes(builder.build());
- }
- }
-
- Object value = normalizedNode.getValue();
- if (value == null) {
- builderParent
- .setPath(normalizedNode.getIdentifier().toString())
- .setType(LeafNode.class.getSimpleName())
- .setValueType(String.class.getSimpleName())
- .setValue("");
- } else {
- buildNodeValue(normalizedNode, builderParent);
- }
- }
-
- private void buildNodeValue(NormalizedNode<?, ?> normalizedNode,
- Node.Builder builderParent) {
-
- Object value = normalizedNode.getValue();
-
- builderParent
- .setPath(normalizedNode.getIdentifier().toString())
- .setType(LeafNode.class.getSimpleName())
- .setValueType((value.getClass().getSimpleName()))
- .setValue(value.toString());
-
- if(value.getClass().equals(YangInstanceIdentifier.class)){
- builderParent.setInstanceIdentifierValue(
- InstanceIdentifierUtils
- .toSerializable((YangInstanceIdentifier) value));
- }
- }
-
- public NormalizedNodeMessages.Container getContainer() {
- return container;
- }
-}
.create(name.trim()));
}
- return new YangInstanceIdentifier.AugmentationIdentifier(null, childNames);
+ return new YangInstanceIdentifier.AugmentationIdentifier(childNames);
}
}
return value;
}
- public static YangInstanceIdentifier.PathArgument getArgument(String id, DataSchemaNode schemaNode){
- YangInstanceIdentifier.PathArgument value = cache.get(id);
- if(value == null){
- synchronized (cache){
- value = cache.get(id);
- if(value == null) {
- value = createPathArgument(id, schemaNode);
- cache.put(id, value);
- }
- }
- }
- return value;
- }
-
public static YangInstanceIdentifier.PathArgument createPathArgument(String id, DataSchemaNode schemaNode){
final NodeIdentifierWithPredicatesGenerator
nodeIdentifierWithPredicatesGenerator = new NodeIdentifierWithPredicatesGenerator(id, schemaNode);
public class PathUtils {
- public static String getParentPath(String currentElementPath){
- StringBuilder parentPath = new StringBuilder();
-
- if(currentElementPath != null){
- String[] parentPaths = currentElementPath.split("/");
- if(parentPaths.length > 2){
- for(int i=0;i<parentPaths.length-1;i++){
- if(parentPaths[i].length() > 0){
- parentPath.append("/");
- parentPath.append(parentPaths[i]);
- }
- }
- }
- }
- return parentPath.toString();
- }
-
/**
* Given a YangInstanceIdentifier return a serialized version of the same
* as a String
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
-import java.net.URI;
-import java.util.Date;
-
/**
* NormalizedNodeSerializationContext provides methods which help in encoding
* certain components of a NormalizedNode properly
*/
public interface NormalizedNodeSerializationContext {
- int addNamespace(URI namespace);
- int addRevision(Date revision);
- int addLocalName(String localName);
}
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.Node.Builder;
import org.opendaylight.yangtools.yang.data.api.Node;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeAttrBuilder;
-
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
+import java.util.EnumMap;
import java.util.Map;
-
import static org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeType.ANY_XML_NODE_TYPE;
import static org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeType.AUGMENTATION_NODE_TYPE;
import static org.opendaylight.controller.cluster.datastore.node.utils.serialization.NormalizedNodeType.CHOICE_NODE_TYPE;
return new Serializer(node).serialize();
}
+ public static Serializer newSerializer(NormalizedNode node) {
+ Preconditions.checkNotNull(node, "node should not be null");
+ return new Serializer(node);
+ }
/**
* DeSerialize a protocol buffer message back into a NormalizedNode
* @param node
* @return
*/
- public static NormalizedNode deSerialize(NormalizedNodeMessages.Node node){
- return new DeSerializer(node).deSerialize();
+ public static NormalizedNode deSerialize(NormalizedNodeMessages.Node node) {
+ Preconditions.checkNotNull(node, "node should not be null");
+ return new DeSerializer(null, node).deSerialize();
+ }
+
+ public static DeSerializer newDeSerializer(NormalizedNodeMessages.InstanceIdentifier path,
+ NormalizedNodeMessages.Node node) {
+ Preconditions.checkNotNull(node, "node should not be null");
+ return new DeSerializer(path, node);
}
/**
* @param pathArgument
* @return
*/
- public static YangInstanceIdentifier.PathArgument deSerialize(NormalizedNodeMessages.Node node, NormalizedNodeMessages.PathArgument pathArgument){
+ public static YangInstanceIdentifier.PathArgument deSerialize(NormalizedNodeMessages.Node node,
+ NormalizedNodeMessages.PathArgument pathArgument){
Preconditions.checkNotNull(node, "node should not be null");
Preconditions.checkNotNull(pathArgument, "pathArgument should not be null");
- return new DeSerializer(node).deSerialize(pathArgument);
+ return new DeSerializer(null, node).deSerialize(pathArgument);
}
- private static class Serializer implements NormalizedNodeSerializationContext {
+ public static class Serializer extends QNameSerializationContextImpl
+ implements NormalizedNodeSerializationContext {
private final NormalizedNode node;
- private final Map<Object, Integer> codeMap = new HashMap<>();
- private final List<String> codes = new ArrayList<>();
+ private NormalizedNodeMessages.InstanceIdentifier serializedPath;
private Serializer(NormalizedNode node) {
this.node = node;
}
- private NormalizedNodeMessages.Node serialize() {
- return this.serialize(node).addAllCode(codes).build();
+ public NormalizedNodeMessages.InstanceIdentifier getSerializedPath() {
+ return serializedPath;
+ }
+
+ public NormalizedNodeMessages.Node serialize() {
+ return this.serialize(node).addAllCode(getCodes()).build();
+ }
+
+ public NormalizedNodeMessages.Node serialize(YangInstanceIdentifier path) {
+ Builder builder = serialize(node);
+ serializedPath = InstanceIdentifierUtils.toSerializable(path, this);
+ return builder.addAllCode(getCodes()).build();
}
private NormalizedNodeMessages.Node.Builder serialize(
return builder;
}
-
-
- @Override public int addNamespace(URI namespace) {
- int namespaceInt = getCode(namespace);
-
- if(namespaceInt == -1) {
- namespaceInt = addCode(namespace, namespace.toString());
- }
- return namespaceInt;
- }
-
- @Override public int addRevision(Date revision) {
- if(revision == null){
- return -1;
- }
-
- int revisionInt = getCode(revision);
- if(revisionInt == -1) {
- String formattedRevision =
- SimpleDateFormatUtil.getRevisionFormat().format(revision);
- revisionInt = addCode(revision, formattedRevision);
- }
- return revisionInt;
- }
-
- @Override public int addLocalName(String localName) {
- int localNameInt = getCode(localName);
- if(localNameInt == -1) {
- localNameInt = addCode(localName, localName.toString());
- }
- return localNameInt;
-
- }
-
- public int addCode(Object code, String codeStr){
- int count = codes.size();
- codes.add(codeStr);
- codeMap.put(code, Integer.valueOf(count));
- return count;
- }
-
- public int getCode(Object code){
- if(codeMap.containsKey(code)){
- return codeMap.get(code);
- }
- return -1;
- }
}
- private static class DeSerializer implements NormalizedNodeDeSerializationContext {
+ public static class DeSerializer extends QNameDeSerializationContextImpl
+ implements NormalizedNodeDeSerializationContext {
private static Map<NormalizedNodeType, DeSerializationFunction>
- deSerializationFunctions = new HashMap<>();
+ deSerializationFunctions = new EnumMap<>(NormalizedNodeType.class);
static {
deSerializationFunctions.put(CONTAINER_NODE_TYPE,
}
private final NormalizedNodeMessages.Node node;
+ private final NormalizedNodeMessages.InstanceIdentifier path;
+ private YangInstanceIdentifier deserializedPath;
- public DeSerializer(NormalizedNodeMessages.Node node){
+ public DeSerializer(NormalizedNodeMessages.InstanceIdentifier path,
+ NormalizedNodeMessages.Node node) {
+ super(node.getCodeList());
+ this.path = path;
this.node = node;
}
- public NormalizedNode deSerialize(){
- return deSerialize(node);
+ public YangInstanceIdentifier getDeserializedPath() {
+ return deserializedPath;
+ }
+
+ public NormalizedNode deSerialize() {
+ NormalizedNode deserializedNode = deSerialize(node);
+ if(path != null) {
+ deserializedPath = InstanceIdentifierUtils.fromSerializable(path, this);
+ }
+
+ return deserializedNode;
}
private NormalizedNode deSerialize(NormalizedNodeMessages.Node node){
Preconditions.checkNotNull(node, "node should not be null");
- DeSerializationFunction deSerializationFunction =
- Preconditions.checkNotNull(deSerializationFunctions.get(NormalizedNodeType.values()[node.getIntType()]), "Unknown type " + node);
+
+ DeSerializationFunction deSerializationFunction = deSerializationFunctions.get(
+ NormalizedNodeType.values()[node.getIntType()]);
return deSerializationFunction.apply(this, node);
}
this, path);
}
- @Override public String getNamespace(int namespace) {
- return node.getCode(namespace);
- }
-
- @Override public String getRevision(int revision) {
- return node.getCode(revision);
- }
-
- @Override public String getLocalName(int localName) {
- return node.getCode(localName);
- }
-
public YangInstanceIdentifier.PathArgument deSerialize(
NormalizedNodeMessages.PathArgument pathArgument) {
return PathArgumentSerializer.deSerialize(this, pathArgument);
NormalizedNode apply(DeSerializer deserializer, NormalizedNodeMessages.Node node);
}
}
-
-
-
-
}
public static NormalizedNodeType getSerializableNodeType(NormalizedNode node){
Preconditions.checkNotNull(node, "node should not be null");
- if(node instanceof ContainerNode){
- return CONTAINER_NODE_TYPE;
- } else if(node instanceof LeafNode){
+ if(node instanceof LeafNode){
return LEAF_NODE_TYPE;
- } else if(node instanceof MapNode){
- return MAP_NODE_TYPE;
+ } else if(node instanceof LeafSetEntryNode){
+ return LEAF_SET_ENTRY_NODE_TYPE;
} else if(node instanceof MapEntryNode){
return MAP_ENTRY_NODE_TYPE;
+ } else if(node instanceof ContainerNode){
+ return CONTAINER_NODE_TYPE;
+ } else if(node instanceof MapNode){
+ return MAP_NODE_TYPE;
} else if(node instanceof AugmentationNode){
return AUGMENTATION_NODE_TYPE;
} else if(node instanceof LeafSetNode){
return LEAF_SET_NODE_TYPE;
- } else if(node instanceof LeafSetEntryNode){
- return LEAF_SET_ENTRY_NODE_TYPE;
} else if(node instanceof ChoiceNode){
return CHOICE_NODE_TYPE;
} else if(node instanceof OrderedLeafSetNode){
} else if(node instanceof AnyXmlNode){
return ANY_XML_NODE_TYPE;
}
+
throw new IllegalArgumentException("Node type unknown : " + node.getClass().getSimpleName());
}
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
-
import static org.opendaylight.controller.cluster.datastore.node.utils.serialization.PathArgumentType.getSerializablePathArgumentType;
public class PathArgumentSerializer {
- private static final Map<Class, PathArgumentAttributesGetter> pathArgumentAttributesGetters = new HashMap<>();
+ private static final String REVISION_ARG = "?revision=";
+ private static final Map<Class<?>, PathArgumentAttributesGetter> pathArgumentAttributesGetters = new HashMap<>();
- public static NormalizedNodeMessages.PathArgument serialize(NormalizedNodeSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument){
+ public static NormalizedNodeMessages.PathArgument serialize(QNameSerializationContext context,
+ YangInstanceIdentifier.PathArgument pathArgument){
Preconditions.checkNotNull(context, "context should not be null");
Preconditions.checkNotNull(pathArgument, "pathArgument should not be null");
}
- public static YangInstanceIdentifier.PathArgument deSerialize(NormalizedNodeDeSerializationContext context, NormalizedNodeMessages.PathArgument pathArgument){
+ public static YangInstanceIdentifier.PathArgument deSerialize(QNameDeSerializationContext context,
+ NormalizedNodeMessages.PathArgument pathArgument){
Preconditions.checkNotNull(context, "context should not be null");
Preconditions.checkNotNull(pathArgument, "pathArgument should not be null");
private static interface PathArgumentAttributesGetter {
- Iterable<? extends NormalizedNodeMessages.PathArgumentAttribute> get(NormalizedNodeSerializationContext context,
- YangInstanceIdentifier.PathArgument pathArgument);
+ Iterable<? extends NormalizedNodeMessages.PathArgumentAttribute> get(
+ QNameSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument);
}
static {
pathArgumentAttributesGetters.put(YangInstanceIdentifier.NodeWithValue.class, new PathArgumentAttributesGetter() {
@Override
public Iterable<? extends NormalizedNodeMessages.PathArgumentAttribute> get(
- NormalizedNodeSerializationContext context,
- YangInstanceIdentifier.PathArgument pathArgument) {
- List<NormalizedNodeMessages.PathArgumentAttribute> attributes =
- new ArrayList<>();
+ QNameSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument) {
YangInstanceIdentifier.NodeWithValue identifier
= (YangInstanceIdentifier.NodeWithValue) pathArgument;
NormalizedNodeMessages.PathArgumentAttribute attribute =
buildAttribute(context, null, identifier.getValue());
- attributes.add(attribute);
-
- return attributes;
-
+ return Arrays.asList(attribute);
}
});
pathArgumentAttributesGetters.put(YangInstanceIdentifier.NodeIdentifierWithPredicates.class, new PathArgumentAttributesGetter() {
@Override
public Iterable<? extends NormalizedNodeMessages.PathArgumentAttribute> get(
- NormalizedNodeSerializationContext context,
- YangInstanceIdentifier.PathArgument pathArgument) {
-
- List<NormalizedNodeMessages.PathArgumentAttribute> attributes =
- new ArrayList<>();
+ QNameSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument) {
YangInstanceIdentifier.NodeIdentifierWithPredicates identifier
= (YangInstanceIdentifier.NodeIdentifierWithPredicates) pathArgument;
- for (QName key : identifier.getKeyValues().keySet()) {
- Object value = identifier.getKeyValues().get(key);
+ Map<QName, Object> keyValues = identifier.getKeyValues();
+ List<NormalizedNodeMessages.PathArgumentAttribute> attributes =
+ new ArrayList<>(keyValues.size());
+ for (Entry<QName, Object> e : keyValues.entrySet()) {
NormalizedNodeMessages.PathArgumentAttribute attribute =
- buildAttribute(context, key, value);
+ buildAttribute(context, e.getKey(), e.getValue());
attributes.add(attribute);
-
}
return attributes;
-
}
});
pathArgumentAttributesGetters.put(YangInstanceIdentifier.AugmentationIdentifier.class, new PathArgumentAttributesGetter() {
@Override
public Iterable<? extends NormalizedNodeMessages.PathArgumentAttribute> get(
- NormalizedNodeSerializationContext context,
- YangInstanceIdentifier.PathArgument pathArgument) {
-
- List<NormalizedNodeMessages.PathArgumentAttribute> attributes =
- new ArrayList<>();
+ QNameSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument) {
YangInstanceIdentifier.AugmentationIdentifier identifier
= (YangInstanceIdentifier.AugmentationIdentifier) pathArgument;
- for (QName key : identifier.getPossibleChildNames()) {
+ Set<QName> possibleChildNames = identifier.getPossibleChildNames();
+ List<NormalizedNodeMessages.PathArgumentAttribute> attributes =
+ new ArrayList<>(possibleChildNames.size());
+ for (QName key : possibleChildNames) {
Object value = key;
NormalizedNodeMessages.PathArgumentAttribute attribute =
buildAttribute(context, key, value);
attributes.add(attribute);
-
}
return attributes;
-
}
});
pathArgumentAttributesGetters.put(YangInstanceIdentifier.NodeIdentifier.class, new PathArgumentAttributesGetter() {
@Override
public Iterable<? extends NormalizedNodeMessages.PathArgumentAttribute> get(
- NormalizedNodeSerializationContext context,
- YangInstanceIdentifier.PathArgument pathArgument) {
+ QNameSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument) {
return Collections.emptyList();
}
});
}
- private static NormalizedNodeMessages.PathArgumentAttribute buildAttribute(NormalizedNodeSerializationContext context,QName name, Object value){
+ private static NormalizedNodeMessages.PathArgumentAttribute buildAttribute(
+ QNameSerializationContext context, QName name, Object value) {
NormalizedNodeMessages.PathArgumentAttribute.Builder builder =
NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
}
- private static NormalizedNodeMessages.QName.Builder encodeQName(NormalizedNodeSerializationContext context, QName qName){
- if(qName == null){
+ private static NormalizedNodeMessages.QName.Builder encodeQName(QNameSerializationContext context,
+ QName qName) {
+ if(qName == null) {
return NormalizedNodeMessages.QName.getDefaultInstance().toBuilder();
}
NormalizedNodeMessages.QName.Builder qNameBuilder =
}
private static Iterable<? extends NormalizedNodeMessages.PathArgumentAttribute> getPathArgumentAttributes(
- NormalizedNodeSerializationContext context,
- YangInstanceIdentifier.PathArgument pathArgument) {
+ QNameSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument) {
return pathArgumentAttributesGetters.get(pathArgument.getClass()).get(context, pathArgument);
-
}
- private static String qNameToString(NormalizedNodeDeSerializationContext context,
+ private static String qNameToString(QNameDeSerializationContext context,
NormalizedNodeMessages.QName qName){
// If this serializer is used qName cannot be null (see encodeQName)
// adding null check only in case someone tried to deSerialize a protocol buffer node
// that was not serialized using the PathArgumentSerializer
- Preconditions.checkNotNull(qName, "qName should not be null");
- Preconditions.checkArgument(!"".equals(qName.getLocalName()),
- "qName.localName cannot be empty qName = " + qName.toString());
- Preconditions.checkArgument(qName.getNamespace() != -1, "qName.namespace should be valid");
+// Preconditions.checkNotNull(qName, "qName should not be null");
+// Preconditions.checkArgument(qName.getNamespace() != -1, "qName.namespace should be valid");
- StringBuilder sb = new StringBuilder();
String namespace = context.getNamespace(qName.getNamespace());
- String revision = "";
String localName = context.getLocalName(qName.getLocalName());
+ StringBuilder sb;
if(qName.getRevision() != -1){
- revision = context.getRevision(qName.getRevision());
- sb.append("(").append(namespace).append("?revision=").append(
- revision).append(")").append(
- localName);
+ String revision = context.getRevision(qName.getRevision());
+ sb = new StringBuilder(namespace.length() + REVISION_ARG.length() + revision.length() +
+ localName.length() + 2);
+ sb.append('(').append(namespace).append(REVISION_ARG).append(
+ revision).append(')').append(localName);
} else {
- sb.append("(").append(namespace).append(")").append(
- localName);
+ sb = new StringBuilder(namespace.length() + localName.length() + 2);
+ sb.append('(').append(namespace).append(')').append(localName);
}
return sb.toString();
-
}
/**
* @return MD-SAL PathArgument
*/
private static YangInstanceIdentifier.PathArgument parsePathArgument(
- NormalizedNodeDeSerializationContext context,
- NormalizedNodeMessages.PathArgument pathArgument) {
-
- Preconditions.checkArgument(pathArgument.getIntType() >= 0
- && pathArgument.getIntType() < PathArgumentType.values().length,
- "Illegal PathArgumentType " + pathArgument.getIntType());
+ QNameDeSerializationContext context, NormalizedNodeMessages.PathArgument pathArgument) {
switch(PathArgumentType.values()[pathArgument.getIntType()]){
case NODE_IDENTIFIER_WITH_VALUE : {
}
private static Map<QName, Object> toAttributesMap(
- NormalizedNodeDeSerializationContext context,
- List<NormalizedNodeMessages.PathArgumentAttribute> attributesList) {
-
- Map<QName, Object> map = new HashMap<>();
+ QNameDeSerializationContext context,
+ List<NormalizedNodeMessages.PathArgumentAttribute> attributesList) {
- for(NormalizedNodeMessages.PathArgumentAttribute attribute : attributesList){
+ Map<QName, Object> map;
+ if(attributesList.size() == 1) {
+ NormalizedNodeMessages.PathArgumentAttribute attribute = attributesList.get(0);
NormalizedNodeMessages.QName name = attribute.getName();
Object value = parseAttribute(context, attribute);
+ map = Collections.singletonMap(QNameFactory.create(qNameToString(context, name)), value);
+ } else {
+ map = new HashMap<>();
+
+ for(NormalizedNodeMessages.PathArgumentAttribute attribute : attributesList){
+ NormalizedNodeMessages.QName name = attribute.getName();
+ Object value = parseAttribute(context, attribute);
- map.put(QNameFactory.create(qNameToString(context, name)), value);
+ map.put(QNameFactory.create(qNameToString(context, name)), value);
+ }
}
return map;
}
- private static Object parseAttribute(NormalizedNodeDeSerializationContext context, NormalizedNodeMessages.PathArgumentAttribute attribute){
+ private static Object parseAttribute(QNameDeSerializationContext context,
+ NormalizedNodeMessages.PathArgumentAttribute attribute){
return ValueSerializer.deSerialize(context, attribute);
}
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
-import com.google.common.base.Preconditions;
+import java.util.Map;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import com.google.common.collect.ImmutableMap;
public enum PathArgumentType {
AUGMENTATION_IDENTIFIER,
NODE_IDENTIFIER_WITH_VALUE,
NODE_IDENTIFIER_WITH_PREDICATES;
+ private static Map<Class<?>, PathArgumentType> CLASS_TO_ENUM_MAP =
+ ImmutableMap.<Class<?>, PathArgumentType>builder().
+ put(YangInstanceIdentifier.AugmentationIdentifier.class, AUGMENTATION_IDENTIFIER).
+ put(YangInstanceIdentifier.NodeIdentifier.class, NODE_IDENTIFIER).
+ put(YangInstanceIdentifier.NodeIdentifierWithPredicates.class, NODE_IDENTIFIER_WITH_PREDICATES).
+ put(YangInstanceIdentifier.NodeWithValue.class, NODE_IDENTIFIER_WITH_VALUE).build();
+
public static int getSerializablePathArgumentType(YangInstanceIdentifier.PathArgument pathArgument){
- Preconditions.checkNotNull(pathArgument, "pathArgument should not be null");
-
- if(pathArgument instanceof YangInstanceIdentifier.AugmentationIdentifier){
- return AUGMENTATION_IDENTIFIER.ordinal();
- } else if(pathArgument instanceof YangInstanceIdentifier.NodeIdentifier){
- return NODE_IDENTIFIER.ordinal();
- } else if(pathArgument instanceof YangInstanceIdentifier.NodeIdentifierWithPredicates){
- return NODE_IDENTIFIER_WITH_PREDICATES.ordinal();
- } else if(pathArgument instanceof YangInstanceIdentifier.NodeWithValue){
- return NODE_IDENTIFIER_WITH_VALUE.ordinal();
+
+ PathArgumentType type = CLASS_TO_ENUM_MAP.get(pathArgument.getClass());
+ if(type == null) {
+ throw new IllegalArgumentException("Unknown type of PathArgument = " + pathArgument);
}
- throw new IllegalArgumentException("Unknown type of PathArgument = " + pathArgument.toString());
+
+ return type.ordinal();
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
+
+/**
+ * Interface that provides methods which help in decoding components of a QName.
+ *
+ * @author Thomas Pantelis
+ */
+public interface QNameDeSerializationContext {
+ String getNamespace(int namespace);
+
+ String getRevision(int revision);
+
+ String getLocalName(int localName);
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
+
+import java.util.List;
+
+/**
+ * Implementation of the QNameDeSerializationContext interface.
+ *
+ * @author Thomas Pantelis
+ */
+public class QNameDeSerializationContextImpl implements QNameDeSerializationContext {
+
+ private final List<String> codeList;
+
+ public QNameDeSerializationContextImpl(List<String> codeList) {
+ this.codeList = codeList;
+ }
+
+ @Override
+ public String getNamespace(int namespace) {
+ return codeList.get(namespace);
+ }
+
+ @Override
+ public String getRevision(int revision) {
+ return codeList.get(revision);
+ }
+
+ @Override
+ public String getLocalName(int localName) {
+ return codeList.get(localName);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
+
+import java.net.URI;
+import java.util.Date;
+
+/**
+ * Interface that provides methods which help in encoding components of a QName.
+ *
+ * @author Thomas Pantelis
+ */
+public interface QNameSerializationContext {
+ int addNamespace(URI namespace);
+
+ int addRevision(Date revision);
+
+ int addLocalName(String localName);
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil;
+
+/**
+ * Implementation of the QNameSerializationContext interface.
+ *
+ * @author Thomas Pantelis
+ */
+public class QNameSerializationContextImpl implements QNameSerializationContext {
+
+ private final Map<Object, Integer> codeMap = new HashMap<>();
+ private final List<String> codes = new ArrayList<>();
+
+ public List<String> getCodes() {
+ return codes;
+ }
+
+ @Override public int addNamespace(URI namespace) {
+ int namespaceInt = getCode(namespace);
+
+ if(namespaceInt == -1) {
+ namespaceInt = addCode(namespace, namespace.toString());
+ }
+ return namespaceInt;
+ }
+
+ @Override public int addRevision(Date revision) {
+ if(revision == null){
+ return -1;
+ }
+
+ int revisionInt = getCode(revision);
+ if(revisionInt == -1) {
+ String formattedRevision =
+ SimpleDateFormatUtil.getRevisionFormat().format(revision);
+ revisionInt = addCode(revision, formattedRevision);
+ }
+ return revisionInt;
+ }
+
+ @Override public int addLocalName(String localName) {
+ int localNameInt = getCode(localName);
+ if(localNameInt == -1) {
+ localNameInt = addCode(localName, localName);
+ }
+ return localNameInt;
+
+ }
+
+ private int addCode(Object code, String codeStr){
+ int count = codes.size();
+ codes.add(codeStr);
+ codeMap.put(code, Integer.valueOf(count));
+ return count;
+ }
+
+ private int getCode(Object code){
+ Integer value = codeMap.get(code);
+ return value == null ? -1 : value.intValue();
+ }
+}
package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
-import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
public class ValueSerializer {
public static void serialize(NormalizedNodeMessages.Node.Builder builder,
- NormalizedNodeSerializationContext context, Object value){
+ QNameSerializationContext context, Object value) {
builder.setIntValueType(ValueType.getSerializableType(value).ordinal());
if(value instanceof YangInstanceIdentifier) {
builder.setInstanceIdentifierValue(
- InstanceIdentifierUtils.toSerializable((YangInstanceIdentifier) value));
+ InstanceIdentifierUtils.toSerializable((YangInstanceIdentifier) value, context));
} else if(value instanceof Set) {
Set set = (Set) value;
if(!set.isEmpty()){
}
public static void serialize(NormalizedNodeMessages.PathArgumentAttribute.Builder builder,
- NormalizedNodeSerializationContext context, Object value){
+ QNameSerializationContext context, Object value){
builder.setType(ValueType.getSerializableType(value).ordinal());
builder.setValue(value.toString());
}
- public static Object deSerialize(
- NormalizedNodeDeSerializationContext context, NormalizedNodeMessages.Node node) {
+ public static Object deSerialize(QNameDeSerializationContext context,
+ NormalizedNodeMessages.Node node) {
if(node.getIntValueType() == ValueType.YANG_IDENTIFIER_TYPE.ordinal()){
return InstanceIdentifierUtils.fromSerializable(
- node.getInstanceIdentifierValue());
+ node.getInstanceIdentifierValue(), context);
} else if(node.getIntValueType() == ValueType.BITS_TYPE.ordinal()){
return new HashSet(node.getBitsValueList());
}
return deSerializeBasicTypes(node.getIntValueType(), node.getValue());
}
- public static Object deSerialize(
- NormalizedNodeDeSerializationContext context,
- NormalizedNodeMessages.PathArgumentAttribute attribute) {
+ public static Object deSerialize(QNameDeSerializationContext context,
+ NormalizedNodeMessages.PathArgumentAttribute attribute) {
return deSerializeBasicTypes(attribute.getType(), attribute.getValue());
}
private static Object deSerializeBasicTypes(int valueType, String value) {
- Preconditions.checkArgument(valueType >= 0 && valueType < ValueType.values().length,
- "Illegal value type " + valueType );
-
switch(ValueType.values()[valueType]){
case SHORT_TYPE: {
return Short.valueOf(value);
public static final ValueType getSerializableType(Object node){
Preconditions.checkNotNull(node, "node should not be null");
- if(types.containsKey(node.getClass())) {
- return types.get(node.getClass());
+ ValueType type = types.get(node.getClass());
+ if(type != null) {
+ return type;
} else if(node instanceof Set){
return BITS_TYPE;
}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+public class NodeTypes {
+
+ public static final byte LEAF_NODE = 1;
+ public static final byte LEAF_SET = 2;
+ public static final byte LEAF_SET_ENTRY_NODE = 3;
+ public static final byte CONTAINER_NODE = 4;
+ public static final byte UNKEYED_LIST = 5;
+ public static final byte UNKEYED_LIST_ITEM = 6;
+ public static final byte MAP_NODE = 7;
+ public static final byte MAP_ENTRY_NODE = 8;
+ public static final byte ORDERED_MAP_NODE = 9;
+ public static final byte CHOICE_NODE = 10;
+ public static final byte AUGMENTATION_NODE = 11;
+ public static final byte ANY_XML_NODE = 12;
+ public static final byte END_NODE = 13;
+
+}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.Node;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.OrderedMapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeAttrBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * NormalizedNodeInputStreamReader reads the byte stream and constructs the normalized node including its children nodes.
+ * This process goes in recursive manner, where each NodeTypes object signifies the start of the object, except END_NODE.
+ * If a node can have children, then that node's end is calculated based on appearance of END_NODE.
+ *
+ */
+
+public class NormalizedNodeInputStreamReader implements NormalizedNodeStreamReader {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodeInputStreamReader.class);
+
+ private static final String REVISION_ARG = "?revision=";
+
+ private final DataInputStream reader;
+
+ private final Map<Integer, String> codedStringMap = new HashMap<>();
+
+ private QName lastLeafSetQName;
+
+ public NormalizedNodeInputStreamReader(InputStream stream) throws IOException {
+ Preconditions.checkNotNull(stream);
+ reader = new DataInputStream(stream);
+ }
+
+ @Override
+ public NormalizedNode<?, ?> readNormalizedNode() throws IOException {
+ NormalizedNode<?, ?> node = null;
+
+ // each node should start with a byte
+ byte nodeType = reader.readByte();
+
+ if(nodeType == NodeTypes.END_NODE) {
+ LOG.debug("End node reached. return");
+ return null;
+ }
+ else if(nodeType == NodeTypes.AUGMENTATION_NODE) {
+ LOG.debug("Reading augmentation node. will create augmentation identifier");
+
+ YangInstanceIdentifier.AugmentationIdentifier identifier =
+ new YangInstanceIdentifier.AugmentationIdentifier(readQNameSet());
+ DataContainerNodeBuilder<YangInstanceIdentifier.AugmentationIdentifier, AugmentationNode> augmentationBuilder =
+ Builders.augmentationBuilder().withNodeIdentifier(identifier);
+ augmentationBuilder = addDataContainerChildren(augmentationBuilder);
+ node = augmentationBuilder.build();
+
+ } else {
+ if(nodeType == NodeTypes.LEAF_SET_ENTRY_NODE) {
+ LOG.debug("Reading leaf set entry node. Will create NodeWithValue instance identifier");
+
+ // Read the object value
+ Object value = readObject();
+
+ YangInstanceIdentifier.NodeWithValue nodeWithValue = new YangInstanceIdentifier.NodeWithValue(
+ lastLeafSetQName, value);
+ node = Builders.leafSetEntryBuilder().withNodeIdentifier(nodeWithValue).
+ withValue(value).build();
+
+ } else if(nodeType == NodeTypes.MAP_ENTRY_NODE) {
+ LOG.debug("Reading map entry node. Will create node identifier with predicates.");
+
+ QName qName = readQName();
+ YangInstanceIdentifier.NodeIdentifierWithPredicates nodeIdentifier =
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(qName, readKeyValueMap());
+ DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode> mapEntryBuilder
+ = Builders.mapEntryBuilder().withNodeIdentifier(nodeIdentifier);
+
+ mapEntryBuilder = (DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates,
+ MapEntryNode>)addDataContainerChildren(mapEntryBuilder);
+ node = mapEntryBuilder.build();
+
+ } else {
+ LOG.debug("Creating standard node identifier. ");
+
+ QName qName = readQName();
+ YangInstanceIdentifier.NodeIdentifier identifier = new YangInstanceIdentifier.NodeIdentifier(qName);
+ node = readNodeIdentifierDependentNode(nodeType, identifier);
+
+ }
+ }
+ return node;
+ }
+
+ private NormalizedNode<?, ?> readNodeIdentifierDependentNode(byte nodeType, YangInstanceIdentifier.NodeIdentifier identifier)
+ throws IOException {
+
+ switch(nodeType) {
+ case NodeTypes.LEAF_NODE :
+ LOG.debug("Read leaf node");
+ // Read the object value
+ NormalizedNodeAttrBuilder leafBuilder = Builders.leafBuilder();
+ return leafBuilder.withNodeIdentifier(identifier).withValue(readObject()).build();
+
+ case NodeTypes.ANY_XML_NODE :
+ LOG.debug("Read xml node");
+ Node value = (Node) readObject();
+ return Builders.anyXmlBuilder().withValue(value).build();
+
+ case NodeTypes.MAP_NODE :
+ LOG.debug("Read map node");
+ CollectionNodeBuilder<MapEntryNode, MapNode> mapBuilder = Builders.mapBuilder().withNodeIdentifier(identifier);
+ mapBuilder = addMapNodeChildren(mapBuilder);
+ return mapBuilder.build();
+
+ case NodeTypes.CHOICE_NODE :
+ LOG.debug("Read choice node");
+ DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifier, ChoiceNode> choiceBuilder =
+ Builders.choiceBuilder().withNodeIdentifier(identifier);
+ choiceBuilder = addDataContainerChildren(choiceBuilder);
+ return choiceBuilder.build();
+
+ case NodeTypes.ORDERED_MAP_NODE :
+ LOG.debug("Reading ordered map node");
+ CollectionNodeBuilder<MapEntryNode, OrderedMapNode> orderedMapBuilder =
+ Builders.orderedMapBuilder().withNodeIdentifier(identifier);
+ orderedMapBuilder = addMapNodeChildren(orderedMapBuilder);
+ return orderedMapBuilder.build();
+
+ case NodeTypes.UNKEYED_LIST :
+ LOG.debug("Read unkeyed list node");
+ CollectionNodeBuilder<UnkeyedListEntryNode, UnkeyedListNode> unkeyedListBuilder =
+ Builders.unkeyedListBuilder().withNodeIdentifier(identifier);
+ unkeyedListBuilder = addUnkeyedListChildren(unkeyedListBuilder);
+ return unkeyedListBuilder.build();
+
+ case NodeTypes.UNKEYED_LIST_ITEM :
+ LOG.debug("Read unkeyed list item node");
+ DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, UnkeyedListEntryNode> unkeyedListEntryBuilder
+ = Builders.unkeyedListEntryBuilder().withNodeIdentifier(identifier);
+
+ unkeyedListEntryBuilder = (DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, UnkeyedListEntryNode>)
+ addDataContainerChildren(unkeyedListEntryBuilder);
+ return unkeyedListEntryBuilder.build();
+
+ case NodeTypes.CONTAINER_NODE :
+ LOG.debug("Read container node");
+ DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode> containerBuilder =
+ Builders.containerBuilder().withNodeIdentifier(identifier);
+
+ containerBuilder = (DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode>)
+ addDataContainerChildren(containerBuilder);
+ return containerBuilder.build();
+
+ case NodeTypes.LEAF_SET :
+ LOG.debug("Read leaf set node");
+ ListNodeBuilder<Object, LeafSetEntryNode<Object>> leafSetBuilder =
+ Builders.leafSetBuilder().withNodeIdentifier(identifier);
+ leafSetBuilder = addLeafSetChildren(identifier.getNodeType(), leafSetBuilder);
+ return leafSetBuilder.build();
+
+ default :
+ return null;
+ }
+ }
+
+ private QName readQName() throws IOException {
+ // Read in the same sequence of writing
+ String localName = readCodedString();
+ String namespace = readCodedString();
+ String revision = readCodedString();
+ String qName;
+ // Not using stringbuilder as compiler optimizes string concatenation of +
+ if(revision != null){
+ qName = "(" + namespace+ REVISION_ARG + revision + ")" +localName;
+ } else {
+ qName = "(" + namespace + ")" +localName;
+ }
+
+ return QNameFactory.create(qName);
+ }
+
+
+ private String readCodedString() throws IOException {
+ boolean readFromMap = reader.readBoolean();
+ if(readFromMap) {
+ return codedStringMap.get(reader.readInt());
+ } else {
+ String value = reader.readUTF();
+ if(value != null) {
+ codedStringMap.put(Integer.valueOf(codedStringMap.size()), value);
+ }
+ return value;
+ }
+ }
+
+ private Set<QName> readQNameSet() throws IOException{
+ // Read the children count
+ int count = reader.readInt();
+ Set<QName> children = new HashSet<>(count);
+ for(int i = 0; i<count; i++) {
+ children.add(readQName());
+ }
+ return children;
+ }
+
+ private Map<QName, Object> readKeyValueMap() throws IOException {
+ int count = reader.readInt();
+ Map<QName, Object> keyValueMap = new HashMap<>(count);
+
+ for(int i = 0; i<count; i++) {
+ keyValueMap.put(readQName(), readObject());
+ }
+
+ return keyValueMap;
+ }
+
+ private Object readObject() throws IOException {
+ byte objectType = reader.readByte();
+ switch(objectType) {
+ case ValueTypes.BITS_TYPE:
+ return readObjSet();
+
+ case ValueTypes.BOOL_TYPE :
+ return reader.readBoolean();
+
+ case ValueTypes.BYTE_TYPE :
+ return reader.readByte();
+
+ case ValueTypes.INT_TYPE :
+ return reader.readInt();
+
+ case ValueTypes.LONG_TYPE :
+ return reader.readLong();
+
+ case ValueTypes.QNAME_TYPE :
+ return readQName();
+
+ case ValueTypes.SHORT_TYPE :
+ return reader.readShort();
+
+ case ValueTypes.STRING_TYPE :
+ return reader.readUTF();
+
+ case ValueTypes.BIG_DECIMAL_TYPE :
+ return new BigDecimal(reader.readUTF());
+
+ case ValueTypes.BIG_INTEGER_TYPE :
+ return new BigInteger(reader.readUTF());
+
+ case ValueTypes.YANG_IDENTIFIER_TYPE :
+ int size = reader.readInt();
+
+ List<YangInstanceIdentifier.PathArgument> pathArguments = new ArrayList<>(size);
+
+ for(int i=0; i<size; i++) {
+ pathArguments.add(readPathArgument());
+ }
+ return YangInstanceIdentifier.create(pathArguments);
+
+ default :
+ return null;
+ }
+ }
+
+ private Set<String> readObjSet() throws IOException {
+ int count = reader.readInt();
+ Set<String> children = new HashSet<>(count);
+ for(int i = 0; i<count; i++) {
+ children.add(readCodedString());
+ }
+ return children;
+ }
+
+ private YangInstanceIdentifier.PathArgument readPathArgument() throws IOException {
+ // read Type
+ int type = reader.readByte();
+
+ switch(type) {
+
+ case PathArgumentTypes.AUGMENTATION_IDENTIFIER :
+ return new YangInstanceIdentifier.AugmentationIdentifier(readQNameSet());
+
+ case PathArgumentTypes.NODE_IDENTIFIER :
+ return new YangInstanceIdentifier.NodeIdentifier(readQName());
+
+ case PathArgumentTypes.NODE_IDENTIFIER_WITH_PREDICATES :
+ return new YangInstanceIdentifier.NodeIdentifierWithPredicates(readQName(), readKeyValueMap());
+
+ case PathArgumentTypes.NODE_IDENTIFIER_WITH_VALUE :
+ return new YangInstanceIdentifier.NodeWithValue(readQName(), readObject());
+
+ default :
+ return null;
+ }
+ }
+
+ private ListNodeBuilder<Object, LeafSetEntryNode<Object>> addLeafSetChildren(QName nodeType,
+ ListNodeBuilder<Object, LeafSetEntryNode<Object>> builder)
+ throws IOException {
+
+ LOG.debug("Reading children of leaf set");
+
+ lastLeafSetQName = nodeType;
+
+ LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNode();
+
+ while(child != null) {
+ builder.withChild(child);
+ child = (LeafSetEntryNode<Object>)readNormalizedNode();
+ }
+ return builder;
+ }
+
+ private CollectionNodeBuilder<UnkeyedListEntryNode, UnkeyedListNode> addUnkeyedListChildren(
+ CollectionNodeBuilder<UnkeyedListEntryNode, UnkeyedListNode> builder)
+ throws IOException{
+
+ LOG.debug("Reading children of unkeyed list");
+ UnkeyedListEntryNode child = (UnkeyedListEntryNode)readNormalizedNode();
+
+ while(child != null) {
+ builder.withChild(child);
+ child = (UnkeyedListEntryNode)readNormalizedNode();
+ }
+ return builder;
+ }
+
+ private DataContainerNodeBuilder addDataContainerChildren(DataContainerNodeBuilder builder)
+ throws IOException {
+ LOG.debug("Reading data container (leaf nodes) nodes");
+
+ DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?> child =
+ (DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>) readNormalizedNode();
+
+ while(child != null) {
+ builder.withChild(child);
+ child =
+ (DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>) readNormalizedNode();
+ }
+ return builder;
+ }
+
+
+ private CollectionNodeBuilder addMapNodeChildren(CollectionNodeBuilder builder)
+ throws IOException {
+ LOG.debug("Reading map node children");
+ MapEntryNode child = (MapEntryNode)readNormalizedNode();
+
+ while(child != null){
+ builder.withChild(child);
+ child = (MapEntryNode)readNormalizedNode();
+ }
+
+ return builder;
+ }
+
+
+ @Override
+ public void close() throws IOException {
+ reader.close();
+ }
+
+}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * NormalizedNodeOutputStreamWriter will be used by distributed datastore to send normalized node in
+ * a stream.
+ * A stream writer wrapper around this class will write node objects to stream in recursive manner.
+ * for example - If you have a ContainerNode which has a two LeafNode as children, then
+ * you will first call {@link #startContainerNode(YangInstanceIdentifier.NodeIdentifier, int)}, then will call
+ * {@link #leafNode(YangInstanceIdentifier.NodeIdentifier, Object)} twice and then, {@link #endNode()} to end
+ * container node.
+ *
+ * Based on the each node, the node type is also written to the stream, that helps in reconstructing the object,
+ * while reading.
+ *
+ *
+ */
+
+public class NormalizedNodeOutputStreamWriter implements NormalizedNodeStreamWriter{
+
+ private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodeOutputStreamWriter.class);
+
+ private final DataOutputStream writer;
+
+ private final Map<String, Integer> stringCodeMap = new HashMap<>();
+
+ public NormalizedNodeOutputStreamWriter(OutputStream stream) throws IOException {
+ Preconditions.checkNotNull(stream);
+ writer = new DataOutputStream(stream);
+ }
+
+ @Override
+ public void leafNode(YangInstanceIdentifier.NodeIdentifier name, Object value) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Writing a new leaf node");
+ startNode(name.getNodeType(), NodeTypes.LEAF_NODE);
+
+ writeObject(value);
+ }
+
+ @Override
+ public void startLeafSet(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new leaf set");
+
+ startNode(name.getNodeType(), NodeTypes.LEAF_SET);
+ }
+
+ @Override
+ public void leafSetEntryNode(Object value) throws IOException, IllegalArgumentException {
+ LOG.debug("Writing a new leaf set entry node");
+
+ writer.writeByte(NodeTypes.LEAF_SET_ENTRY_NODE);
+ writeObject(value);
+ }
+
+ @Override
+ public void startContainerNode(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+
+ LOG.debug("Starting a new container node");
+
+ startNode(name.getNodeType(), NodeTypes.CONTAINER_NODE);
+ }
+
+ @Override
+ public void startUnkeyedList(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new unkeyed list");
+
+ startNode(name.getNodeType(), NodeTypes.UNKEYED_LIST);
+ }
+
+ @Override
+ public void startUnkeyedListItem(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalStateException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new unkeyed list item");
+
+ startNode(name.getNodeType(), NodeTypes.UNKEYED_LIST_ITEM);
+ }
+
+ @Override
+ public void startMapNode(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new map node");
+
+ startNode(name.getNodeType(), NodeTypes.MAP_NODE);
+ }
+
+ @Override
+ public void startMapEntryNode(YangInstanceIdentifier.NodeIdentifierWithPredicates identifier, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(identifier, "Node identifier should not be null");
+ LOG.debug("Starting a new map entry node");
+ startNode(identifier.getNodeType(), NodeTypes.MAP_ENTRY_NODE);
+
+ writeKeyValueMap(identifier.getKeyValues());
+
+ }
+
+ @Override
+ public void startOrderedMapNode(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new ordered map node");
+
+ startNode(name.getNodeType(), NodeTypes.ORDERED_MAP_NODE);
+ }
+
+ @Override
+ public void startChoiceNode(YangInstanceIdentifier.NodeIdentifier name, int childSizeHint) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Starting a new choice node");
+
+ startNode(name.getNodeType(), NodeTypes.CHOICE_NODE);
+ }
+
+ @Override
+ public void startAugmentationNode(YangInstanceIdentifier.AugmentationIdentifier identifier) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(identifier, "Node identifier should not be null");
+ LOG.debug("Starting a new augmentation node");
+
+ writer.writeByte(NodeTypes.AUGMENTATION_NODE);
+ writeQNameSet(identifier.getPossibleChildNames());
+ }
+
+ @Override
+ public void anyxmlNode(YangInstanceIdentifier.NodeIdentifier name, Object value) throws IOException, IllegalArgumentException {
+ Preconditions.checkNotNull(name, "Node identifier should not be null");
+ LOG.debug("Writing a new xml node");
+
+ startNode(name.getNodeType(), NodeTypes.ANY_XML_NODE);
+
+ writeObject(value);
+ }
+
+ @Override
+ public void endNode() throws IOException, IllegalStateException {
+ LOG.debug("Ending the node");
+
+ writer.writeByte(NodeTypes.END_NODE);
+ }
+
+ @Override
+ public void close() throws IOException {
+ writer.close();
+ }
+
+ @Override
+ public void flush() throws IOException {
+ writer.flush();
+ }
+
+ private void startNode(final QName qName, byte nodeType) throws IOException {
+
+ Preconditions.checkNotNull(qName, "QName of node identifier should not be null.");
+ // First write the type of node
+ writer.writeByte(nodeType);
+ // Write Start Tag
+ writeQName(qName);
+ }
+
+ private void writeQName(QName qName) throws IOException {
+
+ writeCodedString(qName.getLocalName());
+ writeCodedString(qName.getNamespace().toString());
+ writeCodedString(qName.getFormattedRevision());
+ }
+
+ private void writeCodedString(String key) throws IOException {
+ Integer value = stringCodeMap.get(key);
+
+ if(value != null) {
+ writer.writeBoolean(true);
+ writer.writeInt(value);
+ } else {
+ if(key != null) {
+ stringCodeMap.put(key, Integer.valueOf(stringCodeMap.size()));
+ }
+ writer.writeBoolean(false);
+ writer.writeUTF(key);
+ }
+ }
+
+ private void writeObjSet(Set set) throws IOException {
+ if(!set.isEmpty()){
+ writer.writeInt(set.size());
+ for(Object o : set){
+ if(o instanceof String){
+ writeCodedString(o.toString());
+ } else {
+ throw new IllegalArgumentException("Expected value type to be String but was : " +
+ o.toString());
+ }
+ }
+ } else {
+ writer.writeInt(0);
+ }
+ }
+
+ private void writeYangInstanceIdentifier(YangInstanceIdentifier identifier) throws IOException {
+ Iterable<YangInstanceIdentifier.PathArgument> pathArguments = identifier.getPathArguments();
+ int size = Iterables.size(pathArguments);
+ writer.writeInt(size);
+
+ for(YangInstanceIdentifier.PathArgument pathArgument : pathArguments) {
+ writePathArgument(pathArgument);
+ }
+ }
+
+ private void writePathArgument(YangInstanceIdentifier.PathArgument pathArgument) throws IOException {
+
+ byte type = PathArgumentTypes.getSerializablePathArgumentType(pathArgument);
+
+ writer.writeByte(type);
+
+ switch(type) {
+ case PathArgumentTypes.NODE_IDENTIFIER :
+
+ YangInstanceIdentifier.NodeIdentifier nodeIdentifier =
+ (YangInstanceIdentifier.NodeIdentifier) pathArgument;
+
+ writeQName(nodeIdentifier.getNodeType());
+ break;
+
+ case PathArgumentTypes.NODE_IDENTIFIER_WITH_PREDICATES:
+
+ YangInstanceIdentifier.NodeIdentifierWithPredicates nodeIdentifierWithPredicates =
+ (YangInstanceIdentifier.NodeIdentifierWithPredicates) pathArgument;
+ writeQName(nodeIdentifierWithPredicates.getNodeType());
+
+ writeKeyValueMap(nodeIdentifierWithPredicates.getKeyValues());
+ break;
+
+ case PathArgumentTypes.NODE_IDENTIFIER_WITH_VALUE :
+
+ YangInstanceIdentifier.NodeWithValue nodeWithValue =
+ (YangInstanceIdentifier.NodeWithValue) pathArgument;
+
+ writeQName(nodeWithValue.getNodeType());
+ writeObject(nodeWithValue.getValue());
+ break;
+
+ case PathArgumentTypes.AUGMENTATION_IDENTIFIER :
+
+ YangInstanceIdentifier.AugmentationIdentifier augmentationIdentifier =
+ (YangInstanceIdentifier.AugmentationIdentifier) pathArgument;
+
+ // No Qname in augmentation identifier
+ writeQNameSet(augmentationIdentifier.getPossibleChildNames());
+ break;
+ default :
+ throw new IllegalStateException("Unknown node identifier type is found : " + pathArgument.getClass().toString() );
+ }
+ }
+
+ private void writeKeyValueMap(Map<QName, Object> keyValueMap) throws IOException {
+ if(keyValueMap != null && !keyValueMap.isEmpty()) {
+ writer.writeInt(keyValueMap.size());
+ Set<QName> qNameSet = keyValueMap.keySet();
+
+ for(QName qName : qNameSet) {
+ writeQName(qName);
+ writeObject(keyValueMap.get(qName));
+ }
+ } else {
+ writer.writeInt(0);
+ }
+ }
+
+ private void writeQNameSet(Set<QName> children) throws IOException {
+ // Write each child's qname separately, if list is empty send count as 0
+ if(children != null && !children.isEmpty()) {
+ writer.writeInt(children.size());
+ for(QName qName : children) {
+ writeQName(qName);
+ }
+ } else {
+ LOG.debug("augmentation node does not have any child");
+ writer.writeInt(0);
+ }
+ }
+
+ private void writeObject(Object value) throws IOException {
+
+ byte type = ValueTypes.getSerializableType(value);
+ // Write object type first
+ writer.writeByte(type);
+
+ switch(type) {
+ case ValueTypes.BOOL_TYPE:
+ writer.writeBoolean((Boolean) value);
+ break;
+ case ValueTypes.QNAME_TYPE:
+ writeQName((QName) value);
+ break;
+ case ValueTypes.INT_TYPE:
+ writer.writeInt((Integer) value);
+ break;
+ case ValueTypes.BYTE_TYPE:
+ writer.writeByte((Byte) value);
+ break;
+ case ValueTypes.LONG_TYPE:
+ writer.writeLong((Long) value);
+ break;
+ case ValueTypes.SHORT_TYPE:
+ writer.writeShort((Short) value);
+ break;
+ case ValueTypes.BITS_TYPE:
+ writeObjSet((Set) value);
+ break;
+ case ValueTypes.YANG_IDENTIFIER_TYPE:
+ writeYangInstanceIdentifier((YangInstanceIdentifier) value);
+ break;
+ default:
+ writer.writeUTF(value.toString());
+ break;
+ }
+ }
+}
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+import java.io.IOException;
+
+
+public interface NormalizedNodeStreamReader extends AutoCloseable {
+
+ NormalizedNode<?, ?> readNormalizedNode() throws IOException;
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.collect.ImmutableMap;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+import java.util.Map;
+
+public class PathArgumentTypes {
+ public static final byte AUGMENTATION_IDENTIFIER = 1;
+ public static final byte NODE_IDENTIFIER = 2;
+ public static final byte NODE_IDENTIFIER_WITH_VALUE = 3;
+ public static final byte NODE_IDENTIFIER_WITH_PREDICATES = 4;
+
+ private static Map<Class<?>, Byte> CLASS_TO_ENUM_MAP =
+ ImmutableMap.<Class<?>, Byte>builder().
+ put(YangInstanceIdentifier.AugmentationIdentifier.class, AUGMENTATION_IDENTIFIER).
+ put(YangInstanceIdentifier.NodeIdentifier.class, NODE_IDENTIFIER).
+ put(YangInstanceIdentifier.NodeIdentifierWithPredicates.class, NODE_IDENTIFIER_WITH_PREDICATES).
+ put(YangInstanceIdentifier.NodeWithValue.class, NODE_IDENTIFIER_WITH_VALUE).build();
+
+ public static byte getSerializablePathArgumentType(YangInstanceIdentifier.PathArgument pathArgument){
+
+ Byte type = CLASS_TO_ENUM_MAP.get(pathArgument.getClass());
+ if(type == null) {
+ throw new IllegalArgumentException("Unknown type of PathArgument = " + pathArgument);
+ }
+
+ return type;
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+public class ValueTypes {
+ public static final byte SHORT_TYPE = 1;
+ public static final byte BYTE_TYPE = 2;
+ public static final byte INT_TYPE = 3;
+ public static final byte LONG_TYPE = 4;
+ public static final byte BOOL_TYPE = 5;
+ public static final byte QNAME_TYPE = 6;
+ public static final byte BITS_TYPE = 7;
+ public static final byte YANG_IDENTIFIER_TYPE = 8;
+ public static final byte STRING_TYPE = 9;
+ public static final byte BIG_INTEGER_TYPE = 10;
+ public static final byte BIG_DECIMAL_TYPE = 11;
+
+ private static Map<Class, Byte> types = new HashMap<>();
+
+ static {
+ types.put(String.class, Byte.valueOf(STRING_TYPE));
+ types.put(Byte.class, Byte.valueOf(BYTE_TYPE));
+ types.put(Integer.class, Byte.valueOf(INT_TYPE));
+ types.put(Long.class, Byte.valueOf(LONG_TYPE));
+ types.put(Boolean.class, Byte.valueOf(BOOL_TYPE));
+ types.put(QName.class, Byte.valueOf(QNAME_TYPE));
+ types.put(Set.class, Byte.valueOf(BITS_TYPE));
+ types.put(YangInstanceIdentifier.class, Byte.valueOf(YANG_IDENTIFIER_TYPE));
+ types.put(Short.class, Byte.valueOf(SHORT_TYPE));
+ types.put(BigInteger.class, Byte.valueOf(BIG_INTEGER_TYPE));
+ types.put(BigDecimal.class, Byte.valueOf(BIG_DECIMAL_TYPE));
+ }
+
+ public static final byte getSerializableType(Object node){
+ Preconditions.checkNotNull(node, "node should not be null");
+
+ Byte type = types.get(node.getClass());
+ if(type != null) {
+ return type;
+ } else if(node instanceof Set){
+ return BITS_TYPE;
+ }
+
+ throw new IllegalArgumentException("Unknown value type " + node.getClass().getSimpleName());
+ }
+}
+++ /dev/null
-/*
- *
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- */
-
-package org.opendaylight.controller.cluster.datastore.util;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import org.opendaylight.controller.protobuff.messages.common.SimpleNormalizedNodeMessage;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlDocumentUtils;
-import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
-import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.serializer.DomFromNormalizedNodeSerializerFactory;
-import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
-import org.opendaylight.yangtools.yang.model.api.ChoiceNode;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.transform.OutputKeys;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.TransformerFactoryConfigurationError;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-import java.io.ByteArrayInputStream;
-import java.io.StringWriter;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-
-/*
- *
- * <code>EncoderDecoderUtil</code> helps in wrapping the NormalizedNode into a SimpleNormalizedNode
- * protobuf message containing the XML representation of the NormalizeNode
- *
- * @author: syedbahm
- */
-public class EncoderDecoderUtil {
- static DocumentBuilderFactory factory;
-
- private static DomFromNormalizedNodeSerializerFactory serializerFactory =
- DomFromNormalizedNodeSerializerFactory
- .getInstance(XmlDocumentUtils.getDocument(),
- DomUtils.defaultValueCodecProvider());
-
- private static DomToNormalizedNodeParserFactory parserFactory =
- DomToNormalizedNodeParserFactory
- .getInstance(DomUtils.defaultValueCodecProvider());
-
- static {
- factory = DocumentBuilderFactory.newInstance();
- factory.setNamespaceAware(true);
- factory.setCoalescing(true);
- factory.setIgnoringElementContentWhitespace(true);
- factory.setIgnoringComments(true);
- }
-
- private static DataSchemaNode findChildNode(Collection<DataSchemaNode> children,
- String name) {
- List<DataNodeContainer> containers = Lists.newArrayList();
-
- for (DataSchemaNode dataSchemaNode : children) {
- if (dataSchemaNode.getQName().getLocalName().equals(name))
- return dataSchemaNode;
- if (dataSchemaNode instanceof DataNodeContainer) {
- containers.add((DataNodeContainer) dataSchemaNode);
- } else if (dataSchemaNode instanceof ChoiceNode) {
- containers.addAll(((ChoiceNode) dataSchemaNode).getCases());
- }
- }
-
- for (DataNodeContainer container : containers) {
- DataSchemaNode retVal =
- findChildNode(container.getChildNodes(), name);
- if (retVal != null) {
- return retVal;
- }
- }
-
- return null;
- }
-
- private static DataSchemaNode getSchemaNode(SchemaContext context,
- QName qname) {
-
- for (Module module : context
- .findModuleByNamespace(qname.getNamespace())) {
- // we will take the first child as the start of the
- if (module.getChildNodes() != null || !module.getChildNodes()
- .isEmpty()) {
-
- DataSchemaNode found =
- findChildNode(module.getChildNodes(), qname.getLocalName());
- return found;
- }
- }
- return null;
- }
-
- private static String toString(Element xml) {
- try {
- Transformer transformer =
- TransformerFactory.newInstance().newTransformer();
- transformer.setOutputProperty(OutputKeys.INDENT, "yes");
-
- StreamResult result = new StreamResult(new StringWriter());
- DOMSource source = new DOMSource(xml);
- transformer.transform(source, result);
-
- return result.getWriter().toString();
- } catch (IllegalArgumentException | TransformerFactoryConfigurationError
- | TransformerException e) {
- throw new RuntimeException("Unable to serialize xml element " + xml,
- e);
- }
- }
-
- private static String toString(Iterable<Element> xmlIterable) {
- try {
- Transformer transformer =
- TransformerFactory.newInstance().newTransformer();
- transformer.setOutputProperty(OutputKeys.INDENT, "yes");
-
- StreamResult result = new StreamResult(new StringWriter());
- Iterator iterator = xmlIterable.iterator();
- DOMSource source;
- if(iterator.hasNext()) {
- source = new DOMSource((org.w3c.dom.Node) iterator.next());
- transformer.transform(source, result);
- transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
- }
-
- while(iterator.hasNext()) {
- source = new DOMSource((org.w3c.dom.Node) iterator.next());
- transformer.transform(source, result);
- }
- System.out.println(result.getWriter().toString());
- return result.getWriter().toString();
- } catch (IllegalArgumentException | TransformerFactoryConfigurationError
- | TransformerException e) {
- throw new RuntimeException("Unable to serialize xml element(s) " + xmlIterable.toString(),
- e);
- }
- }
-
- private static Iterable<Element> serialize(DataSchemaNode schemaNode, NormalizedNode normalizedNode){
- if(schemaNode instanceof ContainerSchemaNode){ //1
- return serializerFactory
- .getContainerNodeSerializer()
- .serialize((ContainerSchemaNode) schemaNode,
- (ContainerNode) normalizedNode);
- } else if(schemaNode instanceof ChoiceNode){ //2
- return serializerFactory
- .getChoiceNodeSerializer()
- .serialize((ChoiceNode) schemaNode,
- (org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode) normalizedNode);
- } else if(schemaNode instanceof LeafSchemaNode){ //3
- return serializerFactory
- .getLeafNodeSerializer()
- .serialize((LeafSchemaNode) schemaNode, (LeafNode) normalizedNode);
- } else if(schemaNode instanceof ListSchemaNode){ //4
- return serializerFactory
- .getMapNodeSerializer()
- .serialize((ListSchemaNode) schemaNode, (MapNode) normalizedNode);
- } else if(schemaNode instanceof LeafListSchemaNode){ //5
- return serializerFactory
- .getLeafSetNodeSerializer()
- .serialize((LeafListSchemaNode) schemaNode, (LeafSetNode) normalizedNode);
- } else if(schemaNode instanceof AugmentationSchema){//6
- return serializerFactory
- .getAugmentationNodeSerializer()
- .serialize((AugmentationSchema) schemaNode, (AugmentationNode) normalizedNode);
- } else if(schemaNode instanceof ListSchemaNode && normalizedNode instanceof LeafSetEntryNode){ //7
- return serializerFactory
- .getLeafSetEntryNodeSerializer()
- .serialize((LeafListSchemaNode) schemaNode, (LeafSetEntryNode) normalizedNode);
- } else if(schemaNode instanceof ListSchemaNode){ //8
- return serializerFactory
- .getMapEntryNodeSerializer()
- .serialize((ListSchemaNode) schemaNode, (MapEntryNode) normalizedNode);
- }
-
-
-
- throw new UnsupportedOperationException(schemaNode.getClass().toString());
- }
-
- private static NormalizedNode parse(Document doc, DataSchemaNode schemaNode){
- if(schemaNode instanceof ContainerSchemaNode){
- return parserFactory
- .getContainerNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- (ContainerSchemaNode) schemaNode);
-
- } else if(schemaNode instanceof ChoiceNode){
- return parserFactory
- .getChoiceNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- (ChoiceNode) schemaNode);
- } else if(schemaNode instanceof LeafNode){
- return parserFactory
- .getLeafNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- (LeafSchemaNode) schemaNode);
- } else if(schemaNode instanceof ListSchemaNode){
- return parserFactory
- .getMapNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- (ListSchemaNode) schemaNode);
- } else if(schemaNode instanceof LeafListSchemaNode){
- return parserFactory
- .getLeafSetNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- (LeafListSchemaNode) schemaNode);
- } else if(schemaNode instanceof AugmentationSchema){
- return parserFactory
- .getAugmentationNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- (AugmentationSchema) schemaNode);
- } else if(schemaNode instanceof ListSchemaNode){
- return parserFactory
- .getMapEntryNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- (ListSchemaNode) schemaNode);
-
- }
-
- throw new UnsupportedOperationException(schemaNode.getClass().toString());
- }
-
-
- /**
- * Helps in generation of NormalizedNodeXml message for the supplied NormalizedNode
- *
- * @param sc --SchemaContext
- * @param normalizedNode -- Normalized Node to be encoded
- * @return SimpleNormalizedNodeMessage.NormalizedNodeXml
- */
- public static SimpleNormalizedNodeMessage.NormalizedNodeXml encode(
- SchemaContext sc, NormalizedNode<?, ?> normalizedNode) {
-
- Preconditions.checkArgument(sc != null, "Schema context found null");
-
- Preconditions.checkArgument(normalizedNode != null,
- "normalized node found null");
-
- DataSchemaNode schemaNode = getSchemaNode(sc,
- normalizedNode.getIdentifier()
- .getNodeType()
- );
-
- Preconditions.checkState(schemaNode != null,
- "Couldn't find schema node for " + normalizedNode.getIdentifier());
-
- Iterable<Element> els = serialize(schemaNode, normalizedNode);
-
- String xmlString = toString(els.iterator().next());
- SimpleNormalizedNodeMessage.NormalizedNodeXml.Builder builder =
- SimpleNormalizedNodeMessage.NormalizedNodeXml.newBuilder();
- builder.setXmlString(xmlString);
- builder
- .setNodeIdentifier(normalizedNode.getIdentifier()
- .getNodeType().toString());
- return builder.build();
-
- }
-
- /**
- * Utilizes the SimpleNormalizedNodeMessage.NormalizedNodeXml to convert into NormalizedNode
- *
- * @param sc -- schema context
- * @param normalizedNodeXml -- containing the normalized Node XML
- * @return NormalizedNode return
- * @throws Exception
- */
-
- public static NormalizedNode decode(SchemaContext sc,
- SimpleNormalizedNodeMessage.NormalizedNodeXml normalizedNodeXml)
- throws Exception {
-
- Preconditions
- .checkArgument(sc != null, "schema context seems to be null");
-
- Preconditions.checkArgument(normalizedNodeXml != null,
- "SimpleNormalizedNodeMessage.NormalizedNodeXml found to be null");
- QName qname = QName.create(normalizedNodeXml.getNodeIdentifier());
-
- // here we will try to get back the NormalizedNode
- DataSchemaNode schemaNode = getSchemaNode(sc, qname);
-
- // now we need to read the XML
- Document doc =
- factory.newDocumentBuilder().parse(
- new ByteArrayInputStream(
- normalizedNodeXml.getXmlString().getBytes(
- "utf-8"))
- );
-
- doc.getDocumentElement().normalize();
-
-
- return parse(doc, schemaNode);
- }
-
-
-
-}
import org.opendaylight.controller.cluster.datastore.node.utils.NodeIdentifierFactory;
import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.PathArgumentSerializer;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.QNameDeSerializationContext;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.QNameDeSerializationContextImpl;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.QNameSerializationContext;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.QNameSerializationContextImpl;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
protected static final Logger logger = LoggerFactory
.getLogger(InstanceIdentifierUtils.class);
- @Deprecated
- public static YangInstanceIdentifier from(String path) {
- String[] ids = path.split("/");
-
- List<YangInstanceIdentifier.PathArgument> pathArguments =
- new ArrayList<>();
- for (String nodeId : ids) {
- if (!"".equals(nodeId)) {
- pathArguments
- .add(NodeIdentifierFactory.getArgument(nodeId));
- }
- }
- final YangInstanceIdentifier instanceIdentifier =
- YangInstanceIdentifier.create(pathArguments);
- return instanceIdentifier;
- }
-
-
/**
* Convert an MD-SAL YangInstanceIdentifier into a protocol buffer version of it
*
* @param path an MD-SAL YangInstanceIdentifier
* @return a protocol buffer version of the MD-SAL YangInstanceIdentifier
*/
- public static NormalizedNodeMessages.InstanceIdentifier toSerializable(YangInstanceIdentifier path){
+ public static NormalizedNodeMessages.InstanceIdentifier toSerializable(YangInstanceIdentifier path) {
+ QNameSerializationContextImpl context = new QNameSerializationContextImpl();
+ Builder builder = toSerializableBuilder(path, context);
+ return builder.addAllCode(context.getCodes()).build();
+ }
+
+ public static NormalizedNodeMessages.InstanceIdentifier toSerializable(
+ YangInstanceIdentifier path, QNameSerializationContext context) {
+ return toSerializableBuilder(path, context).build();
+ }
+
+ private static NormalizedNodeMessages.InstanceIdentifier.Builder toSerializableBuilder(
+ YangInstanceIdentifier path, QNameSerializationContext context) {
NormalizedNodeMessages.InstanceIdentifier.Builder builder =
NormalizedNodeMessages.InstanceIdentifier.newBuilder();
try {
-
- for (org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument pathArgument : path
- .getPathArguments()) {
-
- String nodeType = "";
- if(!(pathArgument instanceof YangInstanceIdentifier.AugmentationIdentifier)){
- nodeType = pathArgument.getNodeType().toString();
+ for (org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.
+ PathArgument pathArgument : path.getPathArguments()) {
+ NormalizedNodeMessages.PathArgument serializablePathArgument;
+ if(context == null) {
+ String nodeType = "";
+ if(!(pathArgument instanceof YangInstanceIdentifier.AugmentationIdentifier)){
+ nodeType = pathArgument.getNodeType().toString();
+ }
+
+ serializablePathArgument = NormalizedNodeMessages.PathArgument.newBuilder()
+ .setValue(pathArgument.toString())
+ .setType(pathArgument.getClass().getSimpleName())
+ .setNodeType(NormalizedNodeMessages.QName.newBuilder().setValue(nodeType))
+ .addAllAttributes(getPathArgumentAttributes(pathArgument)).build();
+ } else {
+ serializablePathArgument = PathArgumentSerializer.serialize(context, pathArgument);
}
- NormalizedNodeMessages.PathArgument serializablePathArgument =
- NormalizedNodeMessages.PathArgument.newBuilder()
- .setValue(pathArgument.toString())
- .setType(pathArgument.getClass().getSimpleName())
- .setNodeType(NormalizedNodeMessages.QName.newBuilder()
- .setValue(nodeType))
- .addAllAttributes(getPathArgumentAttributes(
- pathArgument))
- .build();
-
builder.addArguments(serializablePathArgument);
}
-
} catch(Exception e){
logger.error("An exception occurred", e);
}
- return builder.build();
+
+ return builder;
}
* @param path a protocol buffer version of the MD-SAL YangInstanceIdentifier
* @return an MD-SAL YangInstanceIdentifier
*/
- public static YangInstanceIdentifier fromSerializable(NormalizedNodeMessages.InstanceIdentifier path){
-
- List<YangInstanceIdentifier.PathArgument> pathArguments =
- new ArrayList<>();
+ public static YangInstanceIdentifier fromSerializable(NormalizedNodeMessages.InstanceIdentifier path) {
+ return fromSerializable(path, new QNameDeSerializationContextImpl(path.getCodeList()));
+ }
- for(NormalizedNodeMessages.PathArgument pathArgument : path.getArgumentsList()){
+ public static YangInstanceIdentifier fromSerializable(NormalizedNodeMessages.InstanceIdentifier path,
+ QNameDeSerializationContext context) {
- pathArguments
- .add(parsePathArgument(pathArgument));
+ List<YangInstanceIdentifier.PathArgument> pathArguments = new ArrayList<>();
+ for(NormalizedNodeMessages.PathArgument pathArgument : path.getArgumentsList()) {
+ if(context == null || pathArgument.hasType()) {
+ pathArguments.add(parsePathArgument(pathArgument));
+ } else {
+ pathArguments.add(PathArgumentSerializer.deSerialize(context, pathArgument));
+ }
}
- final YangInstanceIdentifier instanceIdentifier = YangInstanceIdentifier.create(pathArguments);
-
- return instanceIdentifier;
+ return YangInstanceIdentifier.create(pathArguments);
}
/**
* @param pathArgument protocol buffer PathArgument
* @return MD-SAL PathArgument
*/
- private static YangInstanceIdentifier.PathArgument parsePathArgument(NormalizedNodeMessages.PathArgument pathArgument) {
+ private static YangInstanceIdentifier.PathArgument parsePathArgument(
+ NormalizedNodeMessages.PathArgument pathArgument) {
if (YangInstanceIdentifier.NodeWithValue.class.getSimpleName().equals(pathArgument.getType())) {
YangInstanceIdentifier.NodeWithValue nodeWithValue =
}
public interface CanCommitTransactionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
+
+ // required string transactionId = 1;
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ boolean hasTransactionId();
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ java.lang.String getTransactionId();
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getTransactionIdBytes();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.CanCommitTransaction}
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
}
break;
}
+ case 10: {
+ bitField0_ |= 0x00000001;
+ transactionId_ = input.readBytes();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
return PARSER;
}
+ private int bitField0_;
+ // required string transactionId = 1;
+ public static final int TRANSACTIONID_FIELD_NUMBER = 1;
+ private java.lang.Object transactionId_;
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public boolean hasTransactionId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public java.lang.String getTransactionId() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ transactionId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionIdBytes() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
private void initFields() {
+ transactionId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
+ if (!hasTransactionId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTransactionIdBytes());
+ }
getUnknownFields().writeTo(output);
}
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTransactionIdBytes());
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
public Builder clear() {
super.clear();
+ transactionId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CanCommitTransaction buildPartial() {
org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CanCommitTransaction result = new org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CanCommitTransaction(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.transactionId_ = transactionId_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CanCommitTransaction other) {
if (other == org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CanCommitTransaction.getDefaultInstance()) return this;
+ if (other.hasTransactionId()) {
+ bitField0_ |= 0x00000001;
+ transactionId_ = other.transactionId_;
+ onChanged();
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
+ if (!hasTransactionId()) {
+
+ return false;
+ }
return true;
}
}
return this;
}
+ private int bitField0_;
+
+ // required string transactionId = 1;
+ private java.lang.Object transactionId_ = "";
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public boolean hasTransactionId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public java.lang.String getTransactionId() {
+ java.lang.Object ref = transactionId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ transactionId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionIdBytes() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder setTransactionId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ transactionId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder clearTransactionId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ transactionId_ = getDefaultInstance().getTransactionId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder setTransactionIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ transactionId_ = value;
+ onChanged();
+ return this;
+ }
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CanCommitTransaction)
}
public interface AbortTransactionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
+
+ // required string transactionId = 1;
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ boolean hasTransactionId();
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ java.lang.String getTransactionId();
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getTransactionIdBytes();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.AbortTransaction}
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
}
break;
}
+ case 10: {
+ bitField0_ |= 0x00000001;
+ transactionId_ = input.readBytes();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
return PARSER;
}
+ private int bitField0_;
+ // required string transactionId = 1;
+ public static final int TRANSACTIONID_FIELD_NUMBER = 1;
+ private java.lang.Object transactionId_;
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public boolean hasTransactionId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public java.lang.String getTransactionId() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ transactionId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionIdBytes() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
private void initFields() {
+ transactionId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
+ if (!hasTransactionId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTransactionIdBytes());
+ }
getUnknownFields().writeTo(output);
}
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTransactionIdBytes());
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
public Builder clear() {
super.clear();
+ transactionId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.AbortTransaction buildPartial() {
org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.AbortTransaction result = new org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.AbortTransaction(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.transactionId_ = transactionId_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.AbortTransaction other) {
if (other == org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.AbortTransaction.getDefaultInstance()) return this;
+ if (other.hasTransactionId()) {
+ bitField0_ |= 0x00000001;
+ transactionId_ = other.transactionId_;
+ onChanged();
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
+ if (!hasTransactionId()) {
+
+ return false;
+ }
return true;
}
}
return this;
}
+ private int bitField0_;
+
+ // required string transactionId = 1;
+ private java.lang.Object transactionId_ = "";
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public boolean hasTransactionId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public java.lang.String getTransactionId() {
+ java.lang.Object ref = transactionId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ transactionId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionIdBytes() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder setTransactionId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ transactionId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder clearTransactionId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ transactionId_ = getDefaultInstance().getTransactionId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder setTransactionIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ transactionId_ = value;
+ onChanged();
+ return this;
+ }
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.AbortTransaction)
}
public interface CommitTransactionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
+
+ // required string transactionId = 1;
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ boolean hasTransactionId();
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ java.lang.String getTransactionId();
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getTransactionIdBytes();
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.CommitTransaction}
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
+ int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
}
break;
}
+ case 10: {
+ bitField0_ |= 0x00000001;
+ transactionId_ = input.readBytes();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
return PARSER;
}
+ private int bitField0_;
+ // required string transactionId = 1;
+ public static final int TRANSACTIONID_FIELD_NUMBER = 1;
+ private java.lang.Object transactionId_;
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public boolean hasTransactionId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public java.lang.String getTransactionId() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ transactionId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionIdBytes() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
private void initFields() {
+ transactionId_ = "";
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
+ if (!hasTransactionId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTransactionIdBytes());
+ }
getUnknownFields().writeTo(output);
}
if (size != -1) return size;
size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTransactionIdBytes());
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
public Builder clear() {
super.clear();
+ transactionId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
public org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CommitTransaction buildPartial() {
org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CommitTransaction result = new org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CommitTransaction(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.transactionId_ = transactionId_;
+ result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CommitTransaction other) {
if (other == org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages.CommitTransaction.getDefaultInstance()) return this;
+ if (other.hasTransactionId()) {
+ bitField0_ |= 0x00000001;
+ transactionId_ = other.transactionId_;
+ onChanged();
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
+ if (!hasTransactionId()) {
+
+ return false;
+ }
return true;
}
}
return this;
}
+ private int bitField0_;
+
+ // required string transactionId = 1;
+ private java.lang.Object transactionId_ = "";
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public boolean hasTransactionId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public java.lang.String getTransactionId() {
+ java.lang.Object ref = transactionId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ transactionId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getTransactionIdBytes() {
+ java.lang.Object ref = transactionId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ transactionId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder setTransactionId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ transactionId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder clearTransactionId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ transactionId_ = getDefaultInstance().getTransactionId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string transactionId = 1;</code>
+ */
+ public Builder setTransactionIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ transactionId_ = value;
+ onChanged();
+ return this;
+ }
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CommitTransaction)
}
static {
java.lang.String[] descriptorData = {
"\n\014Cohort.proto\022!org.opendaylight.control" +
- "ler.mdsal\"\026\n\024CanCommitTransaction\".\n\031Can" +
- "CommitTransactionReply\022\021\n\tcanCommit\030\001 \002(" +
- "\010\"\022\n\020AbortTransaction\"\027\n\025AbortTransactio" +
- "nReply\"\023\n\021CommitTransaction\"\030\n\026CommitTra" +
- "nsactionReply\"\026\n\024PreCommitTransaction\"\033\n" +
- "\031PreCommitTransactionReplyBZ\n8org.openda" +
- "ylight.controller.protobuff.messages.coh" +
- "ort3pcB\036ThreePhaseCommitCohortMessages"
+ "ler.mdsal\"-\n\024CanCommitTransaction\022\025\n\rtra" +
+ "nsactionId\030\001 \002(\t\".\n\031CanCommitTransaction" +
+ "Reply\022\021\n\tcanCommit\030\001 \002(\010\")\n\020AbortTransac" +
+ "tion\022\025\n\rtransactionId\030\001 \002(\t\"\027\n\025AbortTran" +
+ "sactionReply\"*\n\021CommitTransaction\022\025\n\rtra" +
+ "nsactionId\030\001 \002(\t\"\030\n\026CommitTransactionRep" +
+ "ly\"\026\n\024PreCommitTransaction\"\033\n\031PreCommitT" +
+ "ransactionReplyBZ\n8org.opendaylight.cont" +
+ "roller.protobuff.messages.cohort3pcB\036Thr",
+ "eePhaseCommitCohortMessages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
internal_static_org_opendaylight_controller_mdsal_CanCommitTransaction_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_CanCommitTransaction_descriptor,
- new java.lang.String[] { });
+ new java.lang.String[] { "TransactionId", });
internal_static_org_opendaylight_controller_mdsal_CanCommitTransactionReply_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_org_opendaylight_controller_mdsal_CanCommitTransactionReply_fieldAccessorTable = new
internal_static_org_opendaylight_controller_mdsal_AbortTransaction_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_AbortTransaction_descriptor,
- new java.lang.String[] { });
+ new java.lang.String[] { "TransactionId", });
internal_static_org_opendaylight_controller_mdsal_AbortTransactionReply_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_org_opendaylight_controller_mdsal_AbortTransactionReply_fieldAccessorTable = new
internal_static_org_opendaylight_controller_mdsal_CommitTransaction_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_CommitTransaction_descriptor,
- new java.lang.String[] { });
+ new java.lang.String[] { "TransactionId", });
internal_static_org_opendaylight_controller_mdsal_CommitTransactionReply_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_org_opendaylight_controller_mdsal_CommitTransactionReply_fieldAccessorTable = new
*/
org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.PathArgumentOrBuilder getArgumentsOrBuilder(
int index);
+
+ // repeated string code = 2;
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ java.util.List<java.lang.String>
+ getCodeList();
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ int getCodeCount();
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ java.lang.String getCode(int index);
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ com.google.protobuf.ByteString
+ getCodeBytes(int index);
}
/**
* Protobuf type {@code org.opendaylight.controller.mdsal.InstanceIdentifier}
arguments_.add(input.readMessage(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.PathArgument.PARSER, extensionRegistry));
break;
}
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ code_ = new com.google.protobuf.LazyStringArrayList();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ code_.add(input.readBytes());
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
arguments_ = java.util.Collections.unmodifiableList(arguments_);
}
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ code_ = new com.google.protobuf.UnmodifiableLazyStringList(code_);
+ }
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
return arguments_.get(index);
}
+ // repeated string code = 2;
+ public static final int CODE_FIELD_NUMBER = 2;
+ private com.google.protobuf.LazyStringList code_;
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public java.util.List<java.lang.String>
+ getCodeList() {
+ return code_;
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public int getCodeCount() {
+ return code_.size();
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public java.lang.String getCode(int index) {
+ return code_.get(index);
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public com.google.protobuf.ByteString
+ getCodeBytes(int index) {
+ return code_.getByteString(index);
+ }
+
private void initFields() {
arguments_ = java.util.Collections.emptyList();
+ code_ = com.google.protobuf.LazyStringArrayList.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
for (int i = 0; i < arguments_.size(); i++) {
output.writeMessage(1, arguments_.get(i));
}
+ for (int i = 0; i < code_.size(); i++) {
+ output.writeBytes(2, code_.getByteString(i));
+ }
getUnknownFields().writeTo(output);
}
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, arguments_.get(i));
}
+ {
+ int dataSize = 0;
+ for (int i = 0; i < code_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(code_.getByteString(i));
+ }
+ size += dataSize;
+ size += 1 * getCodeList().size();
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
} else {
argumentsBuilder_.clear();
}
+ code_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
} else {
result.arguments_ = argumentsBuilder_.build();
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ code_ = new com.google.protobuf.UnmodifiableLazyStringList(
+ code_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.code_ = code_;
onBuilt();
return result;
}
}
}
}
+ if (!other.code_.isEmpty()) {
+ if (code_.isEmpty()) {
+ code_ = other.code_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureCodeIsMutable();
+ code_.addAll(other.code_);
+ }
+ onChanged();
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
return argumentsBuilder_;
}
+ // repeated string code = 2;
+ private com.google.protobuf.LazyStringList code_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ private void ensureCodeIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ code_ = new com.google.protobuf.LazyStringArrayList(code_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public java.util.List<java.lang.String>
+ getCodeList() {
+ return java.util.Collections.unmodifiableList(code_);
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public int getCodeCount() {
+ return code_.size();
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public java.lang.String getCode(int index) {
+ return code_.get(index);
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public com.google.protobuf.ByteString
+ getCodeBytes(int index) {
+ return code_.getByteString(index);
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public Builder setCode(
+ int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureCodeIsMutable();
+ code_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public Builder addCode(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureCodeIsMutable();
+ code_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public Builder addAllCode(
+ java.lang.Iterable<java.lang.String> values) {
+ ensureCodeIsMutable();
+ super.addAll(values, code_);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public Builder clearCode() {
+ code_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated string code = 2;</code>
+ *
+ * <pre>
+ * A list of string codes which can be used for any repeated strings in the path args. This is
+ * optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ * that contains the codes.
+ * </pre>
+ */
+ public Builder addCodeBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureCodeIsMutable();
+ code_.add(value);
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.InstanceIdentifier)
}
"controller.mdsal.PathArgumentAttribute\022@" +
"\n\nattributes\030\005 \003(\0132,.org.opendaylight.co" +
"ntroller.mdsal.Attribute\022\017\n\007intType\030\006 \001(" +
- "\005\"X\n\022InstanceIdentifier\022B\n\targuments\030\001 \003" +
+ "\005\"f\n\022InstanceIdentifier\022B\n\targuments\030\001 \003" +
"(\0132/.org.opendaylight.controller.mdsal.P" +
- "athArgument\"\245\003\n\004Node\022\014\n\004path\030\001 \001(\t\022\014\n\004ty" +
- "pe\030\002 \001(\t\022E\n\014pathArgument\030\003 \001(\0132/.org.ope" +
- "ndaylight.controller.mdsal.PathArgument\022" +
- "\017\n\007intType\030\004 \001(\005\022@\n\nattributes\030\005 \003(\0132,.o",
- "rg.opendaylight.controller.mdsal.Attribu" +
- "te\0226\n\005child\030\006 \003(\0132\'.org.opendaylight.con" +
- "troller.mdsal.Node\022\r\n\005value\030\007 \001(\t\022\021\n\tval" +
- "ueType\030\010 \001(\t\022\024\n\014intValueType\030\t \001(\005\022V\n\027in" +
- "stanceIdentifierValue\030\n \001(\01325.org.openda" +
- "ylight.controller.mdsal.InstanceIdentifi" +
- "er\022\021\n\tbitsValue\030\013 \003(\t\022\014\n\004code\030\014 \003(\t\"`\n\tC" +
- "ontainer\022\022\n\nparentPath\030\001 \002(\t\022?\n\016normaliz" +
- "edNode\030\002 \001(\0132\'.org.opendaylight.controll" +
- "er.mdsal.Node\"\246\001\n\014NodeMapEntry\022U\n\026instan",
- "ceIdentifierPath\030\001 \002(\01325.org.opendayligh" +
- "t.controller.mdsal.InstanceIdentifier\022?\n" +
- "\016normalizedNode\030\002 \001(\0132\'.org.opendaylight" +
- ".controller.mdsal.Node\"N\n\007NodeMap\022C\n\nmap" +
- "Entries\030\001 \003(\0132/.org.opendaylight.control" +
- "ler.mdsal.NodeMapEntryBO\n5org.opendaylig" +
- "ht.controller.protobuff.messages.commonB" +
- "\026NormalizedNodeMessages"
+ "athArgument\022\014\n\004code\030\002 \003(\t\"\245\003\n\004Node\022\014\n\004pa" +
+ "th\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022E\n\014pathArgument\030\003" +
+ " \001(\0132/.org.opendaylight.controller.mdsal" +
+ ".PathArgument\022\017\n\007intType\030\004 \001(\005\022@\n\nattrib",
+ "utes\030\005 \003(\0132,.org.opendaylight.controller" +
+ ".mdsal.Attribute\0226\n\005child\030\006 \003(\0132\'.org.op" +
+ "endaylight.controller.mdsal.Node\022\r\n\005valu" +
+ "e\030\007 \001(\t\022\021\n\tvalueType\030\010 \001(\t\022\024\n\014intValueTy" +
+ "pe\030\t \001(\005\022V\n\027instanceIdentifierValue\030\n \001(" +
+ "\01325.org.opendaylight.controller.mdsal.In" +
+ "stanceIdentifier\022\021\n\tbitsValue\030\013 \003(\t\022\014\n\004c" +
+ "ode\030\014 \003(\t\"`\n\tContainer\022\022\n\nparentPath\030\001 \002" +
+ "(\t\022?\n\016normalizedNode\030\002 \001(\0132\'.org.openday" +
+ "light.controller.mdsal.Node\"\246\001\n\014NodeMapE",
+ "ntry\022U\n\026instanceIdentifierPath\030\001 \002(\01325.o" +
+ "rg.opendaylight.controller.mdsal.Instanc" +
+ "eIdentifier\022?\n\016normalizedNode\030\002 \001(\0132\'.or" +
+ "g.opendaylight.controller.mdsal.Node\"N\n\007" +
+ "NodeMap\022C\n\nmapEntries\030\001 \003(\0132/.org.openda" +
+ "ylight.controller.mdsal.NodeMapEntryBO\n5" +
+ "org.opendaylight.controller.protobuff.me" +
+ "ssages.commonB\026NormalizedNodeMessages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
internal_static_org_opendaylight_controller_mdsal_InstanceIdentifier_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_InstanceIdentifier_descriptor,
- new java.lang.String[] { "Arguments", });
+ new java.lang.String[] { "Arguments", "Code", });
internal_static_org_opendaylight_controller_mdsal_Node_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_org_opendaylight_controller_mdsal_Node_fieldAccessorTable = new
// @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.CloseTransactionChainReply)
}
- public interface CreateTransactionChainOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
- }
- /**
- * Protobuf type {@code org.opendaylight.controller.mdsal.CreateTransactionChain}
- */
- public static final class CreateTransactionChain extends
- com.google.protobuf.GeneratedMessage
- implements CreateTransactionChainOrBuilder {
- // Use CreateTransactionChain.newBuilder() to construct.
- private CreateTransactionChain(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private CreateTransactionChain(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final CreateTransactionChain defaultInstance;
- public static CreateTransactionChain getDefaultInstance() {
- return defaultInstance;
- }
-
- public CreateTransactionChain getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private CreateTransactionChain(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain.Builder.class);
- }
-
- public static com.google.protobuf.Parser<CreateTransactionChain> PARSER =
- new com.google.protobuf.AbstractParser<CreateTransactionChain>() {
- public CreateTransactionChain parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new CreateTransactionChain(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<CreateTransactionChain> getParserForType() {
- return PARSER;
- }
-
- private void initFields() {
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.opendaylight.controller.mdsal.CreateTransactionChain}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain.Builder.class);
- }
-
- // Construct using org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_descriptor;
- }
-
- public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain getDefaultInstanceForType() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain.getDefaultInstance();
- }
-
- public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain build() {
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain buildPartial() {
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain result = new org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain(this);
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain) {
- return mergeFrom((org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain other) {
- if (other == org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain.getDefaultInstance()) return this;
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChain) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CreateTransactionChain)
- }
-
- static {
- defaultInstance = new CreateTransactionChain(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.CreateTransactionChain)
- }
-
- public interface CreateTransactionChainReplyOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required string transactionChainPath = 1;
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- boolean hasTransactionChainPath();
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- java.lang.String getTransactionChainPath();
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- com.google.protobuf.ByteString
- getTransactionChainPathBytes();
- }
- /**
- * Protobuf type {@code org.opendaylight.controller.mdsal.CreateTransactionChainReply}
- */
- public static final class CreateTransactionChainReply extends
- com.google.protobuf.GeneratedMessage
- implements CreateTransactionChainReplyOrBuilder {
- // Use CreateTransactionChainReply.newBuilder() to construct.
- private CreateTransactionChainReply(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private CreateTransactionChainReply(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final CreateTransactionChainReply defaultInstance;
- public static CreateTransactionChainReply getDefaultInstance() {
- return defaultInstance;
- }
-
- public CreateTransactionChainReply getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private CreateTransactionChainReply(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- transactionChainPath_ = input.readBytes();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply.Builder.class);
- }
-
- public static com.google.protobuf.Parser<CreateTransactionChainReply> PARSER =
- new com.google.protobuf.AbstractParser<CreateTransactionChainReply>() {
- public CreateTransactionChainReply parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new CreateTransactionChainReply(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<CreateTransactionChainReply> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required string transactionChainPath = 1;
- public static final int TRANSACTIONCHAINPATH_FIELD_NUMBER = 1;
- private java.lang.Object transactionChainPath_;
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public boolean hasTransactionChainPath() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public java.lang.String getTransactionChainPath() {
- java.lang.Object ref = transactionChainPath_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- transactionChainPath_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public com.google.protobuf.ByteString
- getTransactionChainPathBytes() {
- java.lang.Object ref = transactionChainPath_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- transactionChainPath_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- private void initFields() {
- transactionChainPath_ = "";
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasTransactionChainPath()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getTransactionChainPathBytes());
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getTransactionChainPathBytes());
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code org.opendaylight.controller.mdsal.CreateTransactionChainReply}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReplyOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply.Builder.class);
- }
-
- // Construct using org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- transactionChainPath_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_descriptor;
- }
-
- public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply getDefaultInstanceForType() {
- return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply.getDefaultInstance();
- }
-
- public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply build() {
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply buildPartial() {
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply result = new org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.transactionChainPath_ = transactionChainPath_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply) {
- return mergeFrom((org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply other) {
- if (other == org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply.getDefaultInstance()) return this;
- if (other.hasTransactionChainPath()) {
- bitField0_ |= 0x00000001;
- transactionChainPath_ = other.transactionChainPath_;
- onChanged();
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasTransactionChainPath()) {
-
- return false;
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CreateTransactionChainReply) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // required string transactionChainPath = 1;
- private java.lang.Object transactionChainPath_ = "";
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public boolean hasTransactionChainPath() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public java.lang.String getTransactionChainPath() {
- java.lang.Object ref = transactionChainPath_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- transactionChainPath_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public com.google.protobuf.ByteString
- getTransactionChainPathBytes() {
- java.lang.Object ref = transactionChainPath_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- transactionChainPath_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public Builder setTransactionChainPath(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- transactionChainPath_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public Builder clearTransactionChainPath() {
- bitField0_ = (bitField0_ & ~0x00000001);
- transactionChainPath_ = getDefaultInstance().getTransactionChainPath();
- onChanged();
- return this;
- }
- /**
- * <code>required string transactionChainPath = 1;</code>
- */
- public Builder setTransactionChainPathBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- transactionChainPath_ = value;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CreateTransactionChainReply)
- }
-
- static {
- defaultInstance = new CreateTransactionChainReply(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.CreateTransactionChainReply)
- }
-
private static com.google.protobuf.Descriptors.Descriptor
internal_static_org_opendaylight_controller_mdsal_CloseTransactionChain_descriptor;
private static
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_org_opendaylight_controller_mdsal_CloseTransactionChainReply_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
"\n\033ShardTransactionChain.proto\022!org.opend" +
"aylight.controller.mdsal\"3\n\025CloseTransac" +
"tionChain\022\032\n\022transactionChainId\030\001 \001(\t\"\034\n" +
- "\032CloseTransactionChainReply\"\030\n\026CreateTra" +
- "nsactionChain\";\n\033CreateTransactionChainR" +
- "eply\022\034\n\024transactionChainPath\030\001 \002(\tB[\n:or" +
- "g.opendaylight.controller.protobuff.mess" +
- "ages.transactionB\035ShardTransactionChainM" +
- "essages"
+ "\032CloseTransactionChainReplyB[\n:org.opend" +
+ "aylight.controller.protobuff.messages.tr" +
+ "ansactionB\035ShardTransactionChainMessages"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_org_opendaylight_controller_mdsal_CloseTransactionChainReply_descriptor,
new java.lang.String[] { });
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_descriptor =
- getDescriptor().getMessageTypes().get(2);
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChain_descriptor,
- new java.lang.String[] { });
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_descriptor =
- getDescriptor().getMessageTypes().get(3);
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_org_opendaylight_controller_mdsal_CreateTransactionChainReply_descriptor,
- new java.lang.String[] { "TransactionChainPath", });
return null;
}
};
message CanCommitTransaction{
-
+ required string transactionId = 1;
}
message CanCommitTransactionReply{
}
message AbortTransaction{
-
+ required string transactionId = 1;
}
message AbortTransactionReply {
}
message CommitTransaction{
-
+ required string transactionId = 1;
}
message CommitTransactionReply{
message InstanceIdentifier {
repeated PathArgument arguments=1;
+
+ // A list of string codes which can be used for any repeated strings in the path args. This is
+ // optional - an InstanceIdentifier may be encoded as part of another message, eg NormalizedNode,
+ // that contains the codes.
+ repeated string code = 2;
}
message Node{
new NormalizedNodeToNodeCodec(schemaContext);
long start = System.currentTimeMillis();
Container container =
- codec.encode(instanceIdentifierFromString(id), output);
+ codec.encode(output);
long end = System.currentTimeMillis();
System.out.println("Timetaken to encode :"+(end-start));
assertNotNull(container);
- assertEquals(id, container.getParentPath() + "/"
- + NormalizedNodeSerializer.deSerialize(container.getNormalizedNode(),
- container.getNormalizedNode().getPathArgument()));
// Decode the normalized node from the ProtocolBuffer form
// first get the node representation of normalized node
start = System.currentTimeMillis();
NormalizedNode<?, ?> normalizedNode =
- codec.decode(instanceIdentifierFromString(id), node);
+ codec.decode(node);
end = System.currentTimeMillis();
System.out.println("Timetaken to decode :"+(end-start));
new NormalizedNodeToNodeCodec(schemaContext);
Container container =
- normalizedNodeToNodeCodec.encode(YangInstanceIdentifier.builder()
- .build(), documentOne);
+ normalizedNodeToNodeCodec.encode(documentOne);
final NormalizedNode<?, ?> decode =
normalizedNodeToNodeCodec
.decode(
- instanceIdentifierFromString("/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test"),
container.getNormalizedNode());
assertNotNull(decode);
// let us ensure that the return decode normalized node encode returns same container
Container containerResult =
- normalizedNodeToNodeCodec.encode(YangInstanceIdentifier.builder()
- .build(), decode);
-
- assertEquals(container.getParentPath(), containerResult.getParentPath());
-
- assertEquals(containerResult.getNormalizedNode().getChildCount(),
- container.getNormalizedNode().getChildCount());
+ normalizedNodeToNodeCodec.encode(decode);
// check first level children are proper
List<Node> childrenResult =
NormalizedNodeToNodeCodec codec =
new NormalizedNodeToNodeCodec(schemaContext);
- Container encode = codec.encode(identifier, uno);
+ Container encode = codec.encode(uno);
System.out.println(encode.getNormalizedNode());
- codec.decode(identifier, encode.getNormalizedNode());
+ codec.decode(encode.getNormalizedNode());
}
}
public class PathUtilsTest {
- @Test
- public void getParentPath(){
- assertEquals("", PathUtils.getParentPath("foobar"));
- assertEquals("", PathUtils.getParentPath("/a"));
- assertEquals("/a", PathUtils.getParentPath("/a/b"));
- assertEquals("/a/b", PathUtils.getParentPath("/a/b/c"));
- assertEquals("/a/b", PathUtils.getParentPath("a/b/c"));
- }
-
@Test
public void toStringNodeIdentifier(){
YangInstanceIdentifier.PathArgument pathArgument = nodeIdentifier();
expectedException.expect(NullPointerException.class);
expectedException.expectMessage("pathArgument should not be null");
- PathArgumentSerializer.serialize(mock(
- NormalizedNodeSerializationContext.class), null);
+ PathArgumentSerializer.serialize(mock(QNameSerializationContext.class), null);
}
expectedException.expect(NullPointerException.class);
expectedException.expectMessage("pathArgument should not be null");
- PathArgumentSerializer.deSerialize(mock(NormalizedNodeDeSerializationContext.class), null);
+ PathArgumentSerializer.deSerialize(mock(QNameDeSerializationContext.class), null);
}
@Test
public void testSerializeNodeIdentifier(){
- NormalizedNodeSerializationContext serializationContext =
- mock(NormalizedNodeSerializationContext.class);
+ QNameSerializationContext serializationContext = mock(QNameSerializationContext.class);
when(serializationContext.addLocalName(anyString())).thenReturn(5);
when(serializationContext.addNamespace(any(URI.class))).thenReturn(10);
@Test
public void testSerializeNodeIdentifierWithValue(){
- NormalizedNodeSerializationContext serializationContext =
- mock(NormalizedNodeSerializationContext.class);
+ QNameSerializationContext serializationContext = mock(QNameSerializationContext.class);
when(serializationContext.addLocalName(anyString())).thenReturn(5);
when(serializationContext.addNamespace(any(URI.class))).thenReturn(10);
@Test
public void testSerializeNodeIdentifierWithPredicates(){
- NormalizedNodeSerializationContext serializationContext =
- mock(NormalizedNodeSerializationContext.class);
-
+ QNameSerializationContext serializationContext = mock(QNameSerializationContext.class);
when(serializationContext.addLocalName("test")).thenReturn(5);
when(serializationContext.addLocalName("child-name")).thenReturn(55);
@Test
public void testSerializeAugmentationIdentifier(){
- NormalizedNodeSerializationContext serializationContext =
- mock(NormalizedNodeSerializationContext.class);
+ QNameSerializationContext serializationContext = mock(QNameSerializationContext.class);
when(serializationContext.addLocalName(anyString())).thenReturn(55);
when(serializationContext.addNamespace(any(URI.class))).thenReturn(66);
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
+import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.util.TestModel;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.Set;
-
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
public void testSerializeShort(){
short v1 = 5;
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.SHORT_TYPE.ordinal(), builder.getIntValueType());
assertEquals("5", builder.getValue());
- NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+ NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 =
+ NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.SHORT_TYPE.ordinal(), builder1.getType());
assertEquals("5", builder.getValue());
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class), expected);
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class), expected);
assertEquals(ValueType.INT_TYPE.ordinal(), builder.getIntValueType());
assertEquals("243", builder.getValue());
- NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+ NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 =
+ NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class), expected);
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class), expected);
assertEquals(ValueType.INT_TYPE.ordinal(), builder1.getType());
assertEquals("243", builder1.getValue());
public void testSerializeLong(){
long v1 = 5;
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.LONG_TYPE.ordinal(), builder.getIntValueType());
assertEquals("5", builder.getValue());
NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.LONG_TYPE.ordinal(), builder1.getType());
assertEquals("5", builder1.getValue());
public void testSerializeByte(){
byte v1 = 5;
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.BYTE_TYPE.ordinal(), builder.getIntValueType());
assertEquals("5", builder.getValue());
NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.BYTE_TYPE.ordinal(), builder1.getType());
assertEquals("5", builder1.getValue());
@Test
public void testSerializeBits(){
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class),
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class),
ImmutableSet.of("foo", "bar"));
assertEquals(ValueType.BITS_TYPE.ordinal(), builder.getIntValueType());
NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class),
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class),
ImmutableSet.of("foo", "bar"));
assertEquals(ValueType.BITS_TYPE.ordinal(), builder1.getType());
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Expected value type to be Bits but was :");
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class),
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class),
ImmutableSet.of(1, 2));
}
@Test
public void testSerializeEmptyString(){
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class),"");
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class),"");
assertEquals(ValueType.STRING_TYPE.ordinal(), builder.getIntValueType());
assertEquals("", builder.getValue());
NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class),"");
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class),"");
assertEquals(ValueType.STRING_TYPE.ordinal(), builder1.getType());
assertEquals("", builder1.getValue());
@Test
public void testSerializeString(){
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class),"foo");
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class),"foo");
assertEquals(ValueType.STRING_TYPE.ordinal(), builder.getIntValueType());
assertEquals("foo", builder.getValue());
- NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+ NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 =
+ NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class),"foo");
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class),"foo");
assertEquals(ValueType.STRING_TYPE.ordinal(), builder1.getType());
assertEquals("foo", builder1.getValue());
public void testSerializeBoolean(){
boolean v1 = true;
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.BOOL_TYPE.ordinal(), builder.getIntValueType());
assertEquals("true", builder.getValue());
- NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class), v1);
+ NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 =
+ NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.BOOL_TYPE.ordinal(), builder1.getType());
assertEquals("true", builder1.getValue());
public void testSerializeQName(){
QName v1 = TestModel.TEST_QNAME;
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.QNAME_TYPE.ordinal(), builder.getIntValueType());
assertEquals("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test", builder.getValue());
NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.QNAME_TYPE.ordinal(), builder1.getType());
assertEquals("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test", builder1.getValue());
YangInstanceIdentifier v1 = TestModel.TEST_PATH;
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class), v1);
-
+ QNameSerializationContext mockContext = mock(QNameSerializationContext.class);
+ ValueSerializer.serialize(builder, mockContext, v1);
assertEquals(ValueType.YANG_IDENTIFIER_TYPE.ordinal(), builder.getIntValueType());
NormalizedNodeMessages.InstanceIdentifier serializedYangInstanceIdentifier =
builder.getInstanceIdentifierValue();
assertEquals(1, serializedYangInstanceIdentifier.getArgumentsCount());
- assertEquals(TestModel.TEST_QNAME.toString(), serializedYangInstanceIdentifier.getArguments(0).getNodeType().getValue());
+ Mockito.verify(mockContext).addLocalName(TestModel.TEST_QNAME.getLocalName());
+ Mockito.verify(mockContext).addNamespace(TestModel.TEST_QNAME.getNamespace());
}
@Test
public void testSerializeBigInteger(){
BigInteger v1 = new BigInteger("1000000000000000000000000");
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.BIG_INTEGER_TYPE.ordinal(), builder.getIntValueType());
assertEquals("1000000000000000000000000", builder.getValue());
NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.BIG_INTEGER_TYPE.ordinal(), builder1.getType());
assertEquals("1000000000000000000000000", builder1.getValue());
public void testSerializeBigDecimal(){
BigDecimal v1 = new BigDecimal("1000000000000000000000000.51616");
NormalizedNodeMessages.Node.Builder builder = NormalizedNodeMessages.Node.newBuilder();
- ValueSerializer.serialize(builder, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.BIG_DECIMAL_TYPE.ordinal(), builder.getIntValueType());
assertEquals("1000000000000000000000000.51616", builder.getValue());
NormalizedNodeMessages.PathArgumentAttribute.Builder builder1 = NormalizedNodeMessages.PathArgumentAttribute.newBuilder();
- ValueSerializer.serialize(builder1, mock(
- NormalizedNodeSerializationContext.class), v1);
+ ValueSerializer.serialize(builder1, mock(QNameSerializationContext.class), v1);
assertEquals(ValueType.BIG_DECIMAL_TYPE.ordinal(), builder1.getType());
assertEquals("1000000000000000000000000.51616", builder1.getValue());
nodeBuilder.setValue("25");
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof Short);
nodeBuilder.setValue("25");
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof Byte);
nodeBuilder.setValue("25");
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof Integer);
nodeBuilder.setValue("25");
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof Long);
nodeBuilder.setValue("false");
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof Boolean);
nodeBuilder.setValue(TestModel.TEST_QNAME.toString());
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof QName);
nodeBuilder.addAllBitsValue(ImmutableList.of("foo", "bar"));
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof Set);
NormalizedNodeMessages.InstanceIdentifier.Builder idBuilder = NormalizedNodeMessages.InstanceIdentifier.newBuilder();
NormalizedNodeMessages.PathArgument.Builder pathBuilder = NormalizedNodeMessages.PathArgument.newBuilder();
- pathBuilder.setValue(TestModel.TEST_QNAME.toString());
pathBuilder.setIntType(PathArgumentType.NODE_IDENTIFIER.ordinal());
idBuilder.addArguments(pathBuilder);
nodeBuilder.setIntValueType(ValueType.YANG_IDENTIFIER_TYPE.ordinal());
nodeBuilder.setInstanceIdentifierValue(idBuilder);
- Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
- nodeBuilder.build());
+ QNameDeSerializationContext mockContext = mock(QNameDeSerializationContext.class);
+ Mockito.doReturn(TestModel.TEST_QNAME.getNamespace().toString()).when(mockContext).
+ getNamespace(Mockito.anyInt());
+ Mockito.doReturn(TestModel.TEST_QNAME.getLocalName()).when(mockContext).
+ getLocalName(Mockito.anyInt());
+ Mockito.doReturn(TestModel.TEST_QNAME.getFormattedRevision()).when(mockContext).
+ getRevision(Mockito.anyInt());
+
+ Object o = ValueSerializer.deSerialize(mockContext, nodeBuilder.build());
assertTrue(o instanceof YangInstanceIdentifier);
assertEquals(TestModel.TEST_PATH, o);
nodeBuilder.setIntValueType(ValueType.STRING_TYPE.ordinal());
nodeBuilder.setValue("25");
- Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ Object o = ValueSerializer.deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof String);
nodeBuilder.setValue("25");
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof BigInteger);
nodeBuilder.setValue("25");
Object o = ValueSerializer
- .deSerialize(mock(NormalizedNodeDeSerializationContext.class),
+ .deSerialize(mock(QNameDeSerializationContext.class),
nodeBuilder.build());
assertTrue(o instanceof BigDecimal);
--- /dev/null
+/*
+ *
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+
+import org.apache.commons.lang.SerializationUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.util.TestModel;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+public class NormalizedNodeStreamReaderWriterTest {
+
+ final NormalizedNode<?, ?> input = TestModel.createTestContainer();
+
+ @Test
+ public void testNormalizedNodeStreamReaderWriter() throws IOException {
+
+ byte[] byteData = null;
+
+ try(ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ NormalizedNodeStreamWriter writer = new NormalizedNodeOutputStreamWriter(byteArrayOutputStream)) {
+
+ NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(writer);
+ normalizedNodeWriter.write(input);
+ byteData = byteArrayOutputStream.toByteArray();
+
+ }
+
+ try(NormalizedNodeInputStreamReader reader = new NormalizedNodeInputStreamReader(
+ new ByteArrayInputStream(byteData))) {
+
+ NormalizedNode<?,?> node = reader.readNormalizedNode();
+ Assert.assertEquals(input, node);
+
+ }
+ }
+
+ @Test
+ public void testWithSerializable() {
+ SampleNormalizedNodeSerializable serializable = new SampleNormalizedNodeSerializable(input);
+ SampleNormalizedNodeSerializable clone = (SampleNormalizedNodeSerializable)SerializationUtils.clone(serializable);
+
+ Assert.assertEquals(input, clone.getInput());
+
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.node.utils.stream;
+
+
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.net.URISyntaxException;
+
+public class SampleNormalizedNodeSerializable implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private NormalizedNode<?, ?> input;
+
+ public SampleNormalizedNodeSerializable(NormalizedNode<?, ?> input) {
+ this.input = input;
+ }
+
+ public NormalizedNode<?, ?> getInput() {
+ return input;
+ }
+
+ private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException, URISyntaxException {
+ NormalizedNodeStreamReader reader = new NormalizedNodeInputStreamReader(stream);
+ this.input = reader.readNormalizedNode();
+ }
+
+ private void writeObject(final ObjectOutputStream stream) throws IOException {
+ NormalizedNodeStreamWriter writer = new NormalizedNodeOutputStreamWriter(stream);
+ NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(writer);
+
+ normalizedNodeWriter.write(this.input);
+ }
+
+}
import org.junit.Assert;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.QNameDeSerializationContext;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.QNameDeSerializationContextImpl;
+import org.opendaylight.controller.cluster.datastore.node.utils.serialization.QNameSerializationContextImpl;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
public class InstanceIdentifierUtilsTest {
- private static QName TEST_QNAME =
- QName
- .create("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test");
- private static QName NODE_WITH_VALUE_QNAME =
- QName
- .create("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)value");
- private static QName NODE_WITH_PREDICATES_QNAME =
- QName
- .create("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)pred");
- private static QName NAME_QNAME =
- QName
- .create("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)name");
-
- @Test
- public void testSerializationOfNodeIdentifier() {
- YangInstanceIdentifier.PathArgument p1 =
- new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME);
-
- List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
-
- arguments.add(p1);
+ private static QName TEST_QNAME = QName
+ .create("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test");
+ private static QName NODE_WITH_VALUE_QNAME = QName
+ .create("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)value");
+ private static QName NODE_WITH_PREDICATES_QNAME = QName
+ .create("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)pred");
+ private static QName NAME_QNAME = QName
+ .create("(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)name");
- YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
+ @Test
+ public void testSerializationOfNodeIdentifier() {
+ YangInstanceIdentifier.PathArgument p1 = new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME);
- NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
- InstanceIdentifierUtils.toSerializable(expected);
+ List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
- YangInstanceIdentifier actual =
- InstanceIdentifierUtils.fromSerializable(instanceIdentifier);
+ arguments.add(p1);
+ YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
- Assert.assertEquals(expected.getLastPathArgument(),
- actual.getLastPathArgument());
+ NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
+ InstanceIdentifierUtils.toSerializable(expected);
+ YangInstanceIdentifier actual = InstanceIdentifierUtils.fromSerializable(instanceIdentifier);
- }
+ Assert.assertEquals(expected.getLastPathArgument(), actual.getLastPathArgument());
+ }
- @Test
- public void testSerializationOfNodeWithValue() {
+ @Test
+ public void testSerializationOfNodeWithValue() {
- withValue((short) 1);
- withValue((long) 2);
- withValue(3);
- withValue(true);
+ withValue((short) 1);
+ withValue((long) 2);
+ withValue(3);
+ withValue(true);
- }
+ }
- private void withValue(Object value) {
- YangInstanceIdentifier.PathArgument p1 =
- new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME);
+ private void withValue(Object value) {
+ YangInstanceIdentifier.PathArgument p1 = new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME);
- YangInstanceIdentifier.PathArgument p2 =
- new YangInstanceIdentifier.NodeWithValue(NODE_WITH_VALUE_QNAME, value);
+ YangInstanceIdentifier.PathArgument p2 =
+ new YangInstanceIdentifier.NodeWithValue(NODE_WITH_VALUE_QNAME, value);
+ List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
- List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
+ arguments.add(p1);
+ arguments.add(p2);
- arguments.add(p1);
- arguments.add(p2);
+ YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
- YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
+ NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
+ InstanceIdentifierUtils.toSerializable(expected);
- NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
- InstanceIdentifierUtils.toSerializable(expected);
+ YangInstanceIdentifier actual = InstanceIdentifierUtils.fromSerializable(instanceIdentifier);
- YangInstanceIdentifier actual =
- InstanceIdentifierUtils.fromSerializable(instanceIdentifier);
+ Assert.assertEquals(expected.getLastPathArgument(), actual.getLastPathArgument());
+ }
+ @Test
+ public void testSerializationOfNodeIdentifierWithPredicates() {
- Assert.assertEquals(expected.getLastPathArgument(),
- actual.getLastPathArgument());
- }
+ withPredicates((short) 1);
+ withPredicates((long) 2);
+ withPredicates(3);
+ withPredicates(true);
+ }
- @Test
- public void testSerializationOfNodeIdentifierWithPredicates() {
+ private void withPredicates(Object value) {
+ YangInstanceIdentifier.PathArgument p1 = new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME);
- withPredicates((short) 1);
- withPredicates((long) 2);
- withPredicates(3);
- withPredicates(true);
+ YangInstanceIdentifier.PathArgument p2 = new YangInstanceIdentifier.NodeIdentifierWithPredicates(
+ NODE_WITH_PREDICATES_QNAME, NAME_QNAME, value);
- }
+ List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
- private void withPredicates(Object value) {
- YangInstanceIdentifier.PathArgument p1 =
- new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME);
+ arguments.add(p1);
+ arguments.add(p2);
- YangInstanceIdentifier.PathArgument p2 =
- new YangInstanceIdentifier.NodeIdentifierWithPredicates(
- NODE_WITH_PREDICATES_QNAME, NAME_QNAME, value);
+ YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
+ NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
+ InstanceIdentifierUtils.toSerializable(expected);
- List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
+ YangInstanceIdentifier actual = InstanceIdentifierUtils.fromSerializable(instanceIdentifier);
- arguments.add(p1);
- arguments.add(p2);
+ Assert.assertEquals(expected.getLastPathArgument(), actual.getLastPathArgument());
+ }
- YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
+ @Test
+ public void testAugmentationIdentifier() {
+ YangInstanceIdentifier.PathArgument p1 = new YangInstanceIdentifier.AugmentationIdentifier(new HashSet(
+ Arrays.asList(TEST_QNAME)));
- NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
- InstanceIdentifierUtils.toSerializable(expected);
+ List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
- YangInstanceIdentifier actual =
- InstanceIdentifierUtils.fromSerializable(instanceIdentifier);
+ arguments.add(p1);
+ YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
- Assert.assertEquals(expected.getLastPathArgument(),
- actual.getLastPathArgument());
- }
+ NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
+ InstanceIdentifierUtils.toSerializable(expected);
- @Test
- public void testAugmentationIdentifier() {
- YangInstanceIdentifier.PathArgument p1 =
- new YangInstanceIdentifier.AugmentationIdentifier(new HashSet(
- Arrays.asList(TEST_QNAME)));
+ YangInstanceIdentifier actual = InstanceIdentifierUtils.fromSerializable(instanceIdentifier);
- List<YangInstanceIdentifier.PathArgument> arguments = new ArrayList<>();
+ Assert.assertEquals(expected.getLastPathArgument(), actual.getLastPathArgument());
- arguments.add(p1);
+ }
- YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
+ @Test
+ public void testSerializationWithContext() {
+ List<YangInstanceIdentifier.PathArgument> arguments =
+ Arrays.<YangInstanceIdentifier.PathArgument>asList(
+ new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME),
+ new YangInstanceIdentifier.NodeWithValue(NODE_WITH_VALUE_QNAME, 1),
+ new YangInstanceIdentifier.NodeIdentifierWithPredicates(
+ NODE_WITH_PREDICATES_QNAME, NAME_QNAME, 2));
- NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
- InstanceIdentifierUtils.toSerializable(expected);
+ YangInstanceIdentifier expected = YangInstanceIdentifier.create(arguments);
- YangInstanceIdentifier actual =
- InstanceIdentifierUtils.fromSerializable(instanceIdentifier);
+ QNameSerializationContextImpl serializationContext = new QNameSerializationContextImpl();
+ NormalizedNodeMessages.InstanceIdentifier instanceIdentifier =
+ InstanceIdentifierUtils.toSerializable(expected, serializationContext);
- Assert.assertEquals(expected.getLastPathArgument(),
- actual.getLastPathArgument());
+ QNameDeSerializationContext deserializationContext = new QNameDeSerializationContextImpl(
+ serializationContext.getCodes());
- }
+ YangInstanceIdentifier actual = InstanceIdentifierUtils.fromSerializable(
+ instanceIdentifier, deserializationContext);
+ Assert.assertEquals(expected.getLastPathArgument(), actual.getLastPathArgument());
+ }
}
+++ /dev/null
-/*
- *
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- */
-package org.opendaylight.controller.cluster.datastore.util;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import org.custommonkey.xmlunit.Diff;
-import org.custommonkey.xmlunit.XMLUnit;
-import org.junit.Test;
-import org.opendaylight.controller.protobuff.messages.common.SimpleNormalizedNodeMessage;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.codec.xml.XmlDocumentUtils;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
-import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.serializer.DomFromNormalizedNodeSerializerFactory;
-import org.opendaylight.yangtools.yang.model.api.ChoiceNode;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.xml.sax.SAXException;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.transform.OutputKeys;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.TransformerFactoryConfigurationError;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.StringWriter;
-import java.net.URI;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-
-/**
- * Two of the testcases in the yangtools/yang-data-impl are leveraged (with modification) to create
- * the serialization of NormalizedNode using the ProtocolBuffer
- *
- * @syedbahm
- *
- */
-
-
-public class NormalizedNodeXmlConverterTest {
- private static final Logger logger = LoggerFactory
- .getLogger(NormalizedNodeXmlConverterTest.class);
- public static final String NAMESPACE =
- "urn:opendaylight:params:xml:ns:yang:controller:test";
- private static Date revision;
- private ContainerNode expectedNode;
- private ContainerSchemaNode containerNode;
- private String xmlPath;
-
- static {
- try {
- revision = new SimpleDateFormat("yyyy-MM-dd").parse("2014-03-13");
- } catch (ParseException e) {
- throw new RuntimeException(e);
- }
- }
-
- public static DataSchemaNode getSchemaNode(final SchemaContext context,
- final String moduleName, final String childNodeName) {
- for (Module module : context.getModules()) {
- if (module.getName().equals(moduleName)) {
- DataSchemaNode found =
- findChildNode(module.getChildNodes(), childNodeName);
- Preconditions.checkState(found != null, "Unable to find %s",
- childNodeName);
- return found;
- }
- }
- throw new IllegalStateException("Unable to find child node "
- + childNodeName);
- }
-
- static DataSchemaNode findChildNode(
- final Collection<DataSchemaNode> children, final String name) {
- List<DataNodeContainer> containers = Lists.newArrayList();
-
- for (DataSchemaNode dataSchemaNode : children) {
- if (dataSchemaNode.getQName().getLocalName().equals(name)) {
- return dataSchemaNode;
- }
- if (dataSchemaNode instanceof DataNodeContainer) {
- containers.add((DataNodeContainer) dataSchemaNode);
- } else if (dataSchemaNode instanceof ChoiceNode) {
- containers.addAll(((ChoiceNode) dataSchemaNode).getCases());
- }
- }
-
- for (DataNodeContainer container : containers) {
- DataSchemaNode retVal = findChildNode(container.getChildNodes(), name);
- if (retVal != null) {
- return retVal;
- }
- }
-
- return null;
- }
-
- public static YangInstanceIdentifier.NodeIdentifier getNodeIdentifier(
- final String localName) {
- return new YangInstanceIdentifier.NodeIdentifier(QName.create(
- URI.create(NAMESPACE), revision, localName));
- }
-
- public static YangInstanceIdentifier.AugmentationIdentifier getAugmentIdentifier(
- final String... childNames) {
- Set<QName> qn = Sets.newHashSet();
-
- for (String childName : childNames) {
- qn.add(getNodeIdentifier(childName).getNodeType());
- }
-
- return new YangInstanceIdentifier.AugmentationIdentifier(qn);
- }
-
-
- public static ContainerNode augmentChoiceExpectedNode() {
-
- DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode> b =
- Builders.containerBuilder();
- b.withNodeIdentifier(getNodeIdentifier("container"));
-
- b.withChild(Builders
- .choiceBuilder()
- .withNodeIdentifier(getNodeIdentifier("ch2"))
- .withChild(
- Builders.leafBuilder()
- .withNodeIdentifier(getNodeIdentifier("c2Leaf")).withValue("2")
- .build())
- .withChild(
- Builders
- .choiceBuilder()
- .withNodeIdentifier(getNodeIdentifier("c2DeepChoice"))
- .withChild(
- Builders
- .leafBuilder()
- .withNodeIdentifier(
- getNodeIdentifier("c2DeepChoiceCase1Leaf2"))
- .withValue("2").build()).build()).build());
-
- b.withChild(Builders
- .choiceBuilder()
- .withNodeIdentifier(getNodeIdentifier("ch3"))
- .withChild(
- Builders.leafBuilder()
- .withNodeIdentifier(getNodeIdentifier("c3Leaf")).withValue("3")
- .build()).build());
-
- b.withChild(Builders
- .augmentationBuilder()
- .withNodeIdentifier(getAugmentIdentifier("augLeaf"))
- .withChild(
- Builders.leafBuilder()
- .withNodeIdentifier(getNodeIdentifier("augLeaf"))
- .withValue("augment").build()).build());
-
- b.withChild(Builders
- .augmentationBuilder()
- .withNodeIdentifier(getAugmentIdentifier("ch"))
- .withChild(
- Builders
- .choiceBuilder()
- .withNodeIdentifier(getNodeIdentifier("ch"))
- .withChild(
- Builders.leafBuilder()
- .withNodeIdentifier(getNodeIdentifier("c1Leaf"))
- .withValue("1").build())
- .withChild(
- Builders
- .augmentationBuilder()
- .withNodeIdentifier(
- getAugmentIdentifier("c1Leaf_AnotherAugment",
- "deepChoice"))
- .withChild(
- Builders
- .leafBuilder()
- .withNodeIdentifier(
- getNodeIdentifier("c1Leaf_AnotherAugment"))
- .withValue("1").build())
- .withChild(
- Builders
- .choiceBuilder()
- .withNodeIdentifier(
- getNodeIdentifier("deepChoice"))
- .withChild(
- Builders
- .leafBuilder()
- .withNodeIdentifier(
- getNodeIdentifier("deepLeafc1"))
- .withValue("1").build()).build())
- .build()).build()).build());
-
- return b.build();
- }
-
-
-
- public void init(final String yangPath, final String xmlPath,
- final ContainerNode expectedNode) throws Exception {
- SchemaContext schema = parseTestSchema(yangPath);
- this.xmlPath = xmlPath;
- this.containerNode =
- (ContainerSchemaNode) getSchemaNode(schema, "test", "container");
- this.expectedNode = expectedNode;
- }
-
- SchemaContext parseTestSchema(final String yangPath) throws Exception {
-
- YangParserImpl yangParserImpl = new YangParserImpl();
- InputStream stream =
- NormalizedNodeXmlConverterTest.class.getResourceAsStream(yangPath);
- ArrayList<InputStream> al = new ArrayList<InputStream>();
- al.add(stream);
- Set<Module> modules = yangParserImpl.parseYangModelsFromStreams(al);
- return yangParserImpl.resolveSchemaContext(modules);
-
- }
-
-
- @Test
- public void testConversionWithAugmentChoice() throws Exception {
- init("/augment_choice.yang", "/augment_choice.xml",
- augmentChoiceExpectedNode());
- Document doc = loadDocument(xmlPath);
-
- ContainerNode built =
- DomToNormalizedNodeParserFactory
- .getInstance(DomUtils.defaultValueCodecProvider())
- .getContainerNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- containerNode);
-
- if (expectedNode != null) {
- junit.framework.Assert.assertEquals(expectedNode, built);
- }
-
- logger.info("{}", built);
-
- Iterable<Element> els =
- DomFromNormalizedNodeSerializerFactory
- .getInstance(XmlDocumentUtils.getDocument(),
- DomUtils.defaultValueCodecProvider())
- .getContainerNodeSerializer().serialize(containerNode, built);
-
- Element el = els.iterator().next();
-
- XMLUnit.setIgnoreWhitespace(true);
- XMLUnit.setIgnoreComments(true);
-
- System.out.println(toString(doc.getDocumentElement()));
- System.out.println(toString(el));
-
- new Diff(XMLUnit.buildControlDocument(toString(doc.getDocumentElement())),
- XMLUnit.buildTestDocument(toString(el))).similar();
- }
-
- private static ContainerNode listLeafListWithAttributes() {
- DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode> b =
- Builders.containerBuilder();
- b.withNodeIdentifier(getNodeIdentifier("container"));
-
- CollectionNodeBuilder<MapEntryNode, MapNode> listBuilder =
- Builders.mapBuilder().withNodeIdentifier(getNodeIdentifier("list"));
-
- Map<QName, Object> predicates = Maps.newHashMap();
- predicates.put(getNodeIdentifier("uint32InList").getNodeType(), 3L);
-
- DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode> list1Builder =
- Builders.mapEntryBuilder().withNodeIdentifier(
- new YangInstanceIdentifier.NodeIdentifierWithPredicates(
- getNodeIdentifier("list").getNodeType(), predicates));
- NormalizedNodeBuilder<YangInstanceIdentifier.NodeIdentifier, Object, LeafNode<Object>> uint32InListBuilder =
- Builders.leafBuilder().withNodeIdentifier(
- getNodeIdentifier("uint32InList"));
-
- list1Builder.withChild(uint32InListBuilder.withValue(3L).build());
-
- listBuilder.withChild(list1Builder.build());
- b.withChild(listBuilder.build());
-
- NormalizedNodeBuilder<YangInstanceIdentifier.NodeIdentifier, Object, LeafNode<Object>> booleanBuilder =
- Builders.leafBuilder().withNodeIdentifier(getNodeIdentifier("boolean"));
- booleanBuilder.withValue(false);
- b.withChild(booleanBuilder.build());
-
- ListNodeBuilder<Object, LeafSetEntryNode<Object>> leafListBuilder =
- Builders.leafSetBuilder().withNodeIdentifier(
- getNodeIdentifier("leafList"));
-
- NormalizedNodeBuilder<YangInstanceIdentifier.NodeWithValue, Object, LeafSetEntryNode<Object>> leafList1Builder =
- Builders.leafSetEntryBuilder().withNodeIdentifier(
- new YangInstanceIdentifier.NodeWithValue(getNodeIdentifier(
- "leafList").getNodeType(), "a"));
-
- leafList1Builder.withValue("a");
-
- leafListBuilder.withChild(leafList1Builder.build());
- b.withChild(leafListBuilder.build());
-
- return b.build();
- }
-
-
- @Test
- public void testConversionWithAttributes() throws Exception {
- init("/test.yang", "/simple_xml_with_attributes.xml",
- listLeafListWithAttributes());
- Document doc = loadDocument(xmlPath);
-
- ContainerNode built =
- DomToNormalizedNodeParserFactory
- .getInstance(DomUtils.defaultValueCodecProvider())
- .getContainerNodeParser()
- .parse(Collections.singletonList(doc.getDocumentElement()),
- containerNode);
-
- if (expectedNode != null) {
- junit.framework.Assert.assertEquals(expectedNode, built);
- }
-
- logger.info("{}", built);
-
- Iterable<Element> els =
- DomFromNormalizedNodeSerializerFactory
- .getInstance(XmlDocumentUtils.getDocument(),
- DomUtils.defaultValueCodecProvider())
- .getContainerNodeSerializer().serialize(containerNode, built);
-
- Element el = els.iterator().next();
-
- XMLUnit.setIgnoreWhitespace(true);
- XMLUnit.setIgnoreComments(true);
-
- System.out.println(toString(doc.getDocumentElement()));
- System.out.println(toString(el));
-
- new Diff(XMLUnit.buildControlDocument(toString(doc.getDocumentElement())),
- XMLUnit.buildTestDocument(toString(el))).similar();
- }
-
-
- private Document loadDocument(final String xmlPath) throws Exception {
- InputStream resourceAsStream =
- NormalizedNodeXmlConverterTest.class.getResourceAsStream(xmlPath);
-
- Document currentConfigElement = readXmlToDocument(resourceAsStream);
- Preconditions.checkNotNull(currentConfigElement);
- return currentConfigElement;
- }
-
- private static final DocumentBuilderFactory BUILDERFACTORY;
-
- static {
- DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
- factory.setNamespaceAware(true);
- factory.setCoalescing(true);
- factory.setIgnoringElementContentWhitespace(true);
- factory.setIgnoringComments(true);
- BUILDERFACTORY = factory;
- }
-
- private Document readXmlToDocument(final InputStream xmlContent)
- throws IOException, SAXException {
- DocumentBuilder dBuilder;
- try {
- dBuilder = BUILDERFACTORY.newDocumentBuilder();
- } catch (ParserConfigurationException e) {
- throw new RuntimeException("Failed to parse XML document", e);
- }
- Document doc = dBuilder.parse(xmlContent);
-
- doc.getDocumentElement().normalize();
- return doc;
- }
-
- public static String toString(final Element xml) {
- try {
- Transformer transformer =
- TransformerFactory.newInstance().newTransformer();
- transformer.setOutputProperty(OutputKeys.INDENT, "yes");
-
- StreamResult result = new StreamResult(new StringWriter());
- DOMSource source = new DOMSource(xml);
- transformer.transform(source, result);
-
- return result.getWriter().toString();
- } catch (IllegalArgumentException | TransformerFactoryConfigurationError
- | TransformerException e) {
- throw new RuntimeException("Unable to serialize xml element " + xml, e);
- }
- }
-
- @Test
- public void testConversionToNormalizedXml() throws Exception {
- SimpleNormalizedNodeMessage.NormalizedNodeXml nnXml =
- EncoderDecoderUtil.encode(parseTestSchema("/augment_choice.yang"),
- augmentChoiceExpectedNode());
- Document expectedDoc = loadDocument("/augment_choice.xml");
- Document convertedDoc =
- EncoderDecoderUtil.factory.newDocumentBuilder().parse(
- new ByteArrayInputStream(nnXml.getXmlString().getBytes("utf-8")));
- System.out.println(toString(convertedDoc.getDocumentElement()));
- XMLUnit.setIgnoreWhitespace(true);
- XMLUnit.setIgnoreComments(true);
- new Diff(XMLUnit.buildControlDocument(toString(expectedDoc
- .getDocumentElement())),
- XMLUnit.buildTestDocument(toString(convertedDoc.getDocumentElement())))
- .similar();
- System.out.println(toString(expectedDoc.getDocumentElement()));
-
- }
-
-
- @Test
- public void testConversionFromXmlToNormalizedNode() throws Exception {
- SimpleNormalizedNodeMessage.NormalizedNodeXml nnXml =
- EncoderDecoderUtil.encode(parseTestSchema("/test.yang"),
- listLeafListWithAttributes());
- Document expectedDoc = loadDocument("/simple_xml_with_attributes.xml");
- Document convertedDoc =
- EncoderDecoderUtil.factory.newDocumentBuilder().parse(
- new ByteArrayInputStream(nnXml.getXmlString().getBytes("utf-8")));
- System.out.println(toString(convertedDoc.getDocumentElement()));
- XMLUnit.setIgnoreWhitespace(true);
- XMLUnit.setIgnoreComments(true);
- new Diff(XMLUnit.buildControlDocument(toString(expectedDoc
- .getDocumentElement())),
- XMLUnit.buildTestDocument(toString(convertedDoc.getDocumentElement())))
- .similar();
- System.out.println(toString(expectedDoc.getDocumentElement()));
-
- // now we will try to convert xml back to normalize node.
- ContainerNode cn =
- (ContainerNode) EncoderDecoderUtil.decode(
- parseTestSchema("/test.yang"), nnXml);
- junit.framework.Assert.assertEquals(listLeafListWithAttributes(), cn);
-
- }
-
-}
package org.opendaylight.controller.protobuff.messages.transaction;
-import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.protobuff.messages.AbstractMessagesTest;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.yangtools.yang.common.QName;
/**
* This test case is present to ensure that if others have used proper version of protocol buffer
@Override
@Test
public void verifySerialization() throws Exception {
- String testTransactionChainPath =
- "/actor/path";
-
- ShardTransactionChainMessages.CreateTransactionChainReply.Builder builder =
- ShardTransactionChainMessages.CreateTransactionChainReply.newBuilder();
- builder.setTransactionChainPath(testTransactionChainPath);
-
- writeToFile((com.google.protobuf.GeneratedMessage.Builder<?>) builder);
-
- // Here we will read the same and check we got back what we had saved
- ShardTransactionChainMessages.CreateTransactionChainReply replyNew =
- (ShardTransactionChainMessages.CreateTransactionChainReply) readFromFile(ShardTransactionChainMessages.CreateTransactionChainReply.PARSER);
-
- Assert.assertEquals(replyNew.getTransactionChainPath(),testTransactionChainPath);
-
- // the following will compare with the version we had shipped
- ShardTransactionChainMessages.CreateTransactionChainReply replyOriginal =
- (ShardTransactionChainMessages.CreateTransactionChainReply) readFromTestDataFile(ShardTransactionChainMessages.CreateTransactionChainReply.PARSER);
-
-
- Assert.assertEquals(replyOriginal.getTransactionChainPath(),
- replyNew.getTransactionChainPath());
-
}
@Override
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-clustering-config</artifactId>
<description>Configuration files for md-sal clustering</description>
<type xmlns:operational-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store">operational-dom-store-spi:operational-dom-datastore</type>
<name>distributed-operational-store-service</name>
</operational-data-store>
+
+ <allow-concurrent-commits>true</allow-concurrent-commits>
</module>
<module>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-common-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-common-impl</artifactId>
<packaging>bundle</packaging>
static final Short OUTER_LIST_ID = (short) 10;
- static final YangInstanceIdentifier OUTER_LIST_PATH_LEGACY = YangInstanceIdentifier.builder(TEST_QNAME)
+ static final YangInstanceIdentifier OUTER_LIST_PATH_LEGACY = YangInstanceIdentifier.builder(TEST_PATH)
.nodeWithKey(OUTER_LIST_QNAME, ID_QNAME, OUTER_LIST_ID).build();
static final YangInstanceIdentifier LEAF_TWO_PATH_LEGACY = YangInstanceIdentifier.builder(OUTER_LIST_PATH_LEGACY)
.withNodeIdentifier(new NodeIdentifier(TEST_QNAME)).withChild(testAnyXmlNode).build();
DataNormalizer normalizer = new DataNormalizer(createTestContext());
- Node<?> legacyNode = normalizer.toLegacy(YangInstanceIdentifier.builder(TEST_QNAME).build(), testContainerNode);
+ Node<?> legacyNode = normalizer.toLegacy(YangInstanceIdentifier.builder().node(TEST_QNAME).build(), testContainerNode);
verifyLegacyNode(
legacyNode,
DataNormalizer normalizer = new DataNormalizer(createTestContext());
- Node<?> legacyNode = normalizer.toLegacy(YangInstanceIdentifier.builder(TEST_QNAME).build(), testContainerNode);
+ Node<?> legacyNode = normalizer.toLegacy(YangInstanceIdentifier.builder().node(TEST_QNAME).build(), testContainerNode);
verifyLegacyNode(
legacyNode,
DataNormalizer normalizer = new DataNormalizer(createTestContext());
- Node<?> legacyNode = normalizer.toLegacy(YangInstanceIdentifier.builder(TEST_QNAME).build(), testContainerNode);
+ Node<?> legacyNode = normalizer.toLegacy(YangInstanceIdentifier.builder().node(TEST_QNAME).build(), testContainerNode);
verifyLegacyNode(
legacyNode,
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-common-util</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-common</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-connector-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-distributed-datastore</artifactId>
<packaging>bundle</packaging>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-inmemory-datastore</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-clustering-commons</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-akka-raft</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
public interface ClusterWrapper {
void subscribeToMemberEvents(ActorRef actorRef);
String getCurrentMemberName();
+ String getSelfAddress();
}
public class ClusterWrapperImpl implements ClusterWrapper {
private final Cluster cluster;
private final String currentMemberName;
+ private final String selfAddress;
public ClusterWrapperImpl(ActorSystem actorSystem){
Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
);
currentMemberName = (String) cluster.getSelfRoles().toArray()[0];
+ selfAddress = cluster.selfAddress().toString();
}
public String getCurrentMemberName() {
return currentMemberName;
}
+
+ public String getSelfAddress() {
+ return selfAddress;
+ }
}
import akka.actor.Props;
import akka.japi.Creator;
-
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
import org.opendaylight.controller.cluster.datastore.messages.DataChangedReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class DataChangeListener extends AbstractUntypedActor {
+ private static final Logger LOG = LoggerFactory.getLogger(DataChangeListener.class);
+
private final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener;
- private volatile boolean notificationsEnabled = false;
+ private boolean notificationsEnabled = false;
public DataChangeListener(AsyncDataChangeListener<YangInstanceIdentifier,
NormalizedNode<?, ?>> listener) {
this.listener = Preconditions.checkNotNull(listener, "listener should not be null");
}
- @Override public void handleReceive(Object message) throws Exception {
+ @Override
+ public void handleReceive(Object message) throws Exception {
if(message instanceof DataChanged){
dataChanged(message);
} else if(message instanceof EnableNotification){
private void enableNotification(EnableNotification message) {
notificationsEnabled = message.isEnabled();
+ LOG.debug("{} notifications for listener {}", (notificationsEnabled ? "Enabled" : "Disabled"),
+ listener);
}
private void dataChanged(Object message) {
// Do nothing if notifications are not enabled
- if(!notificationsEnabled){
+ if(!notificationsEnabled) {
+ LOG.debug("Notifications not enabled for listener {} - dropping change notification",
+ listener);
return;
}
DataChanged reply = (DataChanged) message;
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>>
- change = reply.getChange();
+ AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = reply.getChange();
+
+ LOG.debug("Sending change notification {} to listener {}", change, listener);
+
this.listener.onDataChanged(change);
- if(getSender() != null){
+ // It seems the sender is never null but it doesn't hurt to check. If the caller passes in
+ // a null sender (ActorRef.noSender()), akka translates that to the deadLetters actor.
+ if(getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
getSender().tell(new DataChangedReply(), getSelf());
}
}
package org.opendaylight.controller.cluster.datastore;
+import akka.actor.ActorRef;
import akka.actor.ActorSelection;
-
import com.google.common.base.Preconditions;
-
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
@Override public void onDataChanged(
AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
- dataChangeListenerActor.tell(new DataChanged(schemaContext, change), null);
+ dataChangeListenerActor.tell(new DataChanged(schemaContext, change), ActorRef.noSender());
}
}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.PoisonPill;
+import akka.dispatch.OnComplete;
+import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.annotations.VisibleForTesting;
+import scala.concurrent.Future;
/**
* ListenerRegistrationProxy acts as a proxy for a ListenerRegistration that was done on a remote shard
* The ListenerRegistrationProxy talks to a remote ListenerRegistration actor.
* </p>
*/
+@SuppressWarnings("rawtypes")
public class DataChangeListenerRegistrationProxy implements ListenerRegistration {
+
+ private static final Logger LOG = LoggerFactory.getLogger(DataChangeListenerRegistrationProxy.class);
+
private volatile ActorSelection listenerRegistrationActor;
- private final AsyncDataChangeListener listener;
- private final ActorRef dataChangeListenerActor;
+ private final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener;
+ private ActorRef dataChangeListenerActor;
+ private final String shardName;
+ private final ActorContext actorContext;
private boolean closed = false;
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
- DataChangeListenerRegistrationProxy(
- ActorSelection listenerRegistrationActor,
- L listener, ActorRef dataChangeListenerActor) {
- this.listenerRegistrationActor = listenerRegistrationActor;
+ DataChangeListenerRegistrationProxy (
+ String shardName, ActorContext actorContext, L listener) {
+ this.shardName = shardName;
+ this.actorContext = actorContext;
this.listener = listener;
- this.dataChangeListenerActor = dataChangeListenerActor;
}
- public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
- DataChangeListenerRegistrationProxy(
- L listener, ActorRef dataChangeListenerActor) {
- this(null, listener, dataChangeListenerActor);
+ @VisibleForTesting
+ ActorSelection getListenerRegistrationActor() {
+ return listenerRegistrationActor;
+ }
+
+ @VisibleForTesting
+ ActorRef getDataChangeListenerActor() {
+ return dataChangeListenerActor;
}
@Override
return listener;
}
- public void setListenerRegistrationActor(ActorSelection listenerRegistrationActor) {
+ private void setListenerRegistrationActor(ActorSelection listenerRegistrationActor) {
+ if(listenerRegistrationActor == null) {
+ return;
+ }
+
boolean sendCloseMessage = false;
synchronized(this) {
if(closed) {
this.listenerRegistrationActor = listenerRegistrationActor;
}
}
+
if(sendCloseMessage) {
listenerRegistrationActor.tell(new
CloseDataChangeListenerRegistration().toSerializable(), null);
}
+ }
+
+ public void init(final YangInstanceIdentifier path, final AsyncDataBroker.DataChangeScope scope) {
- this.listenerRegistrationActor = listenerRegistrationActor;
+ dataChangeListenerActor = actorContext.getActorSystem().actorOf(
+ DataChangeListener.props(listener));
+
+ Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
+ findFuture.onComplete(new OnComplete<ActorRef>() {
+ @Override
+ public void onComplete(Throwable failure, ActorRef shard) {
+ if(failure instanceof LocalShardNotFoundException) {
+ LOG.debug("No local shard found for {} - DataChangeListener {} at path {} " +
+ "cannot be registered", shardName, listener, path);
+ } else if(failure != null) {
+ LOG.error("Failed to find local shard {} - DataChangeListener {} at path {} " +
+ "cannot be registered: {}", shardName, listener, path, failure);
+ } else {
+ doRegistration(shard, path, scope);
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
}
- public ActorSelection getListenerRegistrationActor() {
- return listenerRegistrationActor;
+ private void doRegistration(ActorRef shard, final YangInstanceIdentifier path,
+ DataChangeScope scope) {
+
+ Future<Object> future = actorContext.executeOperationAsync(shard,
+ new RegisterChangeListener(path, dataChangeListenerActor.path(), scope),
+ actorContext.getDatastoreContext().getShardInitializationTimeout());
+
+ future.onComplete(new OnComplete<Object>(){
+ @Override
+ public void onComplete(Throwable failure, Object result) {
+ if(failure != null) {
+ LOG.error("Failed to register DataChangeListener {} at path {}",
+ listener, path.toString(), failure);
+ } else {
+ RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
+ setListenerRegistrationActor(actorContext.actorSelection(
+ reply.getListenerRegistrationPath()));
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
}
@Override
sendCloseMessage = !closed && listenerRegistrationActor != null;
closed = true;
}
+
if(sendCloseMessage) {
- listenerRegistrationActor.tell(new
- CloseDataChangeListenerRegistration().toSerializable(), null);
+ listenerRegistrationActor.tell(new CloseDataChangeListenerRegistration().toSerializable(),
+ ActorRef.noSender());
+ listenerRegistrationActor = null;
}
- dataChangeListenerActor.tell(PoisonPill.getInstance(), null);
+ if(dataChangeListenerActor != null) {
+ dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ dataChangeListenerActor = null;
+ }
}
}
package org.opendaylight.controller.cluster.datastore;
-import com.google.common.base.Preconditions;
-
+import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
+import org.opendaylight.controller.cluster.datastore.config.FileConfigurationReader;
+import org.opendaylight.controller.cluster.raft.ConfigParams;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
-
+import akka.util.Timeout;
import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
import java.util.concurrent.TimeUnit;
private final Duration shardTransactionIdleTimeout;
private final int operationTimeoutInSeconds;
private final String dataStoreMXBeanType;
+ private final ConfigParams shardRaftConfig;
+ private final int shardTransactionCommitTimeoutInSeconds;
+ private final int shardTransactionCommitQueueCapacity;
+ private final Timeout shardInitializationTimeout;
+ private final Timeout shardLeaderElectionTimeout;
+ private final boolean persistent;
+ private final ConfigurationReader configurationReader;
- public DatastoreContext() {
- this.dataStoreProperties = null;
- this.dataStoreMXBeanType = "DistributedDatastore";
- this.shardTransactionIdleTimeout = Duration.create(10, TimeUnit.MINUTES);
- this.operationTimeoutInSeconds = 5;
- }
-
- public DatastoreContext(String dataStoreMXBeanType,
- InMemoryDOMDataStoreConfigProperties dataStoreProperties,
- Duration shardTransactionIdleTimeout,
- int operationTimeoutInSeconds) {
+ private DatastoreContext(InMemoryDOMDataStoreConfigProperties dataStoreProperties,
+ ConfigParams shardRaftConfig, String dataStoreMXBeanType, int operationTimeoutInSeconds,
+ Duration shardTransactionIdleTimeout, int shardTransactionCommitTimeoutInSeconds,
+ int shardTransactionCommitQueueCapacity, Timeout shardInitializationTimeout,
+ Timeout shardLeaderElectionTimeout,
+ boolean persistent, ConfigurationReader configurationReader) {
+ this.dataStoreProperties = dataStoreProperties;
+ this.shardRaftConfig = shardRaftConfig;
this.dataStoreMXBeanType = dataStoreMXBeanType;
- this.dataStoreProperties = Preconditions.checkNotNull(dataStoreProperties);
- this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
this.operationTimeoutInSeconds = operationTimeoutInSeconds;
+ this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
+ this.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
+ this.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
+ this.shardInitializationTimeout = shardInitializationTimeout;
+ this.shardLeaderElectionTimeout = shardLeaderElectionTimeout;
+ this.persistent = persistent;
+ this.configurationReader = configurationReader;
+ }
+
+ public static Builder newBuilder() {
+ return new Builder();
}
public InMemoryDOMDataStoreConfigProperties getDataStoreProperties() {
public int getOperationTimeoutInSeconds() {
return operationTimeoutInSeconds;
}
+
+ public ConfigParams getShardRaftConfig() {
+ return shardRaftConfig;
+ }
+
+ public int getShardTransactionCommitTimeoutInSeconds() {
+ return shardTransactionCommitTimeoutInSeconds;
+ }
+
+ public int getShardTransactionCommitQueueCapacity() {
+ return shardTransactionCommitQueueCapacity;
+ }
+
+ public Timeout getShardInitializationTimeout() {
+ return shardInitializationTimeout;
+ }
+
+ public Timeout getShardLeaderElectionTimeout() {
+ return shardLeaderElectionTimeout;
+ }
+
+ public boolean isPersistent() {
+ return persistent;
+ }
+
+ public ConfigurationReader getConfigurationReader() {
+ return configurationReader;
+ }
+
+ public static class Builder {
+ private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
+ private Duration shardTransactionIdleTimeout = Duration.create(10, TimeUnit.MINUTES);
+ private int operationTimeoutInSeconds = 5;
+ private String dataStoreMXBeanType;
+ private int shardTransactionCommitTimeoutInSeconds = 30;
+ private int shardJournalRecoveryLogBatchSize = 1000;
+ private int shardSnapshotBatchCount = 20000;
+ private int shardHeartbeatIntervalInMillis = 500;
+ private int shardTransactionCommitQueueCapacity = 20000;
+ private Timeout shardInitializationTimeout = new Timeout(5, TimeUnit.MINUTES);
+ private Timeout shardLeaderElectionTimeout = new Timeout(30, TimeUnit.SECONDS);
+ private boolean persistent = true;
+ private ConfigurationReader configurationReader = new FileConfigurationReader();
+
+ public Builder shardTransactionIdleTimeout(Duration shardTransactionIdleTimeout) {
+ this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
+ return this;
+ }
+
+ public Builder operationTimeoutInSeconds(int operationTimeoutInSeconds) {
+ this.operationTimeoutInSeconds = operationTimeoutInSeconds;
+ return this;
+ }
+
+ public Builder dataStoreMXBeanType(String dataStoreMXBeanType) {
+ this.dataStoreMXBeanType = dataStoreMXBeanType;
+ return this;
+ }
+
+ public Builder dataStoreProperties(InMemoryDOMDataStoreConfigProperties dataStoreProperties) {
+ this.dataStoreProperties = dataStoreProperties;
+ return this;
+ }
+
+ public Builder shardTransactionCommitTimeoutInSeconds(int shardTransactionCommitTimeoutInSeconds) {
+ this.shardTransactionCommitTimeoutInSeconds = shardTransactionCommitTimeoutInSeconds;
+ return this;
+ }
+
+ public Builder shardJournalRecoveryLogBatchSize(int shardJournalRecoveryLogBatchSize) {
+ this.shardJournalRecoveryLogBatchSize = shardJournalRecoveryLogBatchSize;
+ return this;
+ }
+
+ public Builder shardSnapshotBatchCount(int shardSnapshotBatchCount) {
+ this.shardSnapshotBatchCount = shardSnapshotBatchCount;
+ return this;
+ }
+
+ public Builder shardHeartbeatIntervalInMillis(int shardHeartbeatIntervalInMillis) {
+ this.shardHeartbeatIntervalInMillis = shardHeartbeatIntervalInMillis;
+ return this;
+ }
+
+ public Builder shardTransactionCommitQueueCapacity(int shardTransactionCommitQueueCapacity) {
+ this.shardTransactionCommitQueueCapacity = shardTransactionCommitQueueCapacity;
+ return this;
+ }
+
+ public Builder shardInitializationTimeout(long timeout, TimeUnit unit) {
+ this.shardInitializationTimeout = new Timeout(timeout, unit);
+ return this;
+ }
+
+ public Builder shardLeaderElectionTimeout(long timeout, TimeUnit unit) {
+ this.shardLeaderElectionTimeout = new Timeout(timeout, unit);
+ return this;
+ }
+
+ public Builder configurationReader(ConfigurationReader configurationReader){
+ this.configurationReader = configurationReader;
+ return this;
+ }
+
+
+ public Builder persistent(boolean persistent){
+ this.persistent = persistent;
+ return this;
+ }
+
+ public DatastoreContext build() {
+ DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
+ raftConfig.setHeartBeatInterval(new FiniteDuration(shardHeartbeatIntervalInMillis,
+ TimeUnit.MILLISECONDS));
+ raftConfig.setJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
+ raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
+
+ return new DatastoreContext(dataStoreProperties, raftConfig, dataStoreMXBeanType,
+ operationTimeoutInSeconds, shardTransactionIdleTimeout,
+ shardTransactionCommitTimeoutInSeconds, shardTransactionCommitQueueCapacity,
+ shardInitializationTimeout, shardLeaderElectionTimeout,
+ persistent, configurationReader);
+ }
+ }
}
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorRef;
import akka.actor.ActorSystem;
-import akka.dispatch.OnComplete;
-import akka.util.Timeout;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
/**
*
actorContext = new ActorContext(actorSystem, actorSystem.actorOf(
ShardManager.props(type, cluster, configuration, datastoreContext)
- .withMailbox(ActorContext.MAILBOX), shardManagerId ), cluster, configuration);
-
- actorContext.setOperationTimeout(datastoreContext.getOperationTimeoutInSeconds());
+ .withMailbox(ActorContext.MAILBOX), shardManagerId ),
+ cluster, configuration, datastoreContext);
}
public DistributedDataStore(ActorContext actorContext) {
Preconditions.checkNotNull(path, "path should not be null");
Preconditions.checkNotNull(listener, "listener should not be null");
- if(LOG.isDebugEnabled()) {
- LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
- }
- ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
- DataChangeListener.props(listener ));
+
+ LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
- Future future = actorContext.executeLocalShardOperationAsync(shardName,
- new RegisterChangeListener(path, dataChangeListenerActor.path(), scope),
- new Timeout(actorContext.getOperationDuration().$times(
- REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR)));
-
- if (future != null) {
- final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
- new DataChangeListenerRegistrationProxy(listener, dataChangeListenerActor);
-
- future.onComplete(new OnComplete(){
-
- @Override public void onComplete(Throwable failure, Object result)
- throws Throwable {
- if(failure != null){
- LOG.error("Failed to register listener at path " + path.toString(), failure);
- return;
- }
- RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
- listenerRegistrationProxy.setListenerRegistrationActor(actorContext
- .actorSelection(reply.getListenerRegistrationPath()));
- }
- }, actorContext.getActorSystem().dispatcher());
- return listenerRegistrationProxy;
- }
- if(LOG.isDebugEnabled()) {
- LOG.debug(
- "No local shard for shardName {} was found so returning a noop registration",
- shardName);
- }
- return new NoOpDataChangeListenerRegistration(listener);
+ final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
+ new DataChangeListenerRegistrationProxy(shardName, actorContext, listener);
+ listenerRegistrationProxy.init(path, scope);
+
+ return listenerRegistrationProxy;
}
@Override
public void close() throws Exception {
actorContext.shutdown();
}
+
+ @VisibleForTesting
+ ActorContext getActorContext() {
+ return actorContext;
+ }
}
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.osgi.BundleDelegatingClassLoader;
-import com.google.common.base.Preconditions;
-import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
+import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
import org.opendaylight.controller.sal.core.api.model.SchemaService;
import org.osgi.framework.BundleContext;
-import java.io.File;
import java.util.concurrent.atomic.AtomicReference;
public class DistributedDataStoreFactory {
- public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
+
public static final String CONFIGURATION_NAME = "odl-cluster-data";
- private static AtomicReference<ActorSystem> actorSystem = new AtomicReference<>();
+
+ private static AtomicReference<ActorSystem> persistentActorSystem = new AtomicReference<>();
public static DistributedDataStore createInstance(String name, SchemaService schemaService,
DatastoreContext datastoreContext, BundleContext bundleContext) {
- ActorSystem actorSystem = getOrCreateInstance(bundleContext);
+ ActorSystem actorSystem = getOrCreateInstance(bundleContext, datastoreContext.getConfigurationReader());
Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
final DistributedDataStore dataStore =
new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
return dataStore;
}
- synchronized private static final ActorSystem getOrCreateInstance(final BundleContext bundleContext) {
+ synchronized private static final ActorSystem getOrCreateInstance(final BundleContext bundleContext, ConfigurationReader configurationReader) {
+
+ AtomicReference<ActorSystem> actorSystemReference = persistentActorSystem;
+ String configurationName = CONFIGURATION_NAME;
+ String actorSystemName = ACTOR_SYSTEM_NAME;
- if (actorSystem.get() != null){
- return actorSystem.get();
+ if (actorSystemReference.get() != null){
+ return actorSystemReference.get();
}
+
// Create an OSGi bundle classloader for actor system
BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
Thread.currentThread().getContextClassLoader());
- ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
- ConfigFactory.load(readAkkaConfiguration()).getConfig(CONFIGURATION_NAME), classLoader);
+ ActorSystem system = ActorSystem.create(actorSystemName,
+ ConfigFactory.load(configurationReader.read()).getConfig(configurationName), classLoader);
system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
- actorSystem.set(system);
+ actorSystemReference.set(system);
return system;
}
-
- private static final Config readAkkaConfiguration() {
- File defaultConfigFile = new File(AKKA_CONF_PATH);
- Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
- return ConfigFactory.parseFile(defaultConfigFile);
- }
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.UntypedActor;
-import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
-
-public class NoOpCohort extends UntypedActor {
-
- @Override public void onReceive(Object message) throws Exception {
- if (message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
- getSender().tell(new CanCommitTransactionReply(false).toSerializable(), getSelf());
- } else if (message.getClass().equals(PreCommitTransaction.SERIALIZABLE_CLASS)) {
- getSender().tell(
- new PreCommitTransactionReply().toSerializable(),
- getSelf());
- } else if (message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
- getSender().tell(new CommitTransactionReply().toSerializable(), getSelf());
- } else if (message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
- getSender().tell(new AbortTransactionReply().toSerializable(), getSelf());
- } else {
- throw new Exception ("Not recognized message received,message="+message);
- }
-
- }
-}
-
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * When a consumer registers a data change listener and no local shard is
- * available to register that listener with then we return an instance of
- * NoOpDataChangeListenerRegistration
- *
- * <p>
- *
- * The NoOpDataChangeListenerRegistration as it's name suggests does
- * nothing when an operation is invoked on it
- */
-public class NoOpDataChangeListenerRegistration
- implements ListenerRegistration {
-
- private final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>
- listener;
-
- public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> NoOpDataChangeListenerRegistration(
- AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener) {
-
- this.listener = listener;
- }
-
- @Override
- public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
- return listener;
- }
-
- @Override public void close() {
-
- }
-}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
+import akka.actor.Cancellable;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.event.Logging;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.collect.Lists;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.common.actor.CommonConfig;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
+import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
-import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.controller.cluster.datastore.modification.Modification;
import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.raft.ConfigParams;
-import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
import org.opendaylight.controller.cluster.raft.RaftActor;
import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
-import java.util.ArrayList;
+import javax.annotation.Nonnull;
+import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
*/
public class Shard extends RaftActor {
- private static final ConfigParams configParams = new ShardConfigParams();
+ private static final Object COMMIT_TRANSACTION_REPLY = new CommitTransactionReply().toSerializable();
+
+ private static final Object TX_COMMIT_TIMEOUT_CHECK_MESSAGE = "txCommitTimeoutCheck";
public static final String DEFAULT_NAME = "default";
// The state of this Shard
private final InMemoryDOMDataStore store;
- private final Map<Object, DOMStoreThreePhaseCommitCohort>
- modificationToCohort = new HashMap<>();
-
private final LoggingAdapter LOG =
Logging.getLogger(getContext().system(), this);
- // By default persistent will be true and can be turned off using the system
- // property shard.persistent
- private final boolean persistent;
-
/// The name of this shard
private final ShardIdentifier name;
private final ShardStats shardMBean;
- private final List<ActorSelection> dataChangeListeners = new ArrayList<>();
+ private final List<ActorSelection> dataChangeListeners = Lists.newArrayList();
+
+ private final List<DelayedListenerRegistration> delayedListenerRegistrations =
+ Lists.newArrayList();
private final DatastoreContext datastoreContext;
+ private final DataPersistenceProvider dataPersistenceProvider;
+
private SchemaContext schemaContext;
private ActorRef createSnapshotTransaction;
+ private int createSnapshotTransactionCounter;
+
+ private final ShardCommitCoordinator commitCoordinator;
+
+ private final long transactionCommitTimeout;
+
+ private Cancellable txCommitTimeoutCheckSchedule;
+
+ /**
+ * Coordinates persistence recovery on startup.
+ */
+ private ShardRecoveryCoordinator recoveryCoordinator;
+ private List<Object> currentLogRecoveryBatch;
+
private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
- private Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
+ protected Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
DatastoreContext datastoreContext, SchemaContext schemaContext) {
- super(name.toString(), mapPeerAddresses(peerAddresses), Optional.of(configParams));
+ super(name.toString(), mapPeerAddresses(peerAddresses),
+ Optional.of(datastoreContext.getShardRaftConfig()));
this.name = name;
this.datastoreContext = datastoreContext;
this.schemaContext = schemaContext;
+ this.dataPersistenceProvider = (datastoreContext.isPersistent()) ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
- String setting = System.getProperty("shard.persistent");
-
- this.persistent = !"false".equals(setting);
-
- LOG.info("Shard created : {} persistent : {}", name, persistent);
+ LOG.info("Shard created : {} persistent : {}", name, datastoreContext.isPersistent());
store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
datastoreContext.getDataStoreProperties());
if (isMetricsCaptureEnabled()) {
getContext().become(new MeteringBehavior(this));
}
+
+ commitCoordinator = new ShardCommitCoordinator(TimeUnit.SECONDS.convert(1, TimeUnit.MINUTES),
+ datastoreContext.getShardTransactionCommitQueueCapacity());
+
+ transactionCommitTimeout = TimeUnit.MILLISECONDS.convert(
+ datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS);
}
private static Map<String, String> mapPeerAddresses(
return Props.create(new ShardCreator(name, peerAddresses, datastoreContext, schemaContext));
}
- @Override public void onReceiveRecover(Object message) {
+ @Override
+ public void postStop() {
+ super.postStop();
+
+ if(txCommitTimeoutCheckSchedule != null) {
+ txCommitTimeoutCheckSchedule.cancel();
+ }
+ }
+
+ @Override
+ public void onReceiveRecover(Object message) throws Exception {
if(LOG.isDebugEnabled()) {
LOG.debug("onReceiveRecover: Received message {} from {}",
message.getClass().toString(),
if (message instanceof RecoveryFailure){
LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");
+
+ // Even though recovery failed, we still need to finish our recovery, eg send the
+ // ActorInitialized message and start the txCommitTimeoutCheckSchedule.
+ onRecoveryComplete();
} else {
super.onReceiveRecover(message);
}
}
- @Override public void onReceiveCommand(Object message) {
+ @Override
+ public void onReceiveCommand(Object message) throws Exception {
if(LOG.isDebugEnabled()) {
- LOG.debug("onReceiveCommand: Received message {} from {}",
- message.getClass().toString(),
- getSender());
+ LOG.debug("onReceiveCommand: Received message {} from {}", message, getSender());
}
if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
- // This must be for install snapshot. Don't want to open this up and trigger
- // deSerialization
- self()
- .tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)),
- self());
-
- createSnapshotTransaction = null;
- // Send a PoisonPill instead of sending close transaction because we do not really need
- // a response
- getSender().tell(PoisonPill.getInstance(), self());
-
+ handleReadDataReply(message);
+ } else if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
+ handleCreateTransaction(message);
+ } else if(message instanceof ForwardedReadyTransaction) {
+ handleForwardedReadyTransaction((ForwardedReadyTransaction)message);
+ } else if(message.getClass().equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
+ handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
+ } else if(message.getClass().equals(CommitTransaction.SERIALIZABLE_CLASS)) {
+ handleCommitTransaction(CommitTransaction.fromSerializable(message));
+ } else if(message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
+ handleAbortTransaction(AbortTransaction.fromSerializable(message));
} else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
closeTransactionChain(CloseTransactionChain.fromSerializable(message));
} else if (message instanceof RegisterChangeListener) {
registerChangeListener((RegisterChangeListener) message);
} else if (message instanceof UpdateSchemaContext) {
updateSchemaContext((UpdateSchemaContext) message);
- } else if (message instanceof ForwardedCommitTransaction) {
- handleForwardedCommit((ForwardedCommitTransaction) message);
- } else if (message.getClass()
- .equals(CreateTransaction.SERIALIZABLE_CLASS)) {
- if (isLeader()) {
- createTransaction(CreateTransaction.fromSerializable(message));
- } else if (getLeader() != null) {
- getLeader().forward(message, getContext());
- } else {
- getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(
- "Could not find leader so transaction cannot be created")), getSelf());
- }
} else if (message instanceof PeerAddressResolved) {
PeerAddressResolved resolved = (PeerAddressResolved) message;
setPeerAddress(resolved.getPeerId().toString(),
resolved.getPeerAddress());
+ } else if(message.equals(TX_COMMIT_TIMEOUT_CHECK_MESSAGE)) {
+ handleTransactionCommitTimeoutCheck();
} else {
super.onReceiveCommand(message);
}
}
+ private void handleTransactionCommitTimeoutCheck() {
+ CohortEntry cohortEntry = commitCoordinator.getCurrentCohortEntry();
+ if(cohortEntry != null) {
+ long elapsed = System.currentTimeMillis() - cohortEntry.getLastAccessTime();
+ if(elapsed > transactionCommitTimeout) {
+ LOG.warning("Current transaction {} has timed out after {} ms - aborting",
+ cohortEntry.getTransactionID(), transactionCommitTimeout);
+
+ doAbortTransaction(cohortEntry.getTransactionID(), null);
+ }
+ }
+ }
+
+ private void handleCommitTransaction(CommitTransaction commit) {
+ final String transactionID = commit.getTransactionID();
+
+ LOG.debug("Committing transaction {}", transactionID);
+
+ // Get the current in-progress cohort entry in the commitCoordinator if it corresponds to
+ // this transaction.
+ final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
+ if(cohortEntry == null) {
+ // We're not the current Tx - the Tx was likely expired b/c it took too long in
+ // between the canCommit and commit messages.
+ IllegalStateException ex = new IllegalStateException(
+ String.format("Cannot commit transaction %s - it is not the current transaction",
+ transactionID));
+ LOG.error(ex.getMessage());
+ shardMBean.incrementFailedTransactionsCount();
+ getSender().tell(new akka.actor.Status.Failure(ex), getSelf());
+ return;
+ }
+
+ // We perform the preCommit phase here atomically with the commit phase. This is an
+ // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
+ // coordination of preCommit across shards in case of failure but preCommit should not
+ // normally fail since we ensure only one concurrent 3-phase commit.
+
+ try {
+ // We block on the future here so we don't have to worry about possibly accessing our
+ // state on a different thread outside of our dispatcher. Also, the data store
+ // currently uses a same thread executor anyway.
+ cohortEntry.getCohort().preCommit().get();
+
+ Shard.this.persistData(getSender(), transactionID,
+ new CompositeModificationPayload(cohortEntry.getModification().toSerializable()));
+ } catch (InterruptedException | ExecutionException e) {
+ LOG.error(e, "An exception occurred while preCommitting transaction {}",
+ cohortEntry.getTransactionID());
+ shardMBean.incrementFailedTransactionsCount();
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
+
+ cohortEntry.updateLastAccessTime();
+ }
+
+ private void finishCommit(@Nonnull final ActorRef sender, final @Nonnull String transactionID) {
+ // With persistence enabled, this method is called via applyState by the leader strategy
+ // after the commit has been replicated to a majority of the followers.
+
+ CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
+ if(cohortEntry == null) {
+ // The transaction is no longer the current commit. This can happen if the transaction
+ // was aborted prior, most likely due to timeout in the front-end. We need to finish
+ // committing the transaction though since it was successfully persisted and replicated
+ // however we can't use the original cohort b/c it was already preCommitted and may
+ // conflict with the current commit or may have been aborted so we commit with a new
+ // transaction.
+ cohortEntry = commitCoordinator.getAndRemoveCohortEntry(transactionID);
+ if(cohortEntry != null) {
+ commitWithNewTransaction(cohortEntry.getModification());
+ sender.tell(COMMIT_TRANSACTION_REPLY, getSelf());
+ } else {
+ // This really shouldn't happen - it likely means that persistence or replication
+ // took so long to complete such that the cohort entry was expired from the cache.
+ IllegalStateException ex = new IllegalStateException(
+ String.format("Could not finish committing transaction %s - no CohortEntry found",
+ transactionID));
+ LOG.error(ex.getMessage());
+ sender.tell(new akka.actor.Status.Failure(ex), getSelf());
+ }
+
+ return;
+ }
+
+ LOG.debug("Finishing commit for transaction {}", cohortEntry.getTransactionID());
+
+ try {
+ // We block on the future here so we don't have to worry about possibly accessing our
+ // state on a different thread outside of our dispatcher. Also, the data store
+ // currently uses a same thread executor anyway.
+ cohortEntry.getCohort().commit().get();
+
+ sender.tell(COMMIT_TRANSACTION_REPLY, getSelf());
+
+ shardMBean.incrementCommittedTransactionCount();
+ shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
+
+ } catch (InterruptedException | ExecutionException e) {
+ sender.tell(new akka.actor.Status.Failure(e), getSelf());
+
+ LOG.error(e, "An exception occurred while committing transaction {}", transactionID);
+ shardMBean.incrementFailedTransactionsCount();
+ }
+
+ commitCoordinator.currentTransactionComplete(transactionID, true);
+ }
+
+ private void handleCanCommitTransaction(CanCommitTransaction canCommit) {
+ LOG.debug("Can committing transaction {}", canCommit.getTransactionID());
+ commitCoordinator.handleCanCommit(canCommit, getSender(), self());
+ }
+
+ private void handleForwardedReadyTransaction(ForwardedReadyTransaction ready) {
+ LOG.debug("Readying transaction {}", ready.getTransactionID());
+
+ // This message is forwarded by the ShardTransaction on ready. We cache the cohort in the
+ // commitCoordinator in preparation for the subsequent three phase commit initiated by
+ // the front-end.
+ commitCoordinator.transactionReady(ready.getTransactionID(), ready.getCohort(),
+ ready.getModification());
+
+ // Return our actor path as we'll handle the three phase commit.
+ ReadyTransactionReply readyTransactionReply =
+ new ReadyTransactionReply(Serialization.serializedActorPath(self()));
+ getSender().tell(
+ ready.isReturnSerialized() ? readyTransactionReply.toSerializable() : readyTransactionReply,
+ getSelf());
+ }
+
+ private void handleAbortTransaction(AbortTransaction abort) {
+ doAbortTransaction(abort.getTransactionID(), getSender());
+ }
+
+ private void doAbortTransaction(String transactionID, final ActorRef sender) {
+ final CohortEntry cohortEntry = commitCoordinator.getCohortEntryIfCurrent(transactionID);
+ if(cohortEntry != null) {
+ LOG.debug("Aborting transaction {}", transactionID);
+
+ // We don't remove the cached cohort entry here (ie pass false) in case the Tx was
+ // aborted during replication in which case we may still commit locally if replication
+ // succeeds.
+ commitCoordinator.currentTransactionComplete(transactionID, false);
+
+ final ListenableFuture<Void> future = cohortEntry.getCohort().abort();
+ final ActorRef self = getSelf();
+
+ Futures.addCallback(future, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(Void v) {
+ shardMBean.incrementAbortTransactionsCount();
+
+ if(sender != null) {
+ sender.tell(new AbortTransactionReply().toSerializable(), self);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error(t, "An exception happened during abort");
+
+ if(sender != null) {
+ sender.tell(new akka.actor.Status.Failure(t), self);
+ }
+ }
+ });
+ }
+ }
+
+ private void handleCreateTransaction(Object message) {
+ if (isLeader()) {
+ createTransaction(CreateTransaction.fromSerializable(message));
+ } else if (getLeader() != null) {
+ getLeader().forward(message, getContext());
+ } else {
+ getSender().tell(new akka.actor.Status.Failure(new NoShardLeaderException(
+ "Could not find shard leader so transaction cannot be created. This typically happens" +
+ " when the system is coming up or recovering and a leader is being elected. Try again" +
+ " later.")), getSelf());
+ }
+ }
+
+ private void handleReadDataReply(Object message) {
+ // This must be for install snapshot. Don't want to open this up and trigger
+ // deSerialization
+
+ self().tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)),
+ self());
+
+ createSnapshotTransaction = null;
+
+ // Send a PoisonPill instead of sending close transaction because we do not really need
+ // a response
+ getSender().tell(PoisonPill.getInstance(), self());
+ }
+
private void closeTransactionChain(CloseTransactionChain closeTransactionChain) {
DOMStoreTransactionChain chain =
transactionChains.remove(closeTransactionChain.getTransactionChainId());
throw new NullPointerException("schemaContext should not be null");
}
- if (transactionType
- == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
+ if (transactionType == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
shardMBean.incrementReadOnlyTransactionCount();
return getContext().actorOf(
ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
- schemaContext,datastoreContext, shardMBean), transactionId.toString());
+ schemaContext,datastoreContext, shardMBean,
+ transactionId.getRemoteTransactionId()), transactionId.toString());
- } else if (transactionType
- == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
+ } else if (transactionType == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
shardMBean.incrementReadWriteTransactionCount();
return getContext().actorOf(
ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
- schemaContext, datastoreContext, shardMBean), transactionId.toString());
+ schemaContext, datastoreContext, shardMBean,
+ transactionId.getRemoteTransactionId()), transactionId.toString());
- } else if (transactionType
- == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
+ } else if (transactionType == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
shardMBean.incrementWriteOnlyTransactionCount();
return getContext().actorOf(
ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
- schemaContext, datastoreContext, shardMBean), transactionId.toString());
+ schemaContext, datastoreContext, shardMBean,
+ transactionId.getRemoteTransactionId()), transactionId.toString());
} else {
throw new IllegalArgumentException(
"Shard="+name + ":CreateTransaction message has unidentified transaction type="
commitCohort.commit().get();
}
-
- private void commit(final ActorRef sender, Object serialized) {
- Modification modification = MutableCompositeModification
- .fromSerializable(serialized, schemaContext);
- DOMStoreThreePhaseCommitCohort cohort =
- modificationToCohort.remove(serialized);
- if (cohort == null) {
-
- if(LOG.isDebugEnabled()) {
- LOG.debug(
- "Could not find cohort for modification : {}. Writing modification using a new transaction",
- modification);
- }
-
- DOMStoreWriteTransaction transaction =
- store.newWriteOnlyTransaction();
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Created new transaction {}", transaction.getIdentifier().toString());
- }
-
- modification.apply(transaction);
- try {
- syncCommitTransaction(transaction);
- } catch (InterruptedException | ExecutionException e) {
- shardMBean.incrementFailedTransactionsCount();
- LOG.error("Failed to commit", e);
- return;
- }
- //we want to just apply the recovery commit and return
+ private void commitWithNewTransaction(Modification modification) {
+ DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
+ modification.apply(tx);
+ try {
+ syncCommitTransaction(tx);
shardMBean.incrementCommittedTransactionCount();
- return;
- }
-
-
- if(sender == null){
- LOG.error("Commit failed. Sender cannot be null");
- return;
- }
-
- ListenableFuture<Void> future = cohort.commit();
-
- Futures.addCallback(future, new FutureCallback<Void>() {
- @Override
- public void onSuccess(Void v) {
- sender.tell(new CommitTransactionReply().toSerializable(), getSelf());
- shardMBean.incrementCommittedTransactionCount();
- shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
- }
-
- @Override
- public void onFailure(Throwable t) {
- LOG.error(t, "An exception happened during commit");
- shardMBean.incrementFailedTransactionsCount();
- sender.tell(new akka.actor.Status.Failure(t), getSelf());
- }
- });
-
- }
-
- private void handleForwardedCommit(ForwardedCommitTransaction message) {
- Object serializedModification =
- message.getModification().toSerializable();
-
- modificationToCohort
- .put(serializedModification, message.getCohort());
-
- if (persistent) {
- this.persistData(getSender(), "identifier",
- new CompositeModificationPayload(serializedModification));
- } else {
- this.commit(getSender(), serializedModification);
+ shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
+ } catch (InterruptedException | ExecutionException e) {
+ shardMBean.incrementFailedTransactionsCount();
+ LOG.error(e, "Failed to commit");
}
}
store.onGlobalContextUpdated(message.getSchemaContext());
}
- @VisibleForTesting void updateSchemaContext(SchemaContext schemaContext) {
+ @VisibleForTesting
+ void updateSchemaContext(SchemaContext schemaContext) {
store.onGlobalContextUpdated(schemaContext);
}
- private void registerChangeListener(
- RegisterChangeListener registerChangeListener) {
+ private void registerChangeListener(RegisterChangeListener registerChangeListener) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("registerDataChangeListener for {}", registerChangeListener
- .getPath());
+ LOG.debug("registerDataChangeListener for {}", registerChangeListener.getPath());
+
+ ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> registration;
+ if(isLeader()) {
+ registration = doChangeListenerRegistration(registerChangeListener);
+ } else {
+ LOG.debug("Shard is not the leader - delaying registration");
+
+ DelayedListenerRegistration delayedReg =
+ new DelayedListenerRegistration(registerChangeListener);
+ delayedListenerRegistrations.add(delayedReg);
+ registration = delayedReg;
}
+ ActorRef listenerRegistration = getContext().actorOf(
+ DataChangeListenerRegistration.props(registration));
- ActorSelection dataChangeListenerPath = getContext()
- .system().actorSelection(
- registerChangeListener.getDataChangeListenerPath());
+ LOG.debug("registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
+ listenerRegistration.path());
+
+ getSender().tell(new RegisterChangeListenerReply(listenerRegistration.path()),getSelf());
+ }
+
+ private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> doChangeListenerRegistration(
+ RegisterChangeListener registerChangeListener) {
+ ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
+ registerChangeListener.getDataChangeListenerPath());
// Notify the listener if notifications should be enabled or not
// If this shard is the leader then it will enable notifications else
// it will not
- dataChangeListenerPath
- .tell(new EnableNotification(isLeader()), getSelf());
+ dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
// Now store a reference to the data change listener so it can be notified
// at a later point if notifications should be enabled or disabled
dataChangeListeners.add(dataChangeListenerPath);
- AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>
- listener = new DataChangeListenerProxy(schemaContext, dataChangeListenerPath);
+ AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
+ new DataChangeListenerProxy(schemaContext, dataChangeListenerPath);
- ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
- registration = store.registerChangeListener(registerChangeListener.getPath(),
- listener, registerChangeListener.getScope());
- ActorRef listenerRegistration =
- getContext().actorOf(
- DataChangeListenerRegistration.props(registration));
+ LOG.debug("Registering for path {}", registerChangeListener.getPath());
- if(LOG.isDebugEnabled()) {
- LOG.debug(
- "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
- , listenerRegistration.path().toString());
- }
-
- getSender()
- .tell(new RegisterChangeListenerReply(listenerRegistration.path()),
- getSelf());
+ return store.registerChangeListener(registerChangeListener.getPath(), listener,
+ registerChangeListener.getScope());
}
private boolean isMetricsCaptureEnabled(){
return config.isMetricCaptureEnabled();
}
- @Override protected void applyState(ActorRef clientActor, String identifier,
- Object data) {
+ @Override
+ protected
+ void startLogRecoveryBatch(int maxBatchSize) {
+ currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} : starting log recovery batch with max size {}", persistenceId(), maxBatchSize);
+ }
+ }
+ @Override
+ protected void appendRecoveredLogEntry(Payload data) {
if (data instanceof CompositeModificationPayload) {
- Object modification =
- ((CompositeModificationPayload) data).getModification();
+ currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
+ } else {
+ LOG.error("Unknown state received {} during recovery", data);
+ }
+ }
- if (modification != null) {
- commit(clientActor, modification);
- } else {
- LOG.error(
- "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
- identifier, clientActor.path().toString());
+ @Override
+ protected void applyRecoverySnapshot(ByteString snapshot) {
+ if(recoveryCoordinator == null) {
+ recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
+ }
+
+ recoveryCoordinator.submit(snapshot, store.newWriteOnlyTransaction());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} : submitted recovery sbapshot", persistenceId());
+ }
+ }
+
+ @Override
+ protected void applyCurrentLogRecoveryBatch() {
+ if(recoveryCoordinator == null) {
+ recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
+ }
+
+ recoveryCoordinator.submit(currentLogRecoveryBatch, store.newWriteOnlyTransaction());
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} : submitted log recovery batch with size {}", persistenceId(),
+ currentLogRecoveryBatch.size());
+ }
+ }
+
+ @Override
+ protected void onRecoveryComplete() {
+ if(recoveryCoordinator != null) {
+ Collection<DOMStoreWriteTransaction> txList = recoveryCoordinator.getTransactions();
+
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("{} : recovery complete - committing {} Tx's", persistenceId(), txList.size());
}
+ for(DOMStoreWriteTransaction tx: txList) {
+ try {
+ syncCommitTransaction(tx);
+ shardMBean.incrementCommittedTransactionCount();
+ } catch (InterruptedException | ExecutionException e) {
+ shardMBean.incrementFailedTransactionsCount();
+ LOG.error(e, "Failed to commit");
+ }
+ }
+ }
+
+ recoveryCoordinator = null;
+ currentLogRecoveryBatch = null;
+ updateJournalStats();
+
+ //notify shard manager
+ getContext().parent().tell(new ActorInitialized(), getSelf());
+
+ // Being paranoid here - this method should only be called once but just in case...
+ if(txCommitTimeoutCheckSchedule == null) {
+ // Schedule a message to be periodically sent to check if the current in-progress
+ // transaction should be expired and aborted.
+ FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
+ txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
+ period, period, getSelf(),
+ TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
+ }
+ }
+
+ @Override
+ protected void applyState(ActorRef clientActor, String identifier, Object data) {
+
+ if (data instanceof CompositeModificationPayload) {
+ Object modification = ((CompositeModificationPayload) data).getModification();
+
+ if(modification == null) {
+ LOG.error(
+ "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
+ identifier, clientActor != null ? clientActor.path().toString() : null);
+ } else if(clientActor == null) {
+ // There's no clientActor to which to send a commit reply so we must be applying
+ // replicated state from the leader.
+ commitWithNewTransaction(MutableCompositeModification.fromSerializable(
+ modification, schemaContext));
+ } else {
+ // This must be the OK to commit after replication consensus.
+ finishCommit(clientActor, identifier);
+ }
} else {
- LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}", data, data.getClass().getClassLoader(), CompositeModificationPayload.class.getClassLoader());
+ LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
+ data, data.getClass().getClassLoader(),
+ CompositeModificationPayload.class.getClassLoader());
}
- // Update stats
+ updateJournalStats();
+
+ }
+
+ private void updateJournalStats() {
ReplicatedLogEntry lastLogEntry = getLastLogEntry();
if (lastLogEntry != null) {
shardMBean.setCommitIndex(getCommitIndex());
shardMBean.setLastApplied(getLastApplied());
-
}
- @Override protected void createSnapshot() {
+ @Override
+ protected void createSnapshot() {
if (createSnapshotTransaction == null) {
// Create a transaction. We are really going to treat the transaction as a worker
// so that this actor does not get block building the snapshot
createSnapshotTransaction = createTransaction(
TransactionProxy.TransactionType.READ_ONLY.ordinal(),
- "createSnapshot", "");
+ "createSnapshot" + ++createSnapshotTransactionCounter, "");
createSnapshotTransaction.tell(
new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(), self());
}
}
- @VisibleForTesting @Override protected void applySnapshot(ByteString snapshot) {
+ @VisibleForTesting
+ @Override
+ protected void applySnapshot(ByteString snapshot) {
// Since this will be done only on Recovery or when this actor is a Follower
// we can safely commit everything in here. We not need to worry about event notifications
// as they would have already been disabled on the follower
DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
NormalizedNode<?, ?> node = new NormalizedNodeToNodeCodec(schemaContext)
- .decode(YangInstanceIdentifier.builder().build(), serializedNode);
+ .decode(serializedNode);
// delete everything first
transaction.delete(YangInstanceIdentifier.builder().build());
}
}
- @Override protected void onStateChanged() {
+ @Override
+ protected void onStateChanged() {
+ boolean isLeader = isLeader();
for (ActorSelection dataChangeListener : dataChangeListeners) {
- dataChangeListener
- .tell(new EnableNotification(isLeader()), getSelf());
+ dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
+ }
+
+ if(isLeader) {
+ for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
+ if(!reg.isClosed()) {
+ reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
+ }
+ }
+
+ delayedListenerRegistrations.clear();
}
shardMBean.setRaftState(getRaftState().name());
shardMBean.setCurrentTerm(getCurrentTerm());
// If this actor is no longer the leader close all the transaction chains
- if(!isLeader()){
+ if(!isLeader){
for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
if(LOG.isDebugEnabled()) {
LOG.debug(
}
}
+ @Override
+ protected DataPersistenceProvider persistence() {
+ return dataPersistenceProvider;
+ }
+
@Override protected void onLeaderChanged(String oldLeader, String newLeader) {
shardMBean.setLeader(newLeader);
}
return this.name.toString();
}
-
- private static class ShardConfigParams extends DefaultConfigParamsImpl {
- public static final FiniteDuration HEART_BEAT_INTERVAL =
- new FiniteDuration(500, TimeUnit.MILLISECONDS);
-
- @Override public FiniteDuration getHeartBeatInterval() {
- return HEART_BEAT_INTERVAL;
- }
+ @VisibleForTesting
+ DataPersistenceProvider getDataPersistenceProvider() {
+ return dataPersistenceProvider;
}
private static class ShardCreator implements Creator<Shard> {
}
}
- @VisibleForTesting NormalizedNode readStore() throws ExecutionException, InterruptedException {
- DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
+ @VisibleForTesting
+ InMemoryDOMDataStore getDataStore() {
+ return store;
+ }
- CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
- transaction.read(YangInstanceIdentifier.builder().build());
+ @VisibleForTesting
+ ShardStats getShardMBean() {
+ return shardMBean;
+ }
- NormalizedNode<?, ?> node = future.get().get();
+ private static class DelayedListenerRegistration implements
+ ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
- transaction.close();
+ private volatile boolean closed;
- return node;
- }
+ private final RegisterChangeListener registerChangeListener;
- @VisibleForTesting void writeToStore(YangInstanceIdentifier id, NormalizedNode node)
- throws ExecutionException, InterruptedException {
- DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
+ private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> delegate;
- transaction.write(id, node);
+ DelayedListenerRegistration(RegisterChangeListener registerChangeListener) {
+ this.registerChangeListener = registerChangeListener;
+ }
- syncCommitTransaction(transaction);
- }
+ void setDelegate( ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+ NormalizedNode<?, ?>>> registration) {
+ this.delegate = registration;
+ }
+
+ boolean isClosed() {
+ return closed;
+ }
+
+ RegisterChangeListener getRegisterChangeListener() {
+ return registerChangeListener;
+ }
+
+ @Override
+ public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
+ return delegate != null ? delegate.getInstance() : null;
+ }
+ @Override
+ public void close() {
+ closed = true;
+ if(delegate != null) {
+ delegate.close();
+ }
+ }
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import akka.actor.ActorRef;
+import akka.actor.Status;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+
+/**
+ * Coordinates commits for a shard ensuring only one concurrent 3-phase commit.
+ *
+ * @author Thomas Pantelis
+ */
+public class ShardCommitCoordinator {
+
+ private static final Logger LOG = LoggerFactory.getLogger(ShardCommitCoordinator.class);
+
+ private static final Object CAN_COMMIT_REPLY_TRUE =
+ new CanCommitTransactionReply(Boolean.TRUE).toSerializable();
+
+ private static final Object CAN_COMMIT_REPLY_FALSE =
+ new CanCommitTransactionReply(Boolean.FALSE).toSerializable();
+
+ private final Cache<String, CohortEntry> cohortCache;
+
+ private CohortEntry currentCohortEntry;
+
+ private final Queue<CohortEntry> queuedCohortEntries;
+
+ private final int queueCapacity;
+
+ public ShardCommitCoordinator(long cacheExpiryTimeoutInSec, int queueCapacity) {
+ cohortCache = CacheBuilder.newBuilder().expireAfterAccess(
+ cacheExpiryTimeoutInSec, TimeUnit.SECONDS).build();
+
+ this.queueCapacity = queueCapacity;
+
+ // We use a LinkedList here to avoid synchronization overhead with concurrent queue impls
+ // since this should only be accessed on the shard's dispatcher.
+ queuedCohortEntries = new LinkedList<>();
+ }
+
+ /**
+ * This method caches a cohort entry for the given transactions ID in preparation for the
+ * subsequent 3-phase commit.
+ *
+ * @param transactionID the ID of the transaction
+ * @param cohort the cohort to participate in the transaction commit
+ * @param modification the modification made by the transaction
+ */
+ public void transactionReady(String transactionID, DOMStoreThreePhaseCommitCohort cohort,
+ Modification modification) {
+
+ cohortCache.put(transactionID, new CohortEntry(transactionID, cohort, modification));
+ }
+
+ /**
+ * This method handles the canCommit phase for a transaction.
+ *
+ * @param canCommit the CanCommitTransaction message
+ * @param sender the actor that sent the message
+ * @param shard the transaction's shard actor
+ */
+ public void handleCanCommit(CanCommitTransaction canCommit, final ActorRef sender,
+ final ActorRef shard) {
+ String transactionID = canCommit.getTransactionID();
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Processing canCommit for transaction {} for shard {}",
+ transactionID, shard.path());
+ }
+
+ // Lookup the cohort entry that was cached previously (or should have been) by
+ // transactionReady (via the ForwardedReadyTransaction message).
+ final CohortEntry cohortEntry = cohortCache.getIfPresent(transactionID);
+ if(cohortEntry == null) {
+ // Either canCommit was invoked before ready(shouldn't happen) or a long time passed
+ // between canCommit and ready and the entry was expired from the cache.
+ IllegalStateException ex = new IllegalStateException(
+ String.format("No cohort entry found for transaction %s", transactionID));
+ LOG.error(ex.getMessage());
+ sender.tell(new Status.Failure(ex), shard);
+ return;
+ }
+
+ cohortEntry.setCanCommitSender(sender);
+ cohortEntry.setShard(shard);
+
+ if(currentCohortEntry != null) {
+ // There's already a Tx commit in progress - attempt to queue this entry to be
+ // committed after the current Tx completes.
+ LOG.debug("Transaction {} is already in progress - queueing transaction {}",
+ currentCohortEntry.getTransactionID(), transactionID);
+
+ if(queuedCohortEntries.size() < queueCapacity) {
+ queuedCohortEntries.offer(cohortEntry);
+ } else {
+ removeCohortEntry(transactionID);
+
+ RuntimeException ex = new RuntimeException(
+ String.format("Could not enqueue transaction %s - the maximum commit queue"+
+ " capacity %d has been reached.",
+ transactionID, queueCapacity));
+ LOG.error(ex.getMessage());
+ sender.tell(new Status.Failure(ex), shard);
+ }
+ } else {
+ // No Tx commit currently in progress - make this the current entry and proceed with
+ // canCommit.
+ cohortEntry.updateLastAccessTime();
+ currentCohortEntry = cohortEntry;
+
+ doCanCommit(cohortEntry);
+ }
+ }
+
+ private void doCanCommit(final CohortEntry cohortEntry) {
+
+ try {
+ // We block on the future here so we don't have to worry about possibly accessing our
+ // state on a different thread outside of our dispatcher. Also, the data store
+ // currently uses a same thread executor anyway.
+ Boolean canCommit = cohortEntry.getCohort().canCommit().get();
+
+ cohortEntry.getCanCommitSender().tell(
+ canCommit ? CAN_COMMIT_REPLY_TRUE : CAN_COMMIT_REPLY_FALSE, cohortEntry.getShard());
+
+ if(!canCommit) {
+ // Remove the entry from the cache now since the Tx will be aborted.
+ removeCohortEntry(cohortEntry.getTransactionID());
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ LOG.debug("An exception occurred during canCommit", e);
+
+ // Remove the entry from the cache now since the Tx will be aborted.
+ removeCohortEntry(cohortEntry.getTransactionID());
+ cohortEntry.getCanCommitSender().tell(new Status.Failure(e), cohortEntry.getShard());
+ }
+ }
+
+ /**
+ * Returns the cohort entry for the Tx commit currently in progress if the given transaction ID
+ * matches the current entry.
+ *
+ * @param transactionID the ID of the transaction
+ * @return the current CohortEntry or null if the given transaction ID does not match the
+ * current entry.
+ */
+ public CohortEntry getCohortEntryIfCurrent(String transactionID) {
+ if(isCurrentTransaction(transactionID)) {
+ return currentCohortEntry;
+ }
+
+ return null;
+ }
+
+ public CohortEntry getCurrentCohortEntry() {
+ return currentCohortEntry;
+ }
+
+ public CohortEntry getAndRemoveCohortEntry(String transactionID) {
+ CohortEntry cohortEntry = cohortCache.getIfPresent(transactionID);
+ cohortCache.invalidate(transactionID);
+ return cohortEntry;
+ }
+
+ public void removeCohortEntry(String transactionID) {
+ cohortCache.invalidate(transactionID);
+ }
+
+ public boolean isCurrentTransaction(String transactionID) {
+ return currentCohortEntry != null &&
+ currentCohortEntry.getTransactionID().equals(transactionID);
+ }
+
+ /**
+ * This method is called when a transaction is complete, successful or not. If the given
+ * given transaction ID matches the current in-progress transaction, the next cohort entry,
+ * if any, is dequeued and processed.
+ *
+ * @param transactionID the ID of the completed transaction
+ * @param removeCohortEntry if true the CohortEntry for the transaction is also removed from
+ * the cache.
+ */
+ public void currentTransactionComplete(String transactionID, boolean removeCohortEntry) {
+ if(removeCohortEntry) {
+ removeCohortEntry(transactionID);
+ }
+
+ if(isCurrentTransaction(transactionID)) {
+ // Dequeue the next cohort entry waiting in the queue.
+ currentCohortEntry = queuedCohortEntries.poll();
+ if(currentCohortEntry != null) {
+ doCanCommit(currentCohortEntry);
+ }
+ }
+ }
+
+ static class CohortEntry {
+ private final String transactionID;
+ private final DOMStoreThreePhaseCommitCohort cohort;
+ private final Modification modification;
+ private ActorRef canCommitSender;
+ private ActorRef shard;
+ private long lastAccessTime;
+
+ CohortEntry(String transactionID, DOMStoreThreePhaseCommitCohort cohort,
+ Modification modification) {
+ this.transactionID = transactionID;
+ this.cohort = cohort;
+ this.modification = modification;
+ }
+
+ void updateLastAccessTime() {
+ lastAccessTime = System.currentTimeMillis();
+ }
+
+ long getLastAccessTime() {
+ return lastAccessTime;
+ }
+
+ String getTransactionID() {
+ return transactionID;
+ }
+
+ DOMStoreThreePhaseCommitCohort getCohort() {
+ return cohort;
+ }
+
+ Modification getModification() {
+ return modification;
+ }
+
+ ActorRef getCanCommitSender() {
+ return canCommitSender;
+ }
+
+ void setCanCommitSender(ActorRef canCommitSender) {
+ this.canCommitSender = canCommitSender;
+ }
+
+ ActorRef getShard() {
+ return shard;
+ }
+
+ void setShard(ActorRef shard) {
+ this.shard = shard;
+ }
+ }
+}
import akka.actor.Props;
import akka.actor.SupervisorStrategy;
import akka.cluster.ClusterEvent;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
import akka.japi.Creator;
import akka.japi.Function;
+import akka.japi.Procedure;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.RecoveryFailure;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-
+import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfoMBean;
+import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
+import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import scala.concurrent.duration.Duration;
+import java.io.Serializable;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
/**
* The ShardManager has the following jobs,
* <li> Monitor the cluster members and store their addresses
* <ul>
*/
-public class ShardManager extends AbstractUntypedActorWithMetering {
+public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
+
+ protected final LoggingAdapter LOG =
+ Logging.getLogger(getContext().system(), this);
// Stores a mapping between a member name and the address of the member
// Member names look like "member-1", "member-2" etc and are as specified
private final DatastoreContext datastoreContext;
+ private final Collection<String> knownModules = new HashSet<>(128);
+
+ private final DataPersistenceProvider dataPersistenceProvider;
+
/**
* @param type defines the kind of data that goes into shards created by this shard manager. Examples of type would be
* configuration or operational
*/
- private ShardManager(String type, ClusterWrapper cluster, Configuration configuration,
+ protected ShardManager(String type, ClusterWrapper cluster, Configuration configuration,
DatastoreContext datastoreContext) {
this.type = Preconditions.checkNotNull(type, "type should not be null");
this.cluster = Preconditions.checkNotNull(cluster, "cluster should not be null");
this.configuration = Preconditions.checkNotNull(configuration, "configuration should not be null");
this.datastoreContext = datastoreContext;
+ this.dataPersistenceProvider = createDataPersistenceProvider(datastoreContext.isPersistent());
// Subscribe this actor to cluster member events
cluster.subscribeToMemberEvents(getSelf());
- //createLocalShards(null);
+ createLocalShards();
+ }
+
+ protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
+ return (persistent) ? new PersistentDataProvider() : new NonPersistentDataProvider();
}
public static Props props(final String type,
}
@Override
- public void handleReceive(Object message) throws Exception {
+ public void handleCommand(Object message) throws Exception {
if (message.getClass().equals(FindPrimary.SERIALIZABLE_CLASS)) {
- findPrimary(
- FindPrimary.fromSerializable(message));
+ findPrimary(FindPrimary.fromSerializable(message));
} else if(message instanceof FindLocalShard){
findLocalShard((FindLocalShard) message);
} else if (message instanceof UpdateSchemaContext) {
updateSchemaContext(message);
+ } else if(message instanceof ActorInitialized) {
+ onActorInitialized(message);
} else if (message instanceof ClusterEvent.MemberUp){
memberUp((ClusterEvent.MemberUp) message);
} else if(message instanceof ClusterEvent.MemberRemoved) {
}
+ private void onActorInitialized(Object message) {
+ final ActorRef sender = getSender();
+
+ if (sender == null) {
+ return; //why is a non-actor sending this message? Just ignore.
+ }
+
+ String actorName = sender.path().name();
+ //find shard name from actor name; actor name is stringified shardId
+ ShardIdentifier shardId = ShardIdentifier.builder().fromShardIdString(actorName).build();
+
+ if (shardId.getShardName() == null) {
+ return;
+ }
+ markShardAsInitialized(shardId.getShardName());
+ }
+
+ private void markShardAsInitialized(String shardName) {
+ LOG.debug("Initializing shard [{}]", shardName);
+ ShardInformation shardInformation = localShards.get(shardName);
+ if (shardInformation != null) {
+ shardInformation.setActorInitialized();
+ }
+ }
+
+ @Override
+ protected void handleRecover(Object message) throws Exception {
+ if(dataPersistenceProvider.isRecoveryApplicable()) {
+ if (message instanceof SchemaContextModules) {
+ SchemaContextModules msg = (SchemaContextModules) message;
+ knownModules.clear();
+ knownModules.addAll(msg.getModules());
+ } else if (message instanceof RecoveryFailure) {
+ RecoveryFailure failure = (RecoveryFailure) message;
+ LOG.error(failure.cause(), "Recovery failed");
+ } else if (message instanceof RecoveryCompleted) {
+ LOG.info("Recovery complete : {}", persistenceId());
+
+ // Delete all the messages from the akka journal except the last one
+ deleteMessages(lastSequenceNr() - 1);
+ }
+ } else {
+ if (message instanceof RecoveryCompleted) {
+ LOG.info("Recovery complete : {}", persistenceId());
+
+ // Delete all the messages from the akka journal
+ deleteMessages(lastSequenceNr());
+ }
+ }
+ }
+
private void findLocalShard(FindLocalShard message) {
- ShardInformation shardInformation =
- localShards.get(message.getShardName());
+ final ShardInformation shardInformation = localShards.get(message.getShardName());
+
+ if(shardInformation == null){
+ getSender().tell(new LocalShardNotFound(message.getShardName()), getSelf());
+ return;
+ }
+
+ sendResponse(shardInformation, message.isWaitUntilInitialized(), new Supplier<Object>() {
+ @Override
+ public Object get() {
+ return new LocalShardFound(shardInformation.getActor());
+ }
+ });
+ }
+
+ private void sendResponse(ShardInformation shardInformation, boolean waitUntilInitialized,
+ final Supplier<Object> messageSupplier) {
+ if (!shardInformation.isShardInitialized()) {
+ if(waitUntilInitialized) {
+ final ActorRef sender = getSender();
+ final ActorRef self = self();
+ shardInformation.addRunnableOnInitialized(new Runnable() {
+ @Override
+ public void run() {
+ sender.tell(messageSupplier.get(), self);
+ }
+ });
+ } else {
+ getSender().tell(new ActorNotInitialized(), getSelf());
+ }
- if(shardInformation != null){
- getSender().tell(new LocalShardFound(shardInformation.getActor()), getSelf());
return;
}
- getSender().tell(new LocalShardNotFound(message.getShardName()),
- getSelf());
+ getSender().tell(messageSupplier.get(), getSelf());
}
private void memberRemoved(ClusterEvent.MemberRemoved message) {
private void memberUp(ClusterEvent.MemberUp message) {
String memberName = message.member().roles().head();
- memberNameToAddress.put(memberName , message.member().address());
+ memberNameToAddress.put(memberName, message.member().address());
for(ShardInformation info : localShards.values()){
String shardName = info.getShardName();
*
* @param message
*/
- private void updateSchemaContext(Object message) {
- SchemaContext schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+ private void updateSchemaContext(final Object message) {
+ final SchemaContext schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+
+ Set<ModuleIdentifier> allModuleIdentifiers = schemaContext.getAllModuleIdentifiers();
+ Set<String> newModules = new HashSet<>(128);
+
+ for(ModuleIdentifier moduleIdentifier : allModuleIdentifiers){
+ String s = moduleIdentifier.getNamespace().toString();
+ newModules.add(s);
+ }
+
+ if(newModules.containsAll(knownModules)) {
+
+ LOG.info("New SchemaContext has a super set of current knownModules - persisting info");
+
+ knownModules.clear();
+ knownModules.addAll(newModules);
+
+ dataPersistenceProvider.persist(new SchemaContextModules(newModules), new Procedure<SchemaContextModules>() {
+
+ @Override
+ public void apply(SchemaContextModules param) throws Exception {
+ LOG.info("Sending new SchemaContext to Shards");
+ for (ShardInformation info : localShards.values()) {
+ if (info.getActor() == null) {
+ info.setActor(getContext().actorOf(Shard.props(info.getShardId(),
+ info.getPeerAddresses(), datastoreContext, schemaContext),
+ info.getShardId().toString()));
+ } else {
+ info.getActor().tell(message, getSelf());
+ }
+ }
+ }
- if(localShards.size() == 0){
- createLocalShards(schemaContext);
+ });
} else {
- for (ShardInformation info : localShards.values()) {
- info.getActor().tell(message, getSelf());
- }
+ LOG.info("Rejecting schema context update because it is not a super set of previously known modules");
}
+
}
private void findPrimary(FindPrimary message) {
String shardName = message.getShardName();
// First see if the there is a local replica for the shard
- ShardInformation info = localShards.get(shardName);
- if(info != null) {
- ActorPath shardPath = info.getActorPath();
- if (shardPath != null) {
- getSender()
- .tell(
- new PrimaryFound(shardPath.toString()).toSerializable(),
- getSelf());
- return;
- }
+ final ShardInformation info = localShards.get(shardName);
+ if (info != null) {
+ sendResponse(info, message.isWaitUntilInitialized(), new Supplier<Object>() {
+ @Override
+ public Object get() {
+ return new PrimaryFound(info.getActorPath().toString()).toSerializable();
+ }
+ });
+
+ return;
}
- List<String> members =
- configuration.getMembersFromShardName(shardName);
+ List<String> members = configuration.getMembersFromShardName(shardName);
if(cluster.getCurrentMemberName() != null) {
members.remove(cluster.getCurrentMemberName());
}
+ /**
+ * FIXME: Instead of sending remote shard actor path back to sender,
+ * forward FindPrimary message to remote shard manager
+ */
// There is no way for us to figure out the primary (for now) so assume
// that one of the remote nodes is a primary
for(String memberName : members) {
* runs
*
*/
- private void createLocalShards(SchemaContext schemaContext) {
+ private void createLocalShards() {
String memberName = this.cluster.getCurrentMemberName();
List<String> memberShardNames =
this.configuration.getMemberShardNames(memberName);
for(String shardName : memberShardNames){
ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
Map<ShardIdentifier, String> peerAddresses = getPeerAddresses(shardName);
- ActorRef actor = getContext()
- .actorOf(Shard.props(shardId, peerAddresses, datastoreContext, schemaContext).
- withMailbox(ActorContext.MAILBOX), shardId.toString());
localShardActorNames.add(shardId.toString());
- localShards.put(shardName, new ShardInformation(shardName, actor, peerAddresses));
+ localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses));
}
mBean = ShardManagerInfo.createShardManagerMBean("shard-manager-" + this.type,
}
+ @Override
+ public String persistenceId() {
+ return "shard-manager-" + type;
+ }
+
+ @VisibleForTesting
+ Collection<String> getKnownModules() {
+ return knownModules;
+ }
+
+ @VisibleForTesting
+ DataPersistenceProvider getDataPersistenceProvider() {
+ return dataPersistenceProvider;
+ }
+
private class ShardInformation {
+ private final ShardIdentifier shardId;
private final String shardName;
- private final ActorRef actor;
- private final ActorPath actorPath;
+ private ActorRef actor;
+ private ActorPath actorPath;
private final Map<ShardIdentifier, String> peerAddresses;
- private ShardInformation(String shardName, ActorRef actor,
- Map<ShardIdentifier, String> peerAddresses) {
+ // flag that determines if the actor is ready for business
+ private boolean actorInitialized = false;
+
+ private final List<Runnable> runnablesOnInitialized = Lists.newArrayList();
+
+ private ShardInformation(String shardName, ShardIdentifier shardId,
+ Map<ShardIdentifier, String> peerAddresses) {
this.shardName = shardName;
- this.actor = actor;
- this.actorPath = actor.path();
+ this.shardId = shardId;
this.peerAddresses = peerAddresses;
}
- public String getShardName() {
+ String getShardName() {
return shardName;
}
- public ActorRef getActor(){
+ ActorRef getActor(){
return actor;
}
- public ActorPath getActorPath() {
+ ActorPath getActorPath() {
return actorPath;
}
- public void updatePeerAddress(ShardIdentifier peerId, String peerAddress){
+ void setActor(ActorRef actor) {
+ this.actor = actor;
+ this.actorPath = actor.path();
+ }
+
+ ShardIdentifier getShardId() {
+ return shardId;
+ }
+
+ Map<ShardIdentifier, String> getPeerAddresses() {
+ return peerAddresses;
+ }
+
+ void updatePeerAddress(ShardIdentifier peerId, String peerAddress){
LOG.info("updatePeerAddress for peer {} with address {}", peerId,
peerAddress);
if(peerAddresses.containsKey(peerId)){
peerAddresses.put(peerId, peerAddress);
- if(LOG.isDebugEnabled()) {
- LOG.debug(
- "Sending PeerAddressResolved for peer {} with address {} to {}",
- peerId, peerAddress, actor.path());
+
+ if(actor != null) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Sending PeerAddressResolved for peer {} with address {} to {}",
+ peerId, peerAddress, actor.path());
+ }
+
+ actor.tell(new PeerAddressResolved(peerId, peerAddress), getSelf());
}
- actor
- .tell(new PeerAddressResolved(peerId, peerAddress),
- getSelf());
+ }
+ }
+
+ boolean isShardInitialized() {
+ return getActor() != null && actorInitialized;
+ }
+
+ void setActorInitialized() {
+ this.actorInitialized = true;
+ for(Runnable runnable: runnablesOnInitialized) {
+ runnable.run();
}
+
+ runnablesOnInitialized.clear();
+ }
+
+ void addRunnableOnInitialized(Runnable runnable) {
+ runnablesOnInitialized.add(runnable);
}
}
return new ShardManager(type, cluster, configuration, datastoreContext);
}
}
+
+ static class SchemaContextModules implements Serializable {
+ private final Set<String> modules;
+
+ SchemaContextModules(Set<String> modules){
+ this.modules = modules;
+ }
+
+ public Set<String> getModules() {
+ return modules;
+ }
+ }
}
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
-
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
private final DOMStoreReadTransaction transaction;
public ShardReadTransaction(DOMStoreReadTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats) {
- super(shardActor, schemaContext, shardStats);
+ SchemaContext schemaContext, ShardStats shardStats, String transactionID) {
+ super(shardActor, schemaContext, shardStats, transactionID);
this.transaction = transaction;
}
@Override
public void handleReceive(Object message) throws Exception {
- if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readData(transaction, ReadData.fromSerializable(message));
+ if(message instanceof ReadData) {
+ readData(transaction, (ReadData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof DataExists) {
+ dataExists(transaction, (DataExists) message, !SERIALIZED_REPLY);
+
+ } else if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ readData(transaction, ReadData.fromSerializable(message), SERIALIZED_REPLY);
+
} else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
- dataExists(transaction, DataExists.fromSerializable(message));
+ dataExists(transaction, DataExists.fromSerializable(message), SERIALIZED_REPLY);
+
} else {
super.handleReceive(message);
}
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
-import org.opendaylight.controller.cluster.datastore.messages.MergeData;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.WriteData;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
/**
* @author: syedbahm
* Date: 8/6/14
*/
-public class ShardReadWriteTransaction extends ShardTransaction {
+public class ShardReadWriteTransaction extends ShardWriteTransaction {
private final DOMStoreReadWriteTransaction transaction;
public ShardReadWriteTransaction(DOMStoreReadWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats) {
- super(shardActor, schemaContext, shardStats);
+ SchemaContext schemaContext, ShardStats shardStats, String transactionID) {
+ super(transaction, shardActor, schemaContext, shardStats, transactionID);
this.transaction = transaction;
}
@Override
public void handleReceive(Object message) throws Exception {
- if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readData(transaction, ReadData.fromSerializable(message));
- } else if(WriteData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- writeData(transaction, WriteData.fromSerializable(message, schemaContext));
- } else if(MergeData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- mergeData(transaction, MergeData.fromSerializable(message, schemaContext));
- } else if(DeleteData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- deleteData(transaction, DeleteData.fromSerializable(message));
- } else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readyTransaction(transaction, new ReadyTransaction());
+ if (message instanceof ReadData) {
+ readData(transaction, (ReadData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof DataExists) {
+ dataExists(transaction, (DataExists) message, !SERIALIZED_REPLY);
+
+ } else if(ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ readData(transaction, ReadData.fromSerializable(message), SERIALIZED_REPLY);
+
} else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
- dataExists(transaction, DataExists.fromSerializable(message));
+ dataExists(transaction, DataExists.fromSerializable(message), SERIALIZED_REPLY);
+
} else {
super.handleReceive(message);
}
}
-
- @Override
- protected DOMStoreTransaction getDOMStoreTransaction() {
- return transaction;
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
+ * and journal log entry batch are de-serialized and applied to their own write transaction
+ * instance in parallel on a thread pool for faster recovery time. However the transactions are
+ * committed to the data store in the order the corresponding snapshot or log batch are received
+ * to preserve data store integrity.
+ *
+ * @author Thomas Panetelis
+ */
+class ShardRecoveryCoordinator {
+
+ private static final int TIME_OUT = 10;
+
+ private static final Logger LOG = LoggerFactory.getLogger(ShardRecoveryCoordinator.class);
+
+ private final List<DOMStoreWriteTransaction> resultingTxList = Lists.newArrayList();
+ private final SchemaContext schemaContext;
+ private final String shardName;
+ private final ExecutorService executor;
+
+ ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext) {
+ this.schemaContext = schemaContext;
+ this.shardName = shardName;
+
+ executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(),
+ new ThreadFactoryBuilder().setDaemon(true)
+ .setNameFormat("ShardRecovery-" + shardName + "-%d").build());
+ }
+
+ /**
+ * Submits a batch of journal log entries.
+ *
+ * @param logEntries the serialized journal log entries
+ * @param resultingTx the write Tx to which to apply the entries
+ */
+ void submit(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
+ LogRecoveryTask task = new LogRecoveryTask(logEntries, resultingTx);
+ resultingTxList.add(resultingTx);
+ executor.execute(task);
+ }
+
+ /**
+ * Submits a snapshot.
+ *
+ * @param snapshot the serialized snapshot
+ * @param resultingTx the write Tx to which to apply the entries
+ */
+ void submit(ByteString snapshot, DOMStoreWriteTransaction resultingTx) {
+ SnapshotRecoveryTask task = new SnapshotRecoveryTask(snapshot, resultingTx);
+ resultingTxList.add(resultingTx);
+ executor.execute(task);
+ }
+
+ Collection<DOMStoreWriteTransaction> getTransactions() {
+ // Shutdown the executor and wait for task completion.
+ executor.shutdown();
+
+ try {
+ if(executor.awaitTermination(TIME_OUT, TimeUnit.MINUTES)) {
+ return resultingTxList;
+ } else {
+ LOG.error("Recovery for shard {} timed out after {} minutes", shardName, TIME_OUT);
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+
+ return Collections.emptyList();
+ }
+
+ private static abstract class ShardRecoveryTask implements Runnable {
+
+ final DOMStoreWriteTransaction resultingTx;
+
+ ShardRecoveryTask(DOMStoreWriteTransaction resultingTx) {
+ this.resultingTx = resultingTx;
+ }
+ }
+
+ private class LogRecoveryTask extends ShardRecoveryTask {
+
+ private final List<Object> logEntries;
+
+ LogRecoveryTask(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
+ super(resultingTx);
+ this.logEntries = logEntries;
+ }
+
+ @Override
+ public void run() {
+ for(int i = 0; i < logEntries.size(); i++) {
+ MutableCompositeModification.fromSerializable(
+ logEntries.get(i), schemaContext).apply(resultingTx);
+ // Null out to GC quicker.
+ logEntries.set(i, null);
+ }
+ }
+ }
+
+ private class SnapshotRecoveryTask extends ShardRecoveryTask {
+
+ private final ByteString snapshot;
+
+ SnapshotRecoveryTask(ByteString snapshot, DOMStoreWriteTransaction resultingTx) {
+ super(resultingTx);
+ this.snapshot = snapshot;
+ }
+
+ @Override
+ public void run() {
+ try {
+ NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
+ NormalizedNode<?, ?> node = new NormalizedNodeToNodeCodec(schemaContext).decode(
+ serializedNode);
+
+ // delete everything first
+ resultingTx.delete(YangInstanceIdentifier.builder().build());
+
+ // Add everything from the remote node back
+ resultingTx.write(YangInstanceIdentifier.builder().build(), node);
+ } catch (InvalidProtocolBufferException e) {
+ LOG.error("Error deserializing snapshot", e);
+ }
+ }
+ }
+}
import akka.actor.Props;
import akka.actor.ReceiveTimeout;
import akka.japi.Creator;
-
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-
-
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
-import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
-import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.MergeData;
-import org.opendaylight.controller.cluster.datastore.messages.MergeDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadData;
import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.WriteData;
-import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
-import org.opendaylight.controller.cluster.datastore.modification.CompositeModification;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.ImmutableCompositeModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
* <li> {@link org.opendaylight.controller.cluster.datastore.messages.CloseTransaction}
* </p>
*/
-public abstract class ShardTransaction extends AbstractUntypedActor {
+public abstract class ShardTransaction extends AbstractUntypedActorWithMetering {
private final ActorRef shardActor;
- protected final SchemaContext schemaContext;
+ private final SchemaContext schemaContext;
private final ShardStats shardStats;
-
- private final MutableCompositeModification modification = new MutableCompositeModification();
+ private final String transactionID;
+ protected static final boolean SERIALIZED_REPLY = true;
protected ShardTransaction(ActorRef shardActor, SchemaContext schemaContext,
- ShardStats shardStats) {
+ ShardStats shardStats, String transactionID) {
+ super("shard-tx"); //actor name override used for metering. This does not change the "real" actor name
this.shardActor = shardActor;
this.schemaContext = schemaContext;
this.shardStats = shardStats;
+ this.transactionID = transactionID;
}
public static Props props(DOMStoreTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext,DatastoreContext datastoreContext, ShardStats shardStats) {
+ SchemaContext schemaContext,DatastoreContext datastoreContext, ShardStats shardStats,
+ String transactionID) {
return Props.create(new ShardTransactionCreator(transaction, shardActor, schemaContext,
- datastoreContext, shardStats));
+ datastoreContext, shardStats, transactionID));
}
protected abstract DOMStoreTransaction getDOMStoreTransaction();
+ protected ActorRef getShardActor() {
+ return shardActor;
+ }
+
+ protected String getTransactionID() {
+ return transactionID;
+ }
+
+ protected SchemaContext getSchemaContext() {
+ return schemaContext;
+ }
+
@Override
public void handleReceive(Object message) throws Exception {
if (message.getClass().equals(CloseTransaction.SERIALIZABLE_CLASS)) {
closeTransaction(true);
- } else if (message instanceof GetCompositedModification) {
- // This is here for testing only
- getSender().tell(new GetCompositeModificationReply(
- new ImmutableCompositeModification(modification)), getSelf());
} else if (message instanceof ReceiveTimeout) {
if(LOG.isDebugEnabled()) {
LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
getSelf().tell(PoisonPill.getInstance(), getSelf());
}
- protected void readData(DOMStoreReadTransaction transaction,ReadData message) {
+ protected void readData(DOMStoreReadTransaction transaction, ReadData message, final boolean returnSerialized) {
final ActorRef sender = getSender();
final ActorRef self = getSelf();
final YangInstanceIdentifier path = message.getPath();
final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
transaction.read(path);
+
future.addListener(new Runnable() {
@Override
public void run() {
try {
Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
- if (optional.isPresent()) {
- sender.tell(new ReadDataReply(schemaContext,optional.get()).toSerializable(), self);
- } else {
- sender.tell(new ReadDataReply(schemaContext,null).toSerializable(), self);
- }
+ ReadDataReply readDataReply = new ReadDataReply(schemaContext, optional.orNull());
+
+ sender.tell((returnSerialized ? readDataReply.toSerializable():
+ readDataReply), self);
+
} catch (Exception e) {
shardStats.incrementFailedReadTransactionsCount();
sender.tell(new akka.actor.Status.Failure(e), self);
}, getContext().dispatcher());
}
- protected void dataExists(DOMStoreReadTransaction transaction, DataExists message) {
+ protected void dataExists(DOMStoreReadTransaction transaction, DataExists message,
+ final boolean returnSerialized) {
final YangInstanceIdentifier path = message.getPath();
try {
Boolean exists = transaction.exists(path).checkedGet();
- getSender().tell(new DataExistsReply(exists).toSerializable(), getSelf());
+ DataExistsReply dataExistsReply = new DataExistsReply(exists);
+ getSender().tell(returnSerialized ? dataExistsReply.toSerializable() :
+ dataExistsReply, getSelf());
} catch (ReadFailedException e) {
getSender().tell(new akka.actor.Status.Failure(e),getSelf());
}
}
- protected void writeData(DOMStoreWriteTransaction transaction, WriteData message) {
- modification.addModification(
- new WriteModification(message.getPath(), message.getData(),schemaContext));
- if(LOG.isDebugEnabled()) {
- LOG.debug("writeData at path : " + message.getPath().toString());
- }
- try {
- transaction.write(message.getPath(), message.getData());
- getSender().tell(new WriteDataReply().toSerializable(), getSelf());
- }catch(Exception e){
- getSender().tell(new akka.actor.Status.Failure(e), getSelf());
- }
- }
-
- protected void mergeData(DOMStoreWriteTransaction transaction, MergeData message) {
- modification.addModification(
- new MergeModification(message.getPath(), message.getData(), schemaContext));
- if(LOG.isDebugEnabled()) {
- LOG.debug("mergeData at path : " + message.getPath().toString());
- }
- try {
- transaction.merge(message.getPath(), message.getData());
- getSender().tell(new MergeDataReply().toSerializable(), getSelf());
- }catch(Exception e){
- getSender().tell(new akka.actor.Status.Failure(e), getSelf());
- }
- }
-
- protected void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("deleteData at path : " + message.getPath().toString());
- }
- modification.addModification(new DeleteModification(message.getPath()));
- try {
- transaction.delete(message.getPath());
- getSender().tell(new DeleteDataReply().toSerializable(), getSelf());
- }catch(Exception e){
- getSender().tell(new akka.actor.Status.Failure(e), getSelf());
- }
- }
-
- protected void readyTransaction(DOMStoreWriteTransaction transaction, ReadyTransaction message) {
- DOMStoreThreePhaseCommitCohort cohort = transaction.ready();
- ActorRef cohortActor = getContext().actorOf(
- ThreePhaseCommitCohort.props(cohort, shardActor, modification, shardStats), "cohort");
- getSender()
- .tell(new ReadyTransactionReply(cohortActor.path()).toSerializable(), getSelf());
-
- }
-
private static class ShardTransactionCreator implements Creator<ShardTransaction> {
private static final long serialVersionUID = 1L;
final SchemaContext schemaContext;
final DatastoreContext datastoreContext;
final ShardStats shardStats;
+ final String transactionID;
ShardTransactionCreator(DOMStoreTransaction transaction, ActorRef shardActor,
SchemaContext schemaContext, DatastoreContext datastoreContext,
- ShardStats shardStats) {
+ ShardStats shardStats, String transactionID) {
this.transaction = transaction;
this.shardActor = shardActor;
this.shardStats = shardStats;
this.schemaContext = schemaContext;
this.datastoreContext = datastoreContext;
+ this.transactionID = transactionID;
}
@Override
ShardTransaction tx;
if(transaction instanceof DOMStoreReadWriteTransaction) {
tx = new ShardReadWriteTransaction((DOMStoreReadWriteTransaction)transaction,
- shardActor, schemaContext, shardStats);
+ shardActor, schemaContext, shardStats, transactionID);
} else if(transaction instanceof DOMStoreReadTransaction) {
tx = new ShardReadTransaction((DOMStoreReadTransaction)transaction, shardActor,
- schemaContext, shardStats);
+ schemaContext, shardStats, transactionID);
} else {
tx = new ShardWriteTransaction((DOMStoreWriteTransaction)transaction,
- shardActor, schemaContext, shardStats);
+ shardActor, schemaContext, shardStats, transactionID);
}
tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
return tx;
}
}
-
- // These classes are in here for test purposes only
-
- static class GetCompositedModification {
- }
-
-
- static class GetCompositeModificationReply {
- private final CompositeModification modification;
-
-
- GetCompositeModificationReply(CompositeModification modification) {
- this.modification = modification;
- }
-
-
- public CompositeModification getModification() {
- return modification;
- }
- }
}
return getContext().parent();
}
- private ActorRef createTypedTransactionActor(CreateTransaction createTransaction,
- String transactionId) {
+ private ActorRef createTypedTransactionActor(CreateTransaction createTransaction) {
+ String transactionName = "shard-" + createTransaction.getTransactionId();
if(createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats), transactionId);
+ schemaContext, datastoreContext, shardStats,
+ createTransaction.getTransactionId()), transactionName);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats), transactionId);
+ schemaContext, datastoreContext, shardStats,
+ createTransaction.getTransactionId()), transactionName);
} else if (createTransaction.getTransactionType() ==
TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
return getContext().actorOf(
ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(),
- schemaContext, datastoreContext, shardStats), transactionId);
+ schemaContext, datastoreContext, shardStats,
+ createTransaction.getTransactionId()), transactionName);
} else {
throw new IllegalArgumentException (
"CreateTransaction message has unidentified transaction type=" +
private void createTransaction(CreateTransaction createTransaction) {
- ActorRef transactionActor = createTypedTransactionActor(createTransaction, "shard-" + createTransaction.getTransactionId());
- getSender()
- .tell(new CreateTransactionReply(transactionActor.path().toString(),createTransaction.getTransactionId()).toSerializable(),
- getSelf());
+ ActorRef transactionActor = createTypedTransactionActor(createTransaction);
+ getSender().tell(new CreateTransactionReply(transactionActor.path().toString(),
+ createTransaction.getTransactionId()).toSerializable(), getSelf());
}
public static Props props(DOMStoreTransactionChain chain, SchemaContext schemaContext,
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
-
+import akka.actor.PoisonPill;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
+import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
+import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.MergeData;
+import org.opendaylight.controller.cluster.datastore.messages.MergeDataReply;
import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.WriteData;
+import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
+import org.opendaylight.controller.cluster.datastore.modification.CompositeModification;
+import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
+import org.opendaylight.controller.cluster.datastore.modification.ImmutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
* Date: 8/6/14
*/
public class ShardWriteTransaction extends ShardTransaction {
+
+ private final MutableCompositeModification modification = new MutableCompositeModification();
private final DOMStoreWriteTransaction transaction;
public ShardWriteTransaction(DOMStoreWriteTransaction transaction, ActorRef shardActor,
- SchemaContext schemaContext, ShardStats shardStats) {
- super(shardActor, schemaContext, shardStats);
+ SchemaContext schemaContext, ShardStats shardStats, String transactionID) {
+ super(shardActor, schemaContext, shardStats, transactionID);
this.transaction = transaction;
}
+ @Override
+ protected DOMStoreTransaction getDOMStoreTransaction() {
+ return transaction;
+ }
+
@Override
public void handleReceive(Object message) throws Exception {
- if(WriteData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- writeData(transaction, WriteData.fromSerializable(message, schemaContext));
+
+ if (message instanceof WriteData) {
+ writeData(transaction, (WriteData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof MergeData) {
+ mergeData(transaction, (MergeData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof DeleteData) {
+ deleteData(transaction, (DeleteData) message, !SERIALIZED_REPLY);
+
+ } else if (message instanceof ReadyTransaction) {
+ readyTransaction(transaction, new ReadyTransaction(), !SERIALIZED_REPLY);
+
+ } else if(WriteData.SERIALIZABLE_CLASS.equals(message.getClass())) {
+ writeData(transaction, WriteData.fromSerializable(message, getSchemaContext()), SERIALIZED_REPLY);
+
} else if(MergeData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- mergeData(transaction, MergeData.fromSerializable(message, schemaContext));
+ mergeData(transaction, MergeData.fromSerializable(message, getSchemaContext()), SERIALIZED_REPLY);
+
} else if(DeleteData.SERIALIZABLE_CLASS.equals(message.getClass())) {
- deleteData(transaction, DeleteData.fromSerializable(message));
+ deleteData(transaction, DeleteData.fromSerializable(message), SERIALIZED_REPLY);
+
} else if(ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
- readyTransaction(transaction, new ReadyTransaction());
+ readyTransaction(transaction, new ReadyTransaction(), SERIALIZED_REPLY);
+
+ } else if (message instanceof GetCompositedModification) {
+ // This is here for testing only
+ getSender().tell(new GetCompositeModificationReply(
+ new ImmutableCompositeModification(modification)), getSelf());
} else {
super.handleReceive(message);
}
}
- @Override
- protected DOMStoreTransaction getDOMStoreTransaction() {
- return transaction;
+ private void writeData(DOMStoreWriteTransaction transaction, WriteData message,
+ boolean returnSerialized) {
+ LOG.debug("writeData at path : {}", message.getPath());
+
+ modification.addModification(
+ new WriteModification(message.getPath(), message.getData(), getSchemaContext()));
+ try {
+ transaction.write(message.getPath(), message.getData());
+ WriteDataReply writeDataReply = new WriteDataReply();
+ getSender().tell(returnSerialized ? writeDataReply.toSerializable() : writeDataReply,
+ getSelf());
+ }catch(Exception e){
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
+ }
+
+ private void mergeData(DOMStoreWriteTransaction transaction, MergeData message,
+ boolean returnSerialized) {
+ LOG.debug("mergeData at path : {}", message.getPath());
+
+ modification.addModification(
+ new MergeModification(message.getPath(), message.getData(), getSchemaContext()));
+
+ try {
+ transaction.merge(message.getPath(), message.getData());
+ MergeDataReply mergeDataReply = new MergeDataReply();
+ getSender().tell(returnSerialized ? mergeDataReply.toSerializable() : mergeDataReply ,
+ getSelf());
+ }catch(Exception e){
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
+ }
+
+ private void deleteData(DOMStoreWriteTransaction transaction, DeleteData message,
+ boolean returnSerialized) {
+ LOG.debug("deleteData at path : {}", message.getPath());
+
+ modification.addModification(new DeleteModification(message.getPath()));
+ try {
+ transaction.delete(message.getPath());
+ DeleteDataReply deleteDataReply = new DeleteDataReply();
+ getSender().tell(returnSerialized ? deleteDataReply.toSerializable() : deleteDataReply,
+ getSelf());
+ }catch(Exception e){
+ getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+ }
+ }
+
+ private void readyTransaction(DOMStoreWriteTransaction transaction, ReadyTransaction message,
+ boolean returnSerialized) {
+ String transactionID = getTransactionID();
+
+ LOG.debug("readyTransaction : {}", transactionID);
+
+ DOMStoreThreePhaseCommitCohort cohort = transaction.ready();
+
+ getShardActor().forward(new ForwardedReadyTransaction(transactionID, cohort, modification,
+ returnSerialized), getContext());
+
+ // The shard will handle the commit from here so we're no longer needed - self-destruct.
+ getSelf().tell(PoisonPill.getInstance(), getSelf());
+ }
+
+ // These classes are in here for test purposes only
+
+ static class GetCompositedModification {
+ }
+
+ static class GetCompositeModificationReply {
+ private final CompositeModification modification;
+
+
+ GetCompositeModificationReply(CompositeModification modification) {
+ this.modification = modification;
+ }
+
+ public CompositeModification getModification() {
+ return modification;
+ }
}
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
-import akka.japi.Creator;
-
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
-import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.modification.CompositeModification;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-
-public class ThreePhaseCommitCohort extends AbstractUntypedActor {
- private final DOMStoreThreePhaseCommitCohort cohort;
- private final ActorRef shardActor;
- private final CompositeModification modification;
- private final ShardStats shardStats;
-
- public ThreePhaseCommitCohort(DOMStoreThreePhaseCommitCohort cohort,
- ActorRef shardActor, CompositeModification modification, ShardStats shardStats) {
-
- this.cohort = cohort;
- this.shardActor = shardActor;
- this.modification = modification;
- this.shardStats = shardStats;
- }
-
- private final LoggingAdapter log =
- Logging.getLogger(getContext().system(), this);
-
- public static Props props(final DOMStoreThreePhaseCommitCohort cohort,
- final ActorRef shardActor, final CompositeModification modification,
- ShardStats shardStats) {
- return Props.create(new ThreePhaseCommitCohortCreator(cohort, shardActor, modification,
- shardStats));
- }
-
- @Override
- public void handleReceive(Object message) throws Exception {
- if (message.getClass()
- .equals(CanCommitTransaction.SERIALIZABLE_CLASS)) {
- canCommit(new CanCommitTransaction());
- } else if (message.getClass()
- .equals(PreCommitTransaction.SERIALIZABLE_CLASS)) {
- preCommit(new PreCommitTransaction());
- } else if (message.getClass()
- .equals(CommitTransaction.SERIALIZABLE_CLASS)) {
- commit(new CommitTransaction());
- } else if (message.getClass()
- .equals(AbortTransaction.SERIALIZABLE_CLASS)) {
- abort(new AbortTransaction());
- } else {
- unknownMessage(message);
- }
- }
-
- private void abort(AbortTransaction message) {
- final ListenableFuture<Void> future = cohort.abort();
- final ActorRef sender = getSender();
- final ActorRef self = getSelf();
-
- Futures.addCallback(future, new FutureCallback<Void>() {
- @Override
- public void onSuccess(Void v) {
- shardStats.incrementAbortTransactionsCount();
- sender
- .tell(new AbortTransactionReply().toSerializable(),
- self);
- }
-
- @Override
- public void onFailure(Throwable t) {
- LOG.error(t, "An exception happened during abort");
- sender
- .tell(new akka.actor.Status.Failure(t), self);
- }
- });
- }
-
- private void commit(CommitTransaction message) {
- // Forward the commit to the shard
- if(log.isDebugEnabled()) {
- log.debug("Forward commit transaction to Shard {} ", shardActor);
- }
- shardActor.forward(new ForwardedCommitTransaction(cohort, modification),
- getContext());
-
- getContext().parent().tell(PoisonPill.getInstance(), getSelf());
-
- }
-
- private void preCommit(PreCommitTransaction message) {
- final ListenableFuture<Void> future = cohort.preCommit();
- final ActorRef sender = getSender();
- final ActorRef self = getSelf();
- Futures.addCallback(future, new FutureCallback<Void>() {
- @Override
- public void onSuccess(Void v) {
- sender
- .tell(new PreCommitTransactionReply().toSerializable(),
- self);
- }
-
- @Override
- public void onFailure(Throwable t) {
- LOG.error(t, "An exception happened during pre-commit");
- sender
- .tell(new akka.actor.Status.Failure(t), self);
- }
- });
-
- }
-
- private void canCommit(CanCommitTransaction message) {
- final ListenableFuture<Boolean> future = cohort.canCommit();
- final ActorRef sender = getSender();
- final ActorRef self = getSelf();
- Futures.addCallback(future, new FutureCallback<Boolean>() {
- @Override
- public void onSuccess(Boolean canCommit) {
- sender.tell(new CanCommitTransactionReply(canCommit)
- .toSerializable(), self);
- }
-
- @Override
- public void onFailure(Throwable t) {
- LOG.error(t, "An exception happened during canCommit");
- sender
- .tell(new akka.actor.Status.Failure(t), self);
- }
- });
- }
-
- private static class ThreePhaseCommitCohortCreator implements Creator<ThreePhaseCommitCohort> {
- final DOMStoreThreePhaseCommitCohort cohort;
- final ActorRef shardActor;
- final CompositeModification modification;
- final ShardStats shardStats;
-
- ThreePhaseCommitCohortCreator(DOMStoreThreePhaseCommitCohort cohort,
- ActorRef shardActor, CompositeModification modification, ShardStats shardStats) {
- this.cohort = cohort;
- this.shardActor = shardActor;
- this.modification = modification;
- this.shardStats = shardStats;
- }
-
- @Override
- public ThreePhaseCommitCohort create() throws Exception {
- return new ThreePhaseCommitCohort(cohort, shardActor, modification, shardStats);
- }
- }
-}
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorPath;
import akka.actor.ActorSelection;
import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
-
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
-
import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
import scala.concurrent.Future;
import scala.runtime.AbstractFunction1;
-
import java.util.Collections;
import java.util.List;
private static final Logger LOG = LoggerFactory.getLogger(ThreePhaseCommitCohortProxy.class);
+ private static final ListenableFuture<Void> IMMEDIATE_SUCCESS =
+ com.google.common.util.concurrent.Futures.immediateFuture(null);
+
private final ActorContext actorContext;
- private final List<Future<ActorPath>> cohortPathFutures;
- private volatile List<ActorPath> cohortPaths;
+ private final List<Future<ActorSelection>> cohortFutures;
+ private volatile List<ActorSelection> cohorts;
private final String transactionId;
public ThreePhaseCommitCohortProxy(ActorContext actorContext,
- List<Future<ActorPath>> cohortPathFutures, String transactionId) {
+ List<Future<ActorSelection>> cohortFutures, String transactionId) {
this.actorContext = actorContext;
- this.cohortPathFutures = cohortPathFutures;
+ this.cohortFutures = cohortFutures;
this.transactionId = transactionId;
}
- private Future<Void> buildCohortPathsList() {
+ private Future<Void> buildCohortList() {
- Future<Iterable<ActorPath>> combinedFutures = Futures.sequence(cohortPathFutures,
+ Future<Iterable<ActorSelection>> combinedFutures = Futures.sequence(cohortFutures,
actorContext.getActorSystem().dispatcher());
- return combinedFutures.transform(new AbstractFunction1<Iterable<ActorPath>, Void>() {
+ return combinedFutures.transform(new AbstractFunction1<Iterable<ActorSelection>, Void>() {
@Override
- public Void apply(Iterable<ActorPath> paths) {
- cohortPaths = Lists.newArrayList(paths);
+ public Void apply(Iterable<ActorSelection> actorSelections) {
+ cohorts = Lists.newArrayList(actorSelections);
if(LOG.isDebugEnabled()) {
LOG.debug("Tx {} successfully built cohort path list: {}",
- transactionId, cohortPaths);
+ transactionId, cohorts);
}
return null;
}
// extracted from ReadyTransactionReply messages by the Futures that were obtained earlier
// and passed to us from upstream processing. If any one fails then we'll fail canCommit.
- buildCohortPathsList().onComplete(new OnComplete<Void>() {
+ buildCohortList().onComplete(new OnComplete<Void>() {
@Override
public void onComplete(Throwable failure, Void notUsed) throws Throwable {
if(failure != null) {
if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+ LOG.debug("Tx {}: a cohort Future failed: {}", transactionId, failure);
}
returnFuture.setException(failure);
} else {
// their canCommit processing. If any one fails then we'll fail canCommit.
Future<Iterable<Object>> combinedFuture =
- invokeCohorts(new CanCommitTransaction().toSerializable());
+ invokeCohorts(new CanCommitTransaction(transactionId).toSerializable());
combinedFuture.onComplete(new OnComplete<Iterable<Object>>() {
@Override
}
private Future<Iterable<Object>> invokeCohorts(Object message) {
- List<Future<Object>> futureList = Lists.newArrayListWithCapacity(cohortPaths.size());
- for(ActorPath actorPath : cohortPaths) {
+ List<Future<Object>> futureList = Lists.newArrayListWithCapacity(cohorts.size());
+ for(ActorSelection cohort : cohorts) {
if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
+ LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, cohort);
}
- ActorSelection cohort = actorContext.actorSelection(actorPath);
- futureList.add(actorContext.executeRemoteOperationAsync(cohort, message));
+ futureList.add(actorContext.executeOperationAsync(cohort, message));
}
return Futures.sequence(futureList, actorContext.getActorSystem().dispatcher());
@Override
public ListenableFuture<Void> preCommit() {
- return voidOperation("preCommit", new PreCommitTransaction().toSerializable(),
- PreCommitTransactionReply.SERIALIZABLE_CLASS, true);
+ // We don't need to do anything here - preCommit is done atomically with the commit phase
+ // by the shard.
+ return IMMEDIATE_SUCCESS;
}
@Override
// exception then that exception will supersede and suppress the original exception. But
// it's the original exception that is the root cause and of more interest to the client.
- return voidOperation("abort", new AbortTransaction().toSerializable(),
+ return voidOperation("abort", new AbortTransaction(transactionId).toSerializable(),
AbortTransactionReply.SERIALIZABLE_CLASS, false);
}
@Override
public ListenableFuture<Void> commit() {
- return voidOperation("commit", new CommitTransaction().toSerializable(),
+ return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
CommitTransactionReply.SERIALIZABLE_CLASS, true);
}
// The cohort actor list should already be built at this point by the canCommit phase but,
// if not for some reason, we'll try to build it here.
- if(cohortPaths != null) {
+ if(cohorts != null) {
finishVoidOperation(operationName, message, expectedResponseClass, propagateException,
returnFuture);
} else {
- buildCohortPathsList().onComplete(new OnComplete<Void>() {
+ buildCohortList().onComplete(new OnComplete<Void>() {
@Override
public void onComplete(Throwable failure, Void notUsed) throws Throwable {
if(failure != null) {
}
@VisibleForTesting
- List<Future<ActorPath>> getCohortPathFutures() {
- return Collections.unmodifiableList(cohortPathFutures);
+ List<Future<ActorSelection>> getCohortFutures() {
+ return Collections.unmodifiableList(cohortFutures);
}
}
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorPath;
+import akka.actor.ActorSelection;
import akka.dispatch.Futures;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
public class TransactionChainProxy implements DOMStoreTransactionChain{
private final ActorContext actorContext;
private final String transactionChainId;
- private volatile List<Future<ActorPath>> cohortPathFutures = Collections.emptyList();
+ private volatile List<Future<ActorSelection>> cohortFutures = Collections.emptyList();
public TransactionChainProxy(ActorContext actorContext) {
this.actorContext = actorContext;
return transactionChainId;
}
- public void onTransactionReady(List<Future<ActorPath>> cohortPathFutures){
- this.cohortPathFutures = cohortPathFutures;
+ public void onTransactionReady(List<Future<ActorSelection>> cohortFutures){
+ this.cohortFutures = cohortFutures;
}
public void waitTillCurrentTransactionReady(){
try {
Await.result(Futures
- .sequence(this.cohortPathFutures, actorContext.getActorSystem().dispatcher()),
+ .sequence(this.cohortFutures, actorContext.getActorSystem().dispatcher()),
actorContext.getOperationDuration());
} catch (Exception e) {
throw new IllegalStateException("Failed when waiting for transaction on a chain to become ready", e);
package org.opendaylight.controller.cluster.datastore;
-import akka.actor.ActorPath;
import akka.actor.ActorSelection;
+import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
-
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.FinalizablePhantomReference;
import com.google.common.base.FinalizableReferenceQueue;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.SettableFuture;
-
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
-import scala.Function1;
import scala.concurrent.Future;
-import scala.runtime.AbstractFunction1;
-
+import scala.concurrent.Promise;
+import scala.concurrent.duration.FiniteDuration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.concurrent.GuardedBy;
/**
* TransactionProxy acts as a proxy for one or more transactions that were created on a remote shard
*/
public class TransactionProxy implements DOMStoreReadWriteTransaction {
- private final TransactionChainProxy transactionChainProxy;
-
-
-
- public enum TransactionType {
+ public static enum TransactionType {
READ_ONLY,
WRITE_ONLY,
READ_WRITE
}
- static Function1<Throwable, Throwable> SAME_FAILURE_TRANSFORMER = new AbstractFunction1<
- Throwable, Throwable>() {
+ static final Mapper<Throwable, Throwable> SAME_FAILURE_TRANSFORMER =
+ new Mapper<Throwable, Throwable>() {
@Override
public Throwable apply(Throwable failure) {
return failure;
private static final AtomicLong counter = new AtomicLong();
- private static final Logger
- LOG = LoggerFactory.getLogger(TransactionProxy.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class);
+ /**
+ * Time interval in between transaction create retries.
+ */
+ private static final FiniteDuration CREATE_TX_TRY_INTERVAL =
+ FiniteDuration.create(1, TimeUnit.SECONDS);
/**
* Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
if(remoteTransactionActorsMB.get()) {
for(ActorSelection actor : remoteTransactionActors) {
LOG.trace("Sending CloseTransaction to {}", actor);
- actorContext.sendRemoteOperationAsync(actor,
+ actorContext.sendOperationAsync(actor,
new CloseTransaction().toSerializable());
}
}
private List<ActorSelection> remoteTransactionActors;
private AtomicBoolean remoteTransactionActorsMB;
- private final Map<String, TransactionContext> remoteTransactionPaths = new HashMap<>();
+ /**
+ * Stores the create transaction results per shard.
+ */
+ private final Map<String, TransactionFutureCallback> txFutureCallbackMap = new HashMap<>();
private final TransactionType transactionType;
private final ActorContext actorContext;
private final TransactionIdentifier identifier;
+ private final TransactionChainProxy transactionChainProxy;
private final SchemaContext schemaContext;
private boolean inReadyState;
this(actorContext, transactionType, null);
}
- @VisibleForTesting
- List<Future<Object>> getRecordedOperationFutures() {
- List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
- for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
- recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
- }
-
- return recordedOperationFutures;
- }
-
- public TransactionProxy(ActorContext actorContext, TransactionType transactionType, TransactionChainProxy transactionChainProxy) {
+ public TransactionProxy(ActorContext actorContext, TransactionType transactionType,
+ TransactionChainProxy transactionChainProxy) {
this.actorContext = Preconditions.checkNotNull(actorContext,
"actorContext should not be null");
this.transactionType = Preconditions.checkNotNull(transactionType,
new TransactionProxyCleanupPhantomReference(this);
phantomReferenceCache.put(cleanup, cleanup);
}
- if(LOG.isDebugEnabled()) {
- LOG.debug("Created txn {} of type {}", identifier, transactionType);
+
+ LOG.debug("Created txn {} of type {}", identifier, transactionType);
+ }
+
+ @VisibleForTesting
+ List<Future<Object>> getRecordedOperationFutures() {
+ List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
+ for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
+ }
}
+
+ return recordedOperationFutures;
}
@Override
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Read operation on write-only transaction is not allowed");
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} read {}", identifier, path);
+ LOG.debug("Tx {} read {}", identifier, path);
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future;
+ if(transactionContext != null) {
+ future = transactionContext.readData(path);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ final SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture = SettableFuture.create();
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ Futures.addCallback(transactionContext.readData(path),
+ new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
+ @Override
+ public void onSuccess(Optional<NormalizedNode<?, ?>> data) {
+ proxyFuture.set(data);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ proxyFuture.setException(t);
+ }
+ });
+ }
+ });
+
+ future = MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
}
- createTransactionIfMissing(actorContext, path);
- return transactionContext(path).readData(path);
+ return future;
}
@Override
- public CheckedFuture<Boolean, ReadFailedException> exists(YangInstanceIdentifier path) {
+ public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
"Exists operation on write-only transaction is not allowed");
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} exists {}", identifier, path);
+ LOG.debug("Tx {} exists {}", identifier, path);
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+
+ CheckedFuture<Boolean, ReadFailedException> future;
+ if(transactionContext != null) {
+ future = transactionContext.dataExists(path);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ final SettableFuture<Boolean> proxyFuture = SettableFuture.create();
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ Futures.addCallback(transactionContext.dataExists(path),
+ new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(Boolean exists) {
+ proxyFuture.set(exists);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ proxyFuture.setException(t);
+ }
+ });
+ }
+ });
+
+ future = MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
}
- createTransactionIfMissing(actorContext, path);
- return transactionContext(path).dataExists(path);
+ return future;
}
private void checkModificationState() {
}
@Override
- public void write(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkModificationState();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} write {}", identifier, path);
- }
- createTransactionIfMissing(actorContext, path);
+ LOG.debug("Tx {} write {}", identifier, path);
- transactionContext(path).writeData(path, data);
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ transactionContext.writeData(path, data);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.writeData(path, data);
+ }
+ });
+ }
}
@Override
- public void merge(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
+ public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
checkModificationState();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} merge {}", identifier, path);
- }
- createTransactionIfMissing(actorContext, path);
+ LOG.debug("Tx {} merge {}", identifier, path);
- transactionContext(path).mergeData(path, data);
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ transactionContext.mergeData(path, data);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.mergeData(path, data);
+ }
+ });
+ }
}
@Override
- public void delete(YangInstanceIdentifier path) {
+ public void delete(final YangInstanceIdentifier path) {
checkModificationState();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} delete {}", identifier, path);
- }
- createTransactionIfMissing(actorContext, path);
- transactionContext(path).deleteData(path);
+ LOG.debug("Tx {} delete {}", identifier, path);
+
+ TransactionFutureCallback txFutureCallback = getOrCreateTxFutureCallback(path);
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ transactionContext.deleteData(path);
+ } else {
+ // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
+ // callback to be executed after the Tx is created.
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.deleteData(path);
+ }
+ });
+ }
}
@Override
inReadyState = true;
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
- remoteTransactionPaths.size());
- }
- List<Future<ActorPath>> cohortPathFutures = Lists.newArrayList();
+ LOG.debug("Tx {} Readying {} transactions for commit", identifier,
+ txFutureCallbackMap.size());
- for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
+ List<Future<ActorSelection>> cohortFutures = Lists.newArrayList();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Readying transaction for shard {}", identifier,
- transactionContext.getShardName());
+ for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
+
+ LOG.debug("Tx {} Readying transaction for shard {}", identifier,
+ txFutureCallback.getShardName());
+
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ cohortFutures.add(transactionContext.readyTransaction());
+ } else {
+ // The shard Tx hasn't been created yet so create a promise to ready the Tx later
+ // after it's created.
+ final Promise<ActorSelection> cohortPromise = akka.dispatch.Futures.promise();
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ cohortPromise.completeWith(transactionContext.readyTransaction());
+ }
+ });
+
+ cohortFutures.add(cohortPromise.future());
}
- cohortPathFutures.add(transactionContext.readyTransaction());
}
if(transactionChainProxy != null){
- transactionChainProxy.onTransactionReady(cohortPathFutures);
+ transactionChainProxy.onTransactionReady(cohortFutures);
}
- return new ThreePhaseCommitCohortProxy(actorContext, cohortPathFutures,
+ return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures,
identifier.toString());
}
@Override
public void close() {
- for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
- transactionContext.closeTransaction();
+ for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
+ TransactionContext transactionContext = txFutureCallback.getTransactionContext();
+ if(transactionContext != null) {
+ transactionContext.closeTransaction();
+ } else {
+ txFutureCallback.addTxOperationOnComplete(new TransactionOperation() {
+ @Override
+ public void invoke(TransactionContext transactionContext) {
+ transactionContext.closeTransaction();
+ }
+ });
+ }
}
- remoteTransactionPaths.clear();
+ txFutureCallbackMap.clear();
if(transactionType == TransactionType.READ_ONLY) {
remoteTransactionActors.clear();
}
}
- private TransactionContext transactionContext(YangInstanceIdentifier path){
- String shardName = shardNameFromIdentifier(path);
- return remoteTransactionPaths.get(shardName);
- }
-
private String shardNameFromIdentifier(YangInstanceIdentifier path){
return ShardStrategyFactory.getStrategy(path).findShard(path);
}
- private void createTransactionIfMissing(ActorContext actorContext,
- YangInstanceIdentifier path) {
+ private TransactionFutureCallback getOrCreateTxFutureCallback(YangInstanceIdentifier path) {
+ String shardName = shardNameFromIdentifier(path);
+ TransactionFutureCallback txFutureCallback = txFutureCallbackMap.get(shardName);
+ if(txFutureCallback == null) {
+ Future<ActorSelection> findPrimaryFuture = actorContext.findPrimaryShardAsync(shardName);
- if(transactionChainProxy != null){
- transactionChainProxy.waitTillCurrentTransactionReady();
+ final TransactionFutureCallback newTxFutureCallback =
+ new TransactionFutureCallback(shardName);
+
+ txFutureCallback = newTxFutureCallback;
+ txFutureCallbackMap.put(shardName, txFutureCallback);
+
+ findPrimaryFuture.onComplete(new OnComplete<ActorSelection>() {
+ @Override
+ public void onComplete(Throwable failure, ActorSelection primaryShard) {
+ if(failure != null) {
+ newTxFutureCallback.onComplete(failure, null);
+ } else {
+ newTxFutureCallback.setPrimaryShard(primaryShard);
+ }
+ }
+ }, actorContext.getActorSystem().dispatcher());
}
- String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
+ return txFutureCallback;
+ }
+
+ public String getTransactionChainId() {
+ if(transactionChainProxy == null){
+ return "";
+ }
+ return transactionChainProxy.getTransactionChainId();
+ }
+
+ /**
+ * Interface for a transaction operation to be invoked later.
+ */
+ private static interface TransactionOperation {
+ void invoke(TransactionContext transactionContext);
+ }
+
+ /**
+ * Implements a Future OnComplete callback for a CreateTransaction message. This class handles
+ * retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
+ * retry task after a short delay.
+ * <p>
+ * The end result from a completed CreateTransaction message is a TransactionContext that is
+ * used to perform transaction operations. Transaction operations that occur before the
+ * CreateTransaction completes are cache and executed once the CreateTransaction completes,
+ * successfully or not.
+ */
+ private class TransactionFutureCallback extends OnComplete<Object> {
+
+ /**
+ * The list of transaction operations to execute once the CreateTransaction completes.
+ */
+ @GuardedBy("txOperationsOnComplete")
+ private final List<TransactionOperation> txOperationsOnComplete = Lists.newArrayList();
+
+ /**
+ * The TransactionContext resulting from the CreateTransaction reply.
+ */
+ private volatile TransactionContext transactionContext;
+
+ /**
+ * The target primary shard.
+ */
+ private volatile ActorSelection primaryShard;
+
+ private volatile int createTxTries = (int) (actorContext.getDatastoreContext().
+ getShardLeaderElectionTimeout().duration().toMillis() /
+ CREATE_TX_TRY_INTERVAL.toMillis());
- TransactionContext transactionContext =
- remoteTransactionPaths.get(shardName);
+ private final String shardName;
- if (transactionContext != null) {
- // A transaction already exists with that shard
- return;
+ TransactionFutureCallback(String shardName) {
+ this.shardName = shardName;
+ }
+
+ String getShardName() {
+ return shardName;
}
- try {
- Object response = actorContext.executeShardOperation(shardName,
- new CreateTransaction(identifier.toString(), this.transactionType.ordinal(),
- getTransactionChainId()).toSerializable());
- if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
- CreateTransactionReply reply =
- CreateTransactionReply.fromSerializable(response);
+ TransactionContext getTransactionContext() {
+ return transactionContext;
+ }
- String transactionPath = reply.getTransactionPath();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
- }
- ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
+ /**
+ * Sets the target primary shard and initiates a CreateTransaction try.
+ */
+ void setPrimaryShard(ActorSelection primaryShard) {
+ LOG.debug("Tx {} Primary shard found - trying create transaction", identifier);
- if (transactionType == TransactionType.READ_ONLY) {
- // Add the actor to the remoteTransactionActors list for access by the
- // cleanup PhantonReference.
- remoteTransactionActors.add(transactionActor);
+ this.primaryShard = primaryShard;
+ tryCreateTransaction();
+ }
- // Write to the memory barrier volatile to publish the above update to the
- // remoteTransactionActors list for thread visibility.
- remoteTransactionActorsMB.set(true);
+ /**
+ * Adds a TransactionOperation to be executed after the CreateTransaction completes.
+ */
+ void addTxOperationOnComplete(TransactionOperation operation) {
+ synchronized(txOperationsOnComplete) {
+ if(transactionContext == null) {
+ LOG.debug("Tx {} Adding operation on complete {}", identifier);
+
+ txOperationsOnComplete.add(operation);
+ } else {
+ operation.invoke(transactionContext);
}
+ }
+ }
- transactionContext = new TransactionContextImpl(shardName, transactionPath,
- transactionActor, identifier, actorContext, schemaContext);
+ /**
+ * Performs a CreateTransaction try async.
+ */
+ private void tryCreateTransaction() {
+ Future<Object> createTxFuture = actorContext.executeOperationAsync(primaryShard,
+ new CreateTransaction(identifier.toString(),
+ TransactionProxy.this.transactionType.ordinal(),
+ getTransactionChainId()).toSerializable());
- remoteTransactionPaths.put(shardName, transactionContext);
- } else {
- throw new IllegalArgumentException(String.format(
- "Invalid reply type {} for CreateTransaction", response.getClass()));
+ createTxFuture.onComplete(this, actorContext.getActorSystem().dispatcher());
+ }
+
+ @Override
+ public void onComplete(Throwable failure, Object response) {
+ if(failure instanceof NoShardLeaderException) {
+ // There's no leader for the shard yet - schedule and try again, unless we're out
+ // of retries. Note: createTxTries is volatile as it may be written by different
+ // threads however not concurrently, therefore decrementing it non-atomically here
+ // is ok.
+ if(--createTxTries > 0) {
+ LOG.debug("Tx {} Shard {} has no leader yet - scheduling create Tx retry",
+ identifier, shardName);
+
+ actorContext.getActorSystem().scheduler().scheduleOnce(CREATE_TX_TRY_INTERVAL,
+ new Runnable() {
+ @Override
+ public void run() {
+ tryCreateTransaction();
+ }
+ }, actorContext.getActorSystem().dispatcher());
+ return;
+ }
}
- } catch (Exception e) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
+
+ // Create the TransactionContext from the response or failure and execute delayed
+ // TransactionOperations. This entire section is done atomically (ie synchronized) with
+ // respect to #addTxOperationOnComplete to handle timing issues and ensure no
+ // TransactionOperation is missed and that they are processed in the order they occurred.
+ synchronized(txOperationsOnComplete) {
+ if(failure != null) {
+ LOG.debug("Tx {} Creating NoOpTransaction because of error: {}", identifier,
+ failure.getMessage());
+
+ transactionContext = new NoOpTransactionContext(failure, identifier);
+ } else if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
+ createValidTransactionContext(CreateTransactionReply.fromSerializable(response));
+ } else {
+ IllegalArgumentException exception = new IllegalArgumentException(String.format(
+ "Invalid reply type %s for CreateTransaction", response.getClass()));
+
+ transactionContext = new NoOpTransactionContext(exception, identifier);
+ }
+
+ for(TransactionOperation oper: txOperationsOnComplete) {
+ oper.invoke(transactionContext);
+ }
+
+ txOperationsOnComplete.clear();
}
- remoteTransactionPaths
- .put(shardName, new NoOpTransactionContext(shardName, e, identifier));
}
- }
- public String getTransactionChainId() {
- if(transactionChainProxy == null){
- return "";
+ private void createValidTransactionContext(CreateTransactionReply reply) {
+ String transactionPath = reply.getTransactionPath();
+
+ LOG.debug("Tx {} Received transaction actor path {}", identifier, transactionPath);
+
+ ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
+
+ if (transactionType == TransactionType.READ_ONLY) {
+ // Add the actor to the remoteTransactionActors list for access by the
+ // cleanup PhantonReference.
+ remoteTransactionActors.add(transactionActor);
+
+ // Write to the memory barrier volatile to publish the above update to the
+ // remoteTransactionActors list for thread visibility.
+ remoteTransactionActorsMB.set(true);
+ }
+
+ // TxActor is always created where the leader of the shard is.
+ // Check if TxActor is created in the same node
+ boolean isTxActorLocal = actorContext.isLocalPath(transactionPath);
+
+ transactionContext = new TransactionContextImpl(transactionActor, identifier,
+ actorContext, schemaContext, isTxActorLocal);
}
- return transactionChainProxy.getTransactionChainId();
}
-
private interface TransactionContext {
- String getShardName();
-
void closeTransaction();
- Future<ActorPath> readyTransaction();
+ Future<ActorSelection> readyTransaction();
void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
private static abstract class AbstractTransactionContext implements TransactionContext {
protected final TransactionIdentifier identifier;
- protected final String shardName;
protected final List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
- AbstractTransactionContext(String shardName, TransactionIdentifier identifier) {
- this.shardName = shardName;
+ AbstractTransactionContext(TransactionIdentifier identifier) {
this.identifier = identifier;
}
- @Override
- public String getShardName() {
- return shardName;
- }
-
@Override
public List<Future<Object>> getRecordedOperationFutures() {
return recordedOperationFutures;
private final ActorContext actorContext;
private final SchemaContext schemaContext;
- private final String actorPath;
private final ActorSelection actor;
+ private final boolean isTxActorLocal;
- private TransactionContextImpl(String shardName, String actorPath,
- ActorSelection actor, TransactionIdentifier identifier, ActorContext actorContext,
- SchemaContext schemaContext) {
- super(shardName, identifier);
- this.actorPath = actorPath;
+ private TransactionContextImpl(ActorSelection actor, TransactionIdentifier identifier,
+ ActorContext actorContext, SchemaContext schemaContext,
+ boolean isTxActorLocal) {
+ super(identifier);
this.actor = actor;
this.actorContext = actorContext;
this.schemaContext = schemaContext;
+ this.isTxActorLocal = isTxActorLocal;
}
private ActorSelection getActor() {
return actor;
}
- private String getResolvedCohortPath(String cohortPath) {
- return actorContext.resolvePath(actorPath, cohortPath);
- }
-
@Override
public void closeTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} closeTransaction called", identifier);
- }
- actorContext.sendRemoteOperationAsync(getActor(), new CloseTransaction().toSerializable());
+ LOG.debug("Tx {} closeTransaction called", identifier);
+
+ actorContext.sendOperationAsync(getActor(), new CloseTransaction().toSerializable());
}
@Override
- public Future<ActorPath> readyTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
+ public Future<ActorSelection> readyTransaction() {
+ LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
identifier, recordedOperationFutures.size());
- }
+
// Send the ReadyTransaction message to the Tx actor.
- final Future<Object> replyFuture = actorContext.executeRemoteOperationAsync(getActor(),
- new ReadyTransaction().toSerializable());
+ ReadyTransaction readyTransaction = new ReadyTransaction();
+ final Future<Object> replyFuture = actorContext.executeOperationAsync(getActor(),
+ isTxActorLocal ? readyTransaction : readyTransaction.toSerializable());
// Combine all the previously recorded put/merge/delete operation reply Futures and the
// ReadyTransactionReply Future into one Future. If any one fails then the combined
// Transform the combined Future into a Future that returns the cohort actor path from
// the ReadyTransactionReply. That's the end result of the ready operation.
- return combinedFutures.transform(new AbstractFunction1<Iterable<Object>, ActorPath>() {
+ return combinedFutures.transform(new Mapper<Iterable<Object>, ActorSelection>() {
@Override
- public ActorPath apply(Iterable<Object> notUsed) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
+ public ActorSelection checkedApply(Iterable<Object> notUsed) {
+ LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
identifier);
- }
+
// At this point all the Futures succeeded and we need to extract the cohort
// actor path from the ReadyTransactionReply. For the recorded operations, they
// don't return any data so we're only interested that they completed
// Note the Future get call here won't block as it's complete.
Object serializedReadyReply = replyFuture.value().get().get();
- if(serializedReadyReply.getClass().equals(
- ReadyTransactionReply.SERIALIZABLE_CLASS)) {
- ReadyTransactionReply reply = ReadyTransactionReply.fromSerializable(
- actorContext.getActorSystem(), serializedReadyReply);
+ if (serializedReadyReply instanceof ReadyTransactionReply) {
+ return actorContext.actorSelection(((ReadyTransactionReply)serializedReadyReply).getCohortPath());
- String resolvedCohortPath = getResolvedCohortPath(
- reply.getCohortPath().toString());
+ } else if(serializedReadyReply.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
+ ReadyTransactionReply reply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
+ return actorContext.actorSelection(reply.getCohortPath());
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
- identifier, resolvedCohortPath);
- }
- return actorContext.actorFor(resolvedCohortPath);
} else {
// Throwing an exception here will fail the Future.
-
throw new IllegalArgumentException(String.format("Invalid reply type {}",
serializedReadyReply.getClass()));
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
- }
- recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
- new DeleteData(path).toSerializable() ));
+ LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+
+ DeleteData deleteData = new DeleteData(path);
+ recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
+ isTxActorLocal ? deleteData : deleteData.toSerializable()));
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
- }
- recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
- new MergeData(path, data, schemaContext).toSerializable()));
+ LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+
+ MergeData mergeData = new MergeData(path, data, schemaContext);
+ recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
+ isTxActorLocal ? mergeData : mergeData.toSerializable()));
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
- }
- recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
- new WriteData(path, data, schemaContext).toSerializable()));
+ LOG.debug("Tx {} writeData called path = {}", identifier, path);
+
+ WriteData writeData = new WriteData(path, data, schemaContext);
+ recordedOperationFutures.add(actorContext.executeOperationAsync(getActor(),
+ isTxActorLocal ? writeData : writeData.toSerializable()));
}
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
final YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} readData called path = {}", identifier, path);
+
final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture = SettableFuture.create();
// If there were any previous recorded put/merge/delete operation reply Futures then we
if(recordedOperationFutures.isEmpty()) {
finishReadData(path, returnFuture);
} else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readData: verifying {} previous recorded operations",
+ LOG.debug("Tx {} readData: verifying {} previous recorded operations",
identifier, recordedOperationFutures.size());
- }
+
// Note: we make a copy of recordedOperationFutures to be on the safe side in case
// Futures#sequence accesses the passed List on a different thread, as
// recordedOperationFutures is not synchronized.
Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
Lists.newArrayList(recordedOperationFutures),
actorContext.getActorSystem().dispatcher());
+
OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
@Override
public void onComplete(Throwable failure, Iterable<Object> notUsed)
throws Throwable {
if(failure != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readData: a recorded operation failed: {}",
+ LOG.debug("Tx {} readData: a recorded operation failed: {}",
identifier, failure);
- }
returnFuture.setException(new ReadFailedException(
"The read could not be performed because a previous put, merge,"
+ "or delete operation failed", failure));
private void finishReadData(final YangInstanceIdentifier path,
final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
+
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object readResponse) throws Throwable {
if(failure != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} read operation failed: {}", identifier, failure);
- }
+ LOG.debug("Tx {} read operation failed: {}", identifier, failure);
returnFuture.setException(new ReadFailedException(
"Error reading data for path " + path, failure));
} else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} read operation succeeded", identifier, failure);
- }
- if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
- ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext,
- path, readResponse);
- if (reply.getNormalizedNode() == null) {
- returnFuture.set(Optional.<NormalizedNode<?, ?>>absent());
- } else {
- returnFuture.set(Optional.<NormalizedNode<?, ?>>of(
- reply.getNormalizedNode()));
- }
+ LOG.debug("Tx {} read operation succeeded", identifier, failure);
+
+ if (readResponse instanceof ReadDataReply) {
+ ReadDataReply reply = (ReadDataReply) readResponse;
+ returnFuture.set(Optional.<NormalizedNode<?, ?>>fromNullable(reply.getNormalizedNode()));
+
+ } else if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
+ ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext, path, readResponse);
+ returnFuture.set(Optional.<NormalizedNode<?, ?>>fromNullable(reply.getNormalizedNode()));
+
} else {
returnFuture.setException(new ReadFailedException(
- "Invalid response reading data for path " + path));
+ "Invalid response reading data for path " + path));
}
}
}
};
- Future<Object> readFuture = actorContext.executeRemoteOperationAsync(getActor(),
- new ReadData(path).toSerializable());
+ ReadData readData = new ReadData(path);
+ Future<Object> readFuture = actorContext.executeOperationAsync(getActor(),
+ isTxActorLocal ? readData : readData.toSerializable());
+
readFuture.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
}
public CheckedFuture<Boolean, ReadFailedException> dataExists(
final YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+
final SettableFuture<Boolean> returnFuture = SettableFuture.create();
// If there were any previous recorded put/merge/delete operation reply Futures then we
if(recordedOperationFutures.isEmpty()) {
finishDataExists(path, returnFuture);
} else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
+ LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
identifier, recordedOperationFutures.size());
- }
+
// Note: we make a copy of recordedOperationFutures to be on the safe side in case
// Futures#sequence accesses the passed List on a different thread, as
// recordedOperationFutures is not synchronized.
public void onComplete(Throwable failure, Iterable<Object> notUsed)
throws Throwable {
if(failure != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
+ LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
identifier, failure);
- }
returnFuture.setException(new ReadFailedException(
"The data exists could not be performed because a previous "
+ "put, merge, or delete operation failed", failure));
private void finishDataExists(final YangInstanceIdentifier path,
final SettableFuture<Boolean> returnFuture) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
+
OnComplete<Object> onComplete = new OnComplete<Object>() {
@Override
public void onComplete(Throwable failure, Object response) throws Throwable {
if(failure != null) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
- }
+ LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
returnFuture.setException(new ReadFailedException(
"Error checking data exists for path " + path, failure));
} else {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
- }
- if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
- returnFuture.set(Boolean.valueOf(DataExistsReply.
- fromSerializable(response).exists()));
+ LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
+
+ if (response instanceof DataExistsReply) {
+ returnFuture.set(Boolean.valueOf(((DataExistsReply) response).exists()));
+
+ } else if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
+ returnFuture.set(Boolean.valueOf(DataExistsReply.fromSerializable(response).exists()));
+
} else {
returnFuture.setException(new ReadFailedException(
"Invalid response checking exists for path " + path));
}
};
- Future<Object> future = actorContext.executeRemoteOperationAsync(getActor(),
- new DataExists(path).toSerializable());
+ DataExists dataExists = new DataExists(path);
+ Future<Object> future = actorContext.executeOperationAsync(getActor(),
+ isTxActorLocal ? dataExists : dataExists.toSerializable());
+
future.onComplete(onComplete, actorContext.getActorSystem().dispatcher());
}
}
private final Logger LOG = LoggerFactory.getLogger(NoOpTransactionContext.class);
- private final Exception failure;
+ private final Throwable failure;
- public NoOpTransactionContext(String shardName, Exception failure,
- TransactionIdentifier identifier){
- super(shardName, identifier);
+ public NoOpTransactionContext(Throwable failure, TransactionIdentifier identifier){
+ super(identifier);
this.failure = failure;
}
@Override
public void closeTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
- }
+ LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
}
@Override
- public Future<ActorPath> readyTransaction() {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readyTransaction called", identifier);
- }
+ public Future<ActorSelection> readyTransaction() {
+ LOG.debug("Tx {} readyTransaction called", identifier);
return akka.dispatch.Futures.failed(failure);
}
@Override
public void deleteData(YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} deleteData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} deleteData called path = {}", identifier, path);
}
@Override
public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} mergeData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} mergeData called path = {}", identifier, path);
}
@Override
public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} writeData called path = {}", identifier, path);
- }
+ LOG.debug("Tx {} writeData called path = {}", identifier, path);
}
@Override
public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
- YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} readData called path = {}", identifier, path);
- }
+ YangInstanceIdentifier path) {
+ LOG.debug("Tx {} readData called path = {}", identifier, path);
return Futures.immediateFailedCheckedFuture(new ReadFailedException(
"Error reading data for path " + path, failure));
}
@Override
public CheckedFuture<Boolean, ReadFailedException> dataExists(
- YangInstanceIdentifier path) {
- if(LOG.isDebugEnabled()) {
- LOG.debug("Tx {} dataExists called path = {}", identifier, path);
- }
+ YangInstanceIdentifier path) {
+ LOG.debug("Tx {} dataExists called path = {}", identifier, path);
return Futures.immediateFailedCheckedFuture(new ReadFailedException(
"Error checking exists for path " + path, failure));
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.config;
+
+import com.typesafe.config.Config;
+
+public interface ConfigurationReader {
+ Config read();
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.config;
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+
+import java.io.File;
+
+public class FileConfigurationReader implements ConfigurationReader{
+
+ public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
+
+ @Override
+ public Config read() {
+ File defaultConfigFile = new File(AKKA_CONF_PATH);
+ Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
+ return ConfigFactory.parseFile(defaultConfigFile);
+
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.config;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+
+public class ResourceConfigurationReader implements ConfigurationReader {
+ @Override
+ public Config read() {
+ return ConfigFactory.load();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.exceptions;
+
+/**
+ * Exception thrown when attempting to find a local shard but it doesn't exist.
+ *
+ * @author Thomas Pantelis
+ */
+public class LocalShardNotFoundException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
+ public LocalShardNotFoundException(String message){
+ super(message);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.exceptions;
+
+/**
+ * Exception indicating a shard has no current leader.
+ *
+ * @author Thomas Pantelis
+ */
+public class NoShardLeaderException extends RuntimeException {
+ private static final long serialVersionUID = 1L;
+
+ public NoShardLeaderException(String message){
+ super(message);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.exceptions;
+
+public class NotInitializedException extends RuntimeException {
+ public NotInitializedException(String message) {
+ super(message);
+ }
+}
import com.google.common.base.Preconditions;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
public class ShardIdentifier {
private final String shardName;
private final String memberName;
private final String type;
+ //format and pattern should be in sync
+ private final String format = "%s-shard-%s-%s";
+ private static final Pattern pattern = Pattern.compile("(\\S+)-shard-(\\S+)-(\\S+)");
public ShardIdentifier(String shardName, String memberName, String type) {
}
@Override public String toString() {
- StringBuilder builder = new StringBuilder();
- builder.append(memberName).append("-shard-").append(shardName).append("-").append(type);
- return builder.toString();
+ //ensure the output of toString matches the pattern above
+ return new StringBuilder(memberName)
+ .append("-shard-")
+ .append(shardName)
+ .append("-")
+ .append(type)
+ .toString();
}
public static Builder builder(){
return new Builder();
}
+ public String getShardName() {
+ return shardName;
+ }
+
+ public String getMemberName() {
+ return memberName;
+ }
+
+ public String getType() {
+ return type;
+ }
+
public static class Builder {
private String shardName;
private String memberName;
return this;
}
+ public Builder fromShardIdString(String shardId){
+ Matcher matcher = pattern.matcher(shardId);
+
+ if (matcher.matches()) {
+ memberName = matcher.group(1);
+ shardName = matcher.group(2);
+ type = matcher.group(3);
+ }
+ return this;
+ }
}
}
public class ShardTransactionIdentifier {
private final String remoteTransactionId;
- public ShardTransactionIdentifier(String remoteTransactionId) {
- this.remoteTransactionId = Preconditions.checkNotNull(remoteTransactionId, "remoteTransactionId should not be null");
+ private ShardTransactionIdentifier(String remoteTransactionId) {
+ this.remoteTransactionId = Preconditions.checkNotNull(remoteTransactionId,
+ "remoteTransactionId should not be null");
}
public static Builder builder(){
return new Builder();
}
+ public String getRemoteTransactionId() {
+ return remoteTransactionId;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) {
}
@Override public String toString() {
- final StringBuilder sb =
- new StringBuilder();
+ final StringBuilder sb = new StringBuilder();
sb.append("shard-").append(remoteTransactionId);
return sb.toString();
}
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class AbortTransaction implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransaction.class;
+ public static final Class<ThreePhaseCommitCohortMessages.AbortTransaction> SERIALIZABLE_CLASS =
+ ThreePhaseCommitCohortMessages.AbortTransaction.class;
- @Override
- public Object toSerializable() {
- return ThreePhaseCommitCohortMessages.AbortTransaction.newBuilder().build();
- }
+ private final String transactionID;
+
+ public AbortTransaction(String transactionID) {
+ this.transactionID = transactionID;
+ }
+
+ public String getTransactionID() {
+ return transactionID;
+ }
+
+ @Override
+ public Object toSerializable() {
+ return ThreePhaseCommitCohortMessages.AbortTransaction.newBuilder().
+ setTransactionId(transactionID).build();
+ }
+
+ public static AbortTransaction fromSerializable(Object message) {
+ return new AbortTransaction(((ThreePhaseCommitCohortMessages.AbortTransaction)message).
+ getTransactionId());
+ }
}
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class AbortTransactionReply implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransactionReply.class;
+ public static final Class<ThreePhaseCommitCohortMessages.AbortTransactionReply> SERIALIZABLE_CLASS =
+ ThreePhaseCommitCohortMessages.AbortTransactionReply.class;
-
- @Override
- public Object toSerializable() {
- return ThreePhaseCommitCohortMessages.AbortTransactionReply.newBuilder().build();
- }
+ @Override
+ public Object toSerializable() {
+ return ThreePhaseCommitCohortMessages.AbortTransactionReply.newBuilder().build();
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.Serializable;
+
+public class ActorInitialized implements Serializable {
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.Serializable;
+
+public class ActorNotInitialized implements Serializable {
+}
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class CanCommitTransaction implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CanCommitTransaction.class;
+ public static final Class<?> SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CanCommitTransaction.class;
- @Override
- public Object toSerializable() {
- return ThreePhaseCommitCohortMessages.CanCommitTransaction.newBuilder().build();
- }
+ private final String transactionID;
+
+ public CanCommitTransaction(String transactionID) {
+ this.transactionID = transactionID;
+ }
+
+ public String getTransactionID() {
+ return transactionID;
+ }
+
+ @Override
+ public Object toSerializable() {
+ return ThreePhaseCommitCohortMessages.CanCommitTransaction.newBuilder().
+ setTransactionId(transactionID).build();
+ }
+
+ public static CanCommitTransaction fromSerializable(Object message) {
+ return new CanCommitTransaction(((ThreePhaseCommitCohortMessages.CanCommitTransaction)message).
+ getTransactionId());
+ }
}
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class CanCommitTransactionReply implements SerializableMessage {
- public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CanCommitTransactionReply.class;
- private final Boolean canCommit;
+ public static Class<ThreePhaseCommitCohortMessages.CanCommitTransactionReply> SERIALIZABLE_CLASS =
+ ThreePhaseCommitCohortMessages.CanCommitTransactionReply.class;
- public CanCommitTransactionReply(Boolean canCommit) {
- this.canCommit = canCommit;
- }
+ private final Boolean canCommit;
- public Boolean getCanCommit() {
- return canCommit;
- }
+ public CanCommitTransactionReply(Boolean canCommit) {
+ this.canCommit = canCommit;
+ }
- @Override
- public Object toSerializable() {
- return ThreePhaseCommitCohortMessages.CanCommitTransactionReply.newBuilder().setCanCommit(canCommit).build();
- }
+ public Boolean getCanCommit() {
+ return canCommit;
+ }
+ @Override
+ public Object toSerializable() {
+ return ThreePhaseCommitCohortMessages.CanCommitTransactionReply.newBuilder().setCanCommit(canCommit).build();
+ }
- public static CanCommitTransactionReply fromSerializable(Object message) {
- return new CanCommitTransactionReply(((ThreePhaseCommitCohortMessages.CanCommitTransactionReply)message).getCanCommit());
- }
+ public static CanCommitTransactionReply fromSerializable(Object message) {
+ return new CanCommitTransactionReply(
+ ((ThreePhaseCommitCohortMessages.CanCommitTransactionReply) message).getCanCommit());
+ }
}
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class CommitTransaction implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransaction.class;
+ public static final Class<ThreePhaseCommitCohortMessages.CommitTransaction> SERIALIZABLE_CLASS =
+ ThreePhaseCommitCohortMessages.CommitTransaction.class;
- @Override
- public Object toSerializable() {
- return ThreePhaseCommitCohortMessages.CommitTransaction.newBuilder().build();
- }
+ private final String transactionID;
+
+ public CommitTransaction(String transactionID) {
+ this.transactionID = transactionID;
+ }
+
+ public String getTransactionID() {
+ return transactionID;
+ }
+
+ @Override
+ public Object toSerializable() {
+ return ThreePhaseCommitCohortMessages.CommitTransaction.newBuilder().setTransactionId(
+ transactionID).build();
+ }
+
+ public static CommitTransaction fromSerializable(Object message) {
+ return new CommitTransaction(((ThreePhaseCommitCohortMessages.
+ CommitTransaction)message).getTransactionId());
+ }
}
import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
public class CommitTransactionReply implements SerializableMessage {
+ public static final Class<ThreePhaseCommitCohortMessages.CommitTransactionReply> SERIALIZABLE_CLASS =
+ ThreePhaseCommitCohortMessages.CommitTransactionReply.class;
- public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransactionReply.class;
-
- @Override
- public Object toSerializable() {
- return ThreePhaseCommitCohortMessages.CommitTransactionReply.newBuilder().build();
- }
+ @Override
+ public Object toSerializable() {
+ return ThreePhaseCommitCohortMessages.CommitTransactionReply.newBuilder().build();
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages;
-
-public class CreateTransactionChain implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CreateTransactionChain.class;
-
- @Override
- public Object toSerializable() {
- return ShardTransactionChainMessages.CreateTransactionChain.newBuilder().build();
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import akka.actor.ActorPath;
-import akka.actor.ActorSystem;
-import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages;
-
-public class CreateTransactionChainReply implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CreateTransactionChainReply.class;
- private final ActorPath transactionChainPath;
-
- public CreateTransactionChainReply(ActorPath transactionChainPath) {
- this.transactionChainPath = transactionChainPath;
- }
-
- public ActorPath getTransactionChainPath() {
- return transactionChainPath;
- }
-
- @Override
- public ShardTransactionChainMessages.CreateTransactionChainReply toSerializable() {
- return ShardTransactionChainMessages.CreateTransactionChainReply.newBuilder()
- .setTransactionChainPath(transactionChainPath.toString()).build();
- }
-
- public static CreateTransactionChainReply fromSerializable(ActorSystem actorSystem,Object serializable){
- ShardTransactionChainMessages.CreateTransactionChainReply o = (ShardTransactionChainMessages.CreateTransactionChainReply) serializable;
- return new CreateTransactionChainReply(
- actorSystem.actorFor(o.getTransactionChainPath()).path());
- }
-
-}
package org.opendaylight.controller.cluster.datastore.messages;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.controller.protobuff.messages.datachange.notification.DataChangeListenerMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
public class DataChanged implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS =
+ public static final Class<DataChangeListenerMessages.DataChanged> SERIALIZABLE_CLASS =
DataChangeListenerMessages.DataChanged.class;
+
final private SchemaContext schemaContext;
private final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>>
change;
NormalizedNode<?, ?> normalizedNode) {
return new NormalizedNodeToNodeCodec(schemaContext)
- .encode(YangInstanceIdentifier.builder().build(), normalizedNode)
+ .encode(normalizedNode)
.getNormalizedNode();
}
removedPathInstanceIds.add(InstanceIdentifierUtils.toSerializable(id));
}
return new Iterable<NormalizedNodeMessages.InstanceIdentifier>() {
+ @Override
public Iterator<NormalizedNodeMessages.InstanceIdentifier> iterator() {
return removedPathInstanceIds.iterator();
}
builder.setInstanceIdentifierPath(instanceIdentifier)
.setNormalizedNode(normalizedNodeToNodeCodec
- .encode(entry.getKey(), entry.getValue())
+ .encode(entry.getValue())
.getNormalizedNode());
nodeMapBuilder.addMapEntries(builder.build());
}
static class DataChangedEvent implements
AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> {
- private final SchemaContext schemaContext;
private Map<YangInstanceIdentifier, NormalizedNode<?, ?>> createdData;
private final NormalizedNodeToNodeCodec nodeCodec;
private Map<YangInstanceIdentifier, NormalizedNode<?, ?>> updatedData;
private Set<YangInstanceIdentifier> removedPathIds;
DataChangedEvent(SchemaContext schemaContext) {
- this.schemaContext = schemaContext;
nodeCodec = new NormalizedNodeToNodeCodec(schemaContext);
}
YangInstanceIdentifier id = InstanceIdentifierUtils
.fromSerializable(nodeMapEntry.getInstanceIdentifierPath());
mapEntries.put(id,
- nodeCodec.decode(id, nodeMapEntry.getNormalizedNode()));
+ nodeCodec.decode(nodeMapEntry.getNormalizedNode()));
}
return mapEntries;
}
DataChangedEvent setOriginalSubtree(NormalizedNodeMessages.Node node,
YangInstanceIdentifier instanceIdentifierPath) {
- originalSubTree = nodeCodec.decode(instanceIdentifierPath, node);
+ originalSubTree = nodeCodec.decode(node);
return this;
}
DataChangedEvent setUpdatedSubtree(NormalizedNodeMessages.Node node,
YangInstanceIdentifier instanceIdentifierPath) {
- updatedSubTree = nodeCodec.decode(instanceIdentifierPath, node);
+ updatedSubTree = nodeCodec.decode(node);
return this;
}
package org.opendaylight.controller.cluster.datastore.messages;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*/
public class FindLocalShard {
private final String shardName;
+ private final boolean waitUntilInitialized;
- public FindLocalShard(String shardName) {
+ public FindLocalShard(String shardName, boolean waitUntilInitialized) {
this.shardName = shardName;
+ this.waitUntilInitialized = waitUntilInitialized;
}
public String getShardName() {
return shardName;
}
+
+ public boolean isWaitUntilInitialized() {
+ return waitUntilInitialized;
+ }
}
*
*/
public class FindPrimary implements SerializableMessage{
- public static final Class SERIALIZABLE_CLASS = FindPrimary.class;
+ public static final Class<FindPrimary> SERIALIZABLE_CLASS = FindPrimary.class;
+
private final String shardName;
+ private final boolean waitUntilInitialized;
- public FindPrimary(String shardName){
+ public FindPrimary(String shardName, boolean waitUntilInitialized){
Preconditions.checkNotNull(shardName, "shardName should not be null");
this.shardName = shardName;
+ this.waitUntilInitialized = waitUntilInitialized;
}
public String getShardName() {
return shardName;
}
- @Override
- public Object toSerializable() {
- return this;
- }
+ public boolean isWaitUntilInitialized() {
+ return waitUntilInitialized;
+ }
- public static FindPrimary fromSerializable(Object message){
- return (FindPrimary) message;
- }
+ @Override
+ public Object toSerializable() {
+ return this;
+ }
+
+ public static FindPrimary fromSerializable(Object message){
+ return (FindPrimary) message;
+ }
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-
-public class ForwardedCommitTransaction {
- private final DOMStoreThreePhaseCommitCohort cohort;
- private final Modification modification;
-
- public ForwardedCommitTransaction(DOMStoreThreePhaseCommitCohort cohort, Modification modification){
- this.cohort = cohort;
- this.modification = modification;
- }
-
- public DOMStoreThreePhaseCommitCohort getCohort() {
- return cohort;
- }
-
- public Modification getModification() {
- return modification;
- }
-}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+
+/**
+ * Transaction ReadyTransaction message that is forwarded to the local Shard from the ShardTransaction.
+ *
+ * @author Thomas Pantelis
+ */
+public class ForwardedReadyTransaction {
+ private final String transactionID;
+ private final DOMStoreThreePhaseCommitCohort cohort;
+ private final Modification modification;
+ private final boolean returnSerialized;
+
+ public ForwardedReadyTransaction(String transactionID, DOMStoreThreePhaseCommitCohort cohort,
+ Modification modification, boolean returnSerialized) {
+ this.transactionID = transactionID;
+ this.cohort = cohort;
+ this.modification = modification;
+ this.returnSerialized = returnSerialized;
+
+ }
+
+ public String getTransactionID() {
+ return transactionID;
+ }
+
+ public DOMStoreThreePhaseCommitCohort getCohort() {
+ return cohort;
+ }
+
+ public Modification getModification() {
+ return modification;
+ }
+
+ public boolean isReturnSerialized() {
+ return returnSerialized;
+ }
+}
package org.opendaylight.controller.cluster.datastore.messages;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Decoded;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Encoded;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
public class MergeData extends ModifyData{
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.MergeData.class;
+ public static final Class<ShardTransactionMessages.MergeData> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.MergeData.class;
public MergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data,
SchemaContext context) {
super(path, data, context);
}
- @Override public Object toSerializable() {
-
- NormalizedNodeMessages.Node normalizedNode =
- new NormalizedNodeToNodeCodec(schemaContext).encode(path, data)
- .getNormalizedNode();
+ @Override
+ public Object toSerializable() {
+ Encoded encoded = new NormalizedNodeToNodeCodec(schemaContext).encode(path, data);
return ShardTransactionMessages.MergeData.newBuilder()
- .setInstanceIdentifierPathArguments(InstanceIdentifierUtils.toSerializable(path))
- .setNormalizedNode(normalizedNode).build();
+ .setInstanceIdentifierPathArguments(encoded.getEncodedPath())
+ .setNormalizedNode(encoded.getEncodedNode().getNormalizedNode()).build();
}
public static MergeData fromSerializable(Object serializable, SchemaContext schemaContext){
ShardTransactionMessages.MergeData o = (ShardTransactionMessages.MergeData) serializable;
- YangInstanceIdentifier identifier = InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments());
-
- NormalizedNode<?, ?> normalizedNode =
- new NormalizedNodeToNodeCodec(schemaContext)
- .decode(identifier, o.getNormalizedNode());
-
- return new MergeData(identifier, normalizedNode, schemaContext);
+ Decoded decoded = new NormalizedNodeToNodeCodec(schemaContext).decode(
+ o.getInstanceIdentifierPathArguments(), o.getNormalizedNode());
+ return new MergeData(decoded.getDecodedPath(), decoded.getDecodedNode(), schemaContext);
}
}
package org.opendaylight.controller.cluster.datastore.messages;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-public class ReadDataReply implements SerializableMessage{
-
- private final NormalizedNode<?, ?> normalizedNode;
- private final SchemaContext schemaContext;
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.ReadDataReply.class;
- public ReadDataReply(SchemaContext context,NormalizedNode<?, ?> normalizedNode){
-
- this.normalizedNode = normalizedNode;
- this.schemaContext = context;
- }
-
- public NormalizedNode<?, ?> getNormalizedNode() {
- return normalizedNode;
- }
-
- public Object toSerializable(){
- if(normalizedNode != null) {
- return ShardTransactionMessages.ReadDataReply.newBuilder()
- .setNormalizedNode(new NormalizedNodeToNodeCodec(schemaContext)
- .encode(YangInstanceIdentifier.builder().build(), normalizedNode).getNormalizedNode()
- ).build();
- }else{
- return ShardTransactionMessages.ReadDataReply.newBuilder().build();
+public class ReadDataReply implements SerializableMessage {
+ public static final Class<ShardTransactionMessages.ReadDataReply> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.ReadDataReply.class;
+ private final NormalizedNode<?, ?> normalizedNode;
+ private final SchemaContext schemaContext;
+
+ public ReadDataReply(SchemaContext context,NormalizedNode<?, ?> normalizedNode){
+
+ this.normalizedNode = normalizedNode;
+ this.schemaContext = context;
+ }
+
+ public NormalizedNode<?, ?> getNormalizedNode() {
+ return normalizedNode;
}
- }
+ @Override
+ public Object toSerializable(){
+ if(normalizedNode != null) {
+ return ShardTransactionMessages.ReadDataReply.newBuilder()
+ .setNormalizedNode(new NormalizedNodeToNodeCodec(schemaContext)
+ .encode(normalizedNode).getNormalizedNode()).build();
+ } else {
+ return ShardTransactionMessages.ReadDataReply.newBuilder().build();
- public static ReadDataReply fromSerializable(SchemaContext schemaContext,YangInstanceIdentifier id,Object serializable){
- ShardTransactionMessages.ReadDataReply o = (ShardTransactionMessages.ReadDataReply) serializable;
- return new ReadDataReply(schemaContext,new NormalizedNodeToNodeCodec(schemaContext).decode(id, o.getNormalizedNode()));
- }
+ }
+ }
+
+ public static ReadDataReply fromSerializable(SchemaContext schemaContext,
+ YangInstanceIdentifier id, Object serializable) {
+ ShardTransactionMessages.ReadDataReply o = (ShardTransactionMessages.ReadDataReply) serializable;
+ return new ReadDataReply(schemaContext, new NormalizedNodeToNodeCodec(schemaContext).decode(
+ o.getNormalizedNode()));
+ }
- public static ByteString getNormalizedNodeByteString(Object serializable){
- ShardTransactionMessages.ReadDataReply o = (ShardTransactionMessages.ReadDataReply) serializable;
- return ((ShardTransactionMessages.ReadDataReply) serializable).getNormalizedNode().toByteString();
- }
+ public static ByteString getNormalizedNodeByteString(Object serializable){
+ ShardTransactionMessages.ReadDataReply o = (ShardTransactionMessages.ReadDataReply) serializable;
+ return ((ShardTransactionMessages.ReadDataReply) serializable).getNormalizedNode().toByteString();
+ }
}
package org.opendaylight.controller.cluster.datastore.messages;
-import akka.actor.ActorPath;
-import akka.actor.ActorSystem;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
public class ReadyTransactionReply implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.ReadyTransactionReply.class;
- private final ActorPath cohortPath;
+ public static final Class<ShardTransactionMessages.ReadyTransactionReply> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.ReadyTransactionReply.class;
- public ReadyTransactionReply(ActorPath cohortPath) {
+ private final String cohortPath;
- this.cohortPath = cohortPath;
- }
+ public ReadyTransactionReply(String cohortPath) {
- public ActorPath getCohortPath() {
- return cohortPath;
- }
+ this.cohortPath = cohortPath;
+ }
- @Override
- public ShardTransactionMessages.ReadyTransactionReply toSerializable() {
- return ShardTransactionMessages.ReadyTransactionReply.newBuilder()
- .setActorPath(cohortPath.toString()).build();
- }
+ public String getCohortPath() {
+ return cohortPath;
+ }
- public static ReadyTransactionReply fromSerializable(ActorSystem actorSystem,Object serializable){
- ShardTransactionMessages.ReadyTransactionReply o = (ShardTransactionMessages.ReadyTransactionReply) serializable;
- return new ReadyTransactionReply(
- actorSystem.actorFor(o.getActorPath()).path());
- }
+ @Override
+ public ShardTransactionMessages.ReadyTransactionReply toSerializable() {
+ return ShardTransactionMessages.ReadyTransactionReply.newBuilder().
+ setActorPath(cohortPath).build();
+ }
+
+ public static ReadyTransactionReply fromSerializable(Object serializable) {
+ ShardTransactionMessages.ReadyTransactionReply o =
+ (ShardTransactionMessages.ReadyTransactionReply) serializable;
+
+ return new ReadyTransactionReply(o.getActorPath());
+ }
}
import akka.actor.ActorPath;
import akka.actor.ActorSystem;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
public class RegisterChangeListener implements SerializableMessage {
- public static final Class SERIALIZABLE_CLASS = ListenerRegistrationMessages.RegisterChangeListener.class;
+ public static final Class<ListenerRegistrationMessages.RegisterChangeListener> SERIALIZABLE_CLASS =
+ ListenerRegistrationMessages.RegisterChangeListener.class;
+
private final YangInstanceIdentifier path;
private final ActorPath dataChangeListenerPath;
private final AsyncDataBroker.DataChangeScope scope;
package org.opendaylight.controller.cluster.datastore.messages;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Decoded;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Encoded;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-public class WriteData extends ModifyData{
+public class WriteData extends ModifyData {
- public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.WriteData.class;
+ public static final Class<ShardTransactionMessages.WriteData> SERIALIZABLE_CLASS =
+ ShardTransactionMessages.WriteData.class;
- public WriteData(YangInstanceIdentifier path, NormalizedNode<?, ?> data, SchemaContext schemaContext) {
- super(path, data, schemaContext);
- }
-
- @Override public Object toSerializable() {
+ public WriteData(YangInstanceIdentifier path, NormalizedNode<?, ?> data, SchemaContext schemaContext) {
+ super(path, data, schemaContext);
+ }
- NormalizedNodeMessages.Node normalizedNode =
- new NormalizedNodeToNodeCodec(schemaContext).encode(path, data)
- .getNormalizedNode();
+ @Override
+ public Object toSerializable() {
+ Encoded encoded = new NormalizedNodeToNodeCodec(schemaContext).encode(path, data);
return ShardTransactionMessages.WriteData.newBuilder()
- .setInstanceIdentifierPathArguments(InstanceIdentifierUtils.toSerializable(path))
- .setNormalizedNode(normalizedNode).build();
-
+ .setInstanceIdentifierPathArguments(encoded.getEncodedPath())
+ .setNormalizedNode(encoded.getEncodedNode().getNormalizedNode()).build();
}
public static WriteData fromSerializable(Object serializable, SchemaContext schemaContext){
ShardTransactionMessages.WriteData o = (ShardTransactionMessages.WriteData) serializable;
- YangInstanceIdentifier identifier = InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments());
-
- NormalizedNode<?, ?> normalizedNode =
- new NormalizedNodeToNodeCodec(schemaContext)
- .decode(identifier, o.getNormalizedNode());
-
- return new WriteData(identifier, normalizedNode, schemaContext);
+ Decoded decoded = new NormalizedNodeToNodeCodec(schemaContext).decode(
+ o.getInstanceIdentifierPathArguments(), o.getNormalizedNode());
+ return new WriteData(decoded.getDecodedPath(), decoded.getDecodedNode(), schemaContext);
}
-
}
protected AbstractModification(YangInstanceIdentifier path) {
this.path = path;
}
+
+ public YangInstanceIdentifier getPath() {
+ return path;
+ }
}
package org.opendaylight.controller.cluster.datastore.modification;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
* DeleteModification store all the parameters required to delete a path from the data tree
*/
public class DeleteModification extends AbstractModification {
- public DeleteModification(YangInstanceIdentifier path) {
- super(path);
- }
+ private static final long serialVersionUID = 1L;
- @Override
- public void apply(DOMStoreWriteTransaction transaction) {
- transaction.delete(path);
- }
+ public DeleteModification(YangInstanceIdentifier path) {
+ super(path);
+ }
+
+ @Override
+ public void apply(DOMStoreWriteTransaction transaction) {
+ transaction.delete(path);
+ }
- @Override public Object toSerializable() {
- return PersistentMessages.Modification.newBuilder()
- .setType(this.getClass().toString())
- .setPath(InstanceIdentifierUtils.toSerializable(this.path))
- .build();
+ @Override
+ public Object toSerializable() {
+ return PersistentMessages.Modification.newBuilder().setType(this.getClass().toString())
+ .setPath(InstanceIdentifierUtils.toSerializable(this.path)).build();
}
- public static DeleteModification fromSerializable(Object serializable){
+ public static DeleteModification fromSerializable(Object serializable) {
PersistentMessages.Modification o = (PersistentMessages.Modification) serializable;
return new DeleteModification(InstanceIdentifierUtils.fromSerializable(o.getPath()));
}
package org.opendaylight.controller.cluster.datastore.modification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Decoded;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
/**
* MergeModification stores all the parameters required to merge data into the specified path
*/
-public class MergeModification extends AbstractModification {
- private final NormalizedNode data;
- private final SchemaContext schemaContext;
-
+public class MergeModification extends WriteModification {
public MergeModification(YangInstanceIdentifier path, NormalizedNode data,
SchemaContext schemaContext) {
- super(path);
- this.data = data;
- this.schemaContext = schemaContext;
+ super(path, data, schemaContext);
}
@Override
transaction.merge(path, data);
}
- @Override public Object toSerializable() {
- NormalizedNodeMessages.Container encode =
- new NormalizedNodeToNodeCodec(schemaContext).encode(
- path, data);
-
- return PersistentMessages.Modification.newBuilder()
- .setType(this.getClass().toString())
- .setPath(InstanceIdentifierUtils.toSerializable(this.path))
- .setData(encode.getNormalizedNode())
- .build();
-
- }
-
- public static MergeModification fromSerializable(
- Object serializable,
- SchemaContext schemaContext) {
+ public static MergeModification fromSerializable(Object serializable, SchemaContext schemaContext) {
PersistentMessages.Modification o = (PersistentMessages.Modification) serializable;
-
- YangInstanceIdentifier path = InstanceIdentifierUtils.fromSerializable(o.getPath());
- NormalizedNode data = new NormalizedNodeToNodeCodec(schemaContext).decode(
- path, o.getData());
-
- return new MergeModification(path, data, schemaContext);
+ Decoded decoded = new NormalizedNodeToNodeCodec(schemaContext).decode(o.getPath(), o.getData());
+ return new MergeModification(decoded.getDecodedPath(), decoded.getDecodedNode(), schemaContext);
}
-
}
package org.opendaylight.controller.cluster.datastore.modification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.datastore.utils.InstanceIdentifierUtils;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Decoded;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec.Encoded;
import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
*/
public class WriteModification extends AbstractModification {
- private final NormalizedNode data;
+ protected final NormalizedNode data;
private final SchemaContext schemaContext;
public WriteModification(YangInstanceIdentifier path, NormalizedNode data, SchemaContext schemaContext) {
- super(path);
- this.data = data;
+ super(path);
+ this.data = data;
this.schemaContext = schemaContext;
}
- @Override
- public void apply(DOMStoreWriteTransaction transaction) {
- transaction.write(path, data);
- }
+ @Override
+ public void apply(DOMStoreWriteTransaction transaction) {
+ transaction.write(path, data);
+ }
- @Override public Object toSerializable() {
- NormalizedNodeMessages.Container encode =
- new NormalizedNodeToNodeCodec(schemaContext).encode(
- path, data);
+ public NormalizedNode getData() {
+ return data;
+ }
+ @Override
+ public Object toSerializable() {
+ Encoded encoded = new NormalizedNodeToNodeCodec(schemaContext).encode(path, data);
return PersistentMessages.Modification.newBuilder()
- .setType(this.getClass().toString())
- .setPath(InstanceIdentifierUtils.toSerializable(this.path))
- .setData(encode.getNormalizedNode())
- .build();
-
+ .setType(this.getClass().toString())
+ .setPath(encoded.getEncodedPath())
+ .setData(encoded.getEncodedNode().getNormalizedNode())
+ .build();
}
- public static WriteModification fromSerializable(
- Object serializable,
- SchemaContext schemaContext) {
+ public static WriteModification fromSerializable(Object serializable, SchemaContext schemaContext) {
PersistentMessages.Modification o = (PersistentMessages.Modification) serializable;
-
- YangInstanceIdentifier path = InstanceIdentifierUtils.fromSerializable(o.getPath());
- NormalizedNode data = new NormalizedNodeToNodeCodec(schemaContext).decode(
- path, o.getData());
-
- return new WriteModification(path, data, schemaContext);
+ Decoded decoded = new NormalizedNodeToNodeCodec(schemaContext).decode(o.getPath(), o.getData());
+ return new WriteModification(decoded.getDecodedPath(), decoded.getDecodedNode(), schemaContext);
}
}
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
-import akka.pattern.Patterns;
+import akka.dispatch.Mapper;
+import akka.pattern.AskTimeoutException;
import akka.util.Timeout;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.Configuration;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
+import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
+import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.slf4j.Logger;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
import scala.concurrent.duration.FiniteDuration;
-
import java.util.concurrent.TimeUnit;
-
import static akka.pattern.Patterns.ask;
/**
private static final Logger
LOG = LoggerFactory.getLogger(ActorContext.class);
- private static final FiniteDuration DEFAULT_OPER_DURATION = Duration.create(5, TimeUnit.SECONDS);
-
public static final String MAILBOX = "bounded-mailbox";
+ private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
+ new Mapper<Throwable, Throwable>() {
+ @Override
+ public Throwable apply(Throwable failure) {
+ Throwable actualFailure = failure;
+ if(failure instanceof AskTimeoutException) {
+ // A timeout exception most likely means the shard isn't initialized.
+ actualFailure = new NotInitializedException(
+ "Timed out trying to find the primary shard. Most likely cause is the " +
+ "shard is not initialized yet.");
+ }
+
+ return actualFailure;
+ }
+ };
+
private final ActorSystem actorSystem;
private final ActorRef shardManager;
private final ClusterWrapper clusterWrapper;
private final Configuration configuration;
+ private final DatastoreContext datastoreContext;
private volatile SchemaContext schemaContext;
- private FiniteDuration operationDuration = DEFAULT_OPER_DURATION;
- private Timeout operationTimeout = new Timeout(operationDuration);
+ private final FiniteDuration operationDuration;
+ private final Timeout operationTimeout;
public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
- ClusterWrapper clusterWrapper,
- Configuration configuration) {
+ ClusterWrapper clusterWrapper, Configuration configuration) {
+ this(actorSystem, shardManager, clusterWrapper, configuration,
+ DatastoreContext.newBuilder().build());
+ }
+
+ public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
+ ClusterWrapper clusterWrapper, Configuration configuration,
+ DatastoreContext datastoreContext) {
this.actorSystem = actorSystem;
this.shardManager = shardManager;
this.clusterWrapper = clusterWrapper;
this.configuration = configuration;
+ this.datastoreContext = datastoreContext;
+
+ operationDuration = Duration.create(datastoreContext.getOperationTimeoutInSeconds(),
+ TimeUnit.SECONDS);
+ operationTimeout = new Timeout(operationDuration);
+ }
+
+ public DatastoreContext getDatastoreContext() {
+ return datastoreContext;
}
public ActorSystem getActorSystem() {
}
}
- public void setOperationTimeout(int timeoutInSeconds) {
- operationDuration = Duration.create(timeoutInSeconds, TimeUnit.SECONDS);
- operationTimeout = new Timeout(operationDuration);
- }
-
public SchemaContext getSchemaContext() {
return schemaContext;
}
/**
- * Finds the primary for a given shard
+ * Finds the primary shard for the given shard name
*
* @param shardName
* @return
*/
- public ActorSelection findPrimary(String shardName) {
- String path = findPrimaryPath(shardName);
- return actorSystem.actorSelection(path);
+ public Optional<ActorSelection> findPrimaryShard(String shardName) {
+ String path = findPrimaryPathOrNull(shardName);
+ if (path == null){
+ return Optional.absent();
+ }
+ return Optional.of(actorSystem.actorSelection(path));
+ }
+
+ public Future<ActorSelection> findPrimaryShardAsync(final String shardName) {
+ Future<Object> future = executeOperationAsync(shardManager,
+ new FindPrimary(shardName, true).toSerializable(),
+ datastoreContext.getShardInitializationTimeout());
+
+ return future.transform(new Mapper<Object, ActorSelection>() {
+ @Override
+ public ActorSelection checkedApply(Object response) throws Exception {
+ if(response.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
+ PrimaryFound found = PrimaryFound.fromSerializable(response);
+
+ LOG.debug("Primary found {}", found.getPrimaryPath());
+ return actorSystem.actorSelection(found.getPrimaryPath());
+ } else if(response instanceof ActorNotInitialized) {
+ throw new NotInitializedException(
+ String.format("Found primary shard %s but it's not initialized yet. " +
+ "Please try again later", shardName));
+ } else if(response instanceof PrimaryNotFound) {
+ throw new PrimaryNotFoundException(
+ String.format("No primary shard found for %S.", shardName));
+ }
+
+ throw new UnknownMessageException(String.format(
+ "FindPrimary returned unkown response: %s", response));
+ }
+ }, FIND_PRIMARY_FAILURE_TRANSFORMER, getActorSystem().dispatcher());
}
/**
- * Finds a local shard given it's shard name and return it's ActorRef
+ * Finds a local shard given its shard name and return it's ActorRef
*
* @param shardName the name of the local shard that needs to be found
* @return a reference to a local shard actor which represents the shard
* specified by the shardName
*/
- public ActorRef findLocalShard(String shardName) {
- Object result = executeLocalOperation(shardManager,
- new FindLocalShard(shardName));
+ public Optional<ActorRef> findLocalShard(String shardName) {
+ Object result = executeOperation(shardManager, new FindLocalShard(shardName, false));
if (result instanceof LocalShardFound) {
LocalShardFound found = (LocalShardFound) result;
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Local shard found {}", found.getPath());
- }
- return found.getPath();
+ LOG.debug("Local shard found {}", found.getPath());
+ return Optional.of(found.getPath());
}
- return null;
+ return Optional.absent();
}
+ /**
+ * Finds a local shard async given its shard name and return a Future from which to obtain the
+ * ActorRef.
+ *
+ * @param shardName the name of the local shard that needs to be found
+ */
+ public Future<ActorRef> findLocalShardAsync( final String shardName) {
+ Future<Object> future = executeOperationAsync(shardManager,
+ new FindLocalShard(shardName, true), datastoreContext.getShardInitializationTimeout());
+
+ return future.map(new Mapper<Object, ActorRef>() {
+ @Override
+ public ActorRef checkedApply(Object response) throws Throwable {
+ if(response instanceof LocalShardFound) {
+ LocalShardFound found = (LocalShardFound)response;
+ LOG.debug("Local shard found {}", found.getPath());
+ return found.getPath();
+ } else if(response instanceof ActorNotInitialized) {
+ throw new NotInitializedException(
+ String.format("Found local shard for %s but it's not initialized yet.",
+ shardName));
+ } else if(response instanceof LocalShardNotFound) {
+ throw new LocalShardNotFoundException(
+ String.format("Local shard for %s does not exist.", shardName));
+ }
+
+ throw new UnknownMessageException(String.format(
+ "FindLocalShard returned unkown response: %s", response));
+ }
+ }, getActorSystem().dispatcher());
+ }
- public String findPrimaryPath(String shardName) {
- Object result = executeLocalOperation(shardManager,
- new FindPrimary(shardName).toSerializable());
+ private String findPrimaryPathOrNull(String shardName) {
+ Object result = executeOperation(shardManager, new FindPrimary(shardName, false).toSerializable());
if (result.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
PrimaryFound found = PrimaryFound.fromSerializable(result);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Primary found {}", found.getPrimaryPath());
- }
+ LOG.debug("Primary found {}", found.getPrimaryPath());
return found.getPrimaryPath();
+
+ } else if (result.getClass().equals(ActorNotInitialized.class)){
+ throw new NotInitializedException(
+ String.format("Found primary shard[%s] but its not initialized yet. Please try again later", shardName)
+ );
+
+ } else {
+ return null;
}
- throw new PrimaryNotFoundException("Could not find primary for shardName " + shardName);
}
* @param message
* @return The response of the operation
*/
- public Object executeLocalOperation(ActorRef actor, Object message) {
- Future<Object> future = ask(actor, message, operationTimeout);
+ public Object executeOperation(ActorRef actor, Object message) {
+ Future<Object> future = executeOperationAsync(actor, message, operationTimeout);
try {
return Await.result(future, operationDuration);
} catch (Exception e) {
- throw new TimeoutException("Sending message " + message.getClass().toString() + " to actor " + actor.toString() + " failed" , e);
+ throw new TimeoutException("Sending message " + message.getClass().toString() +
+ " to actor " + actor.toString() + " failed. Try again later.", e);
}
}
+ public Future<Object> executeOperationAsync(ActorRef actor, Object message, Timeout timeout) {
+ Preconditions.checkArgument(actor != null, "actor must not be null");
+ Preconditions.checkArgument(message != null, "message must not be null");
+
+ LOG.debug("Sending message {} to {}", message.getClass().toString(), actor.toString());
+ return ask(actor, message, timeout);
+ }
+
/**
* Execute an operation on a remote actor and wait for it's response
*
* @param message
* @return
*/
- public Object executeRemoteOperation(ActorSelection actor, Object message) {
-
- if(LOG.isDebugEnabled()) {
- LOG.debug("Sending remote message {} to {}", message.getClass().toString(),
- actor.toString());
- }
- Future<Object> future = ask(actor, message, operationTimeout);
+ public Object executeOperation(ActorSelection actor, Object message) {
+ Future<Object> future = executeOperationAsync(actor, message);
try {
return Await.result(future, operationDuration);
} catch (Exception e) {
throw new TimeoutException("Sending message " + message.getClass().toString() +
- " to actor " + actor.toString() + " failed" , e);
+ " to actor " + actor.toString() + " failed. Try again later.", e);
}
}
*
* @param actor the ActorSelection
* @param message the message to send
+ * @param timeout the operation timeout
* @return a Future containing the eventual result
*/
- public Future<Object> executeRemoteOperationAsync(ActorSelection actor, Object message) {
+ public Future<Object> executeOperationAsync(ActorSelection actor, Object message,
+ Timeout timeout) {
+ Preconditions.checkArgument(actor != null, "actor must not be null");
+ Preconditions.checkArgument(message != null, "message must not be null");
- if(LOG.isDebugEnabled()) {
- LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
- }
- return ask(actor, message, operationTimeout);
+ LOG.debug("Sending message {} to {}", message.getClass().toString(), actor.toString());
+
+ return ask(actor, message, timeout);
}
/**
- * Sends an operation to be executed by a remote actor asynchronously without waiting for a
- * reply (essentially set and forget).
+ * Execute an operation on a remote actor asynchronously.
*
* @param actor the ActorSelection
* @param message the message to send
+ * @return a Future containing the eventual result
*/
- public void sendRemoteOperationAsync(ActorSelection actor, Object message) {
- actor.tell(message, ActorRef.noSender());
- }
-
- public void sendShardOperationAsync(String shardName, Object message) {
- ActorSelection primary = findPrimary(shardName);
-
- primary.tell(message, ActorRef.noSender());
- }
-
-
- /**
- * Execute an operation on the primary for a given shard
- * <p>
- * This method first finds the primary for a given shard ,then sends
- * the message to the remote shard and waits for a response
- * </p>
- *
- * @param shardName
- * @param message
- * @return
- * @throws org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException if the message to the remote shard times out
- * @throws org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException if the primary shard is not found
- */
- public Object executeShardOperation(String shardName, Object message) {
- ActorSelection primary = findPrimary(shardName);
-
- return executeRemoteOperation(primary, message);
+ public Future<Object> executeOperationAsync(ActorSelection actor, Object message) {
+ return executeOperationAsync(actor, message, operationTimeout);
}
/**
- * Execute an operation on the the local shard only
- * <p>
- * This method first finds the address of the local shard if any. It then
- * executes the operation on it.
- * </p>
+ * Sends an operation to be executed by a remote actor asynchronously without waiting for a
+ * reply (essentially set and forget).
*
- * @param shardName the name of the shard on which the operation needs to be executed
- * @param message the message that needs to be sent to the shard
- * @return the message that was returned by the local actor on which the
- * the operation was executed. If a local shard was not found then
- * null is returned
- * @throws org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException
- * if the operation does not complete in a specified time duration
+ * @param actor the ActorSelection
+ * @param message the message to send
*/
- public Object executeLocalShardOperation(String shardName, Object message) {
- ActorRef local = findLocalShard(shardName);
-
- if(local != null) {
- return executeLocalOperation(local, message);
- }
-
- return null;
- }
+ public void sendOperationAsync(ActorSelection actor, Object message) {
+ Preconditions.checkArgument(actor != null, "actor must not be null");
+ Preconditions.checkArgument(message != null, "message must not be null");
+ LOG.debug("Sending message {} to {}", message.getClass().toString(), actor.toString());
- /**
- * Execute an operation on the the local shard only asynchronously
- *
- * <p>
- * This method first finds the address of the local shard if any. It then
- * executes the operation on it.
- * </p>
- *
- * @param shardName the name of the shard on which the operation needs to be executed
- * @param message the message that needs to be sent to the shard
- * @param timeout the amount of time that this method should wait for a response before timing out
- * @return null if the shard could not be located else a future on which the caller can wait
- *
- */
- public Future executeLocalShardOperationAsync(String shardName, Object message, Timeout timeout) {
- ActorRef local = findLocalShard(shardName);
- if(local == null){
- return null;
- }
- return Patterns.ask(local, message, timeout);
+ actor.tell(message, ActorRef.noSender());
}
-
-
public void shutdown() {
shardManager.tell(PoisonPill.getInstance(), null);
actorSystem.shutdown();
}
- /**
- * @deprecated Need to stop using this method. There are ways to send a
- * remote ActorRef as a string which should be used instead of this hack
- *
- * @param primaryPath
- * @param localPathOfRemoteActor
- * @return
- */
- @Deprecated
- public String resolvePath(final String primaryPath,
- final String localPathOfRemoteActor) {
- StringBuilder builder = new StringBuilder();
- String[] primaryPathElements = primaryPath.split("/");
- builder.append(primaryPathElements[0]).append("//")
- .append(primaryPathElements[1]).append(primaryPathElements[2]);
- String[] remotePathElements = localPathOfRemoteActor.split("/");
- for (int i = 3; i < remotePathElements.length; i++) {
- builder.append("/").append(remotePathElements[i]);
- }
-
- return builder.toString();
-
- }
-
- public ActorPath actorFor(String path){
- return actorSystem.actorFor(path).path();
+ public ClusterWrapper getClusterWrapper() {
+ return clusterWrapper;
}
public String getCurrentMemberName(){
*/
public void broadcast(Object message){
for(String shardName : configuration.getAllShardNames()){
- try {
- sendShardOperationAsync(shardName, message);
- } catch(Exception e){
- LOG.warn("broadcast failed to send message " + message.getClass().getSimpleName() + " to shard " + shardName, e);
+
+ Optional<ActorSelection> primary = findPrimaryShard(shardName);
+ if (primary.isPresent()) {
+ primary.get().tell(message, ActorRef.noSender());
+ } else {
+ LOG.warn("broadcast failed to send message {} to shard {}. Primary not found",
+ message.getClass().getSimpleName(), shardName);
}
}
}
public FiniteDuration getOperationDuration() {
return operationDuration;
}
+
+ public boolean isLocalPath(String path) {
+ String selfAddress = clusterWrapper.getSelfAddress();
+ if (path == null || selfAddress == null) {
+ return false;
+ }
+
+ int atIndex1 = path.indexOf("@");
+ int atIndex2 = selfAddress.indexOf("@");
+
+ if (atIndex1 == -1 || atIndex2 == -1) {
+ return false;
+ }
+
+ int slashIndex1 = path.indexOf("/", atIndex1);
+ int slashIndex2 = selfAddress.indexOf("/", atIndex2);
+
+ if (slashIndex1 == -1 || slashIndex2 == -1) {
+ return false;
+ }
+
+ String hostPort1 = path.substring(atIndex1, slashIndex1);
+ String hostPort2 = selfAddress.substring(atIndex2, slashIndex2);
+
+ return hostPort1.equals(hostPort2);
+ }
}
+++ /dev/null
-package org.opendaylight.controller.cluster.datastore.utils;
-
-import org.opendaylight.controller.cluster.datastore.node.utils.NodeIdentifierFactory;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * @author: syedbahm
- */
-public class InstanceIdentifierUtils {
-
- protected static final Logger logger = LoggerFactory
- .getLogger(InstanceIdentifierUtils.class);
-
- public static String getParentPath(String currentElementPath) {
-
- StringBuilder parentPath = new StringBuilder();
-
- if (currentElementPath != null) {
- String[] parentPaths = currentElementPath.split("/");
- if (parentPaths.length > 2) {
- for (int i = 0; i < parentPaths.length - 1; i++) {
- if (parentPaths[i].length() > 0) {
- parentPath.append( "/");
- parentPath.append( parentPaths[i]);
- }
- }
- }
- }
- return parentPath.toString();
- }
-
- @Deprecated
- public static YangInstanceIdentifier from(String path) {
- String[] ids = path.split("/");
-
- List<YangInstanceIdentifier.PathArgument> pathArguments =
- new ArrayList<>();
- for (String nodeId : ids) {
- if (!"".equals(nodeId)) {
- pathArguments
- .add(NodeIdentifierFactory.getArgument(nodeId));
- }
- }
- final YangInstanceIdentifier instanceIdentifier =
- YangInstanceIdentifier.create(pathArguments);
- return instanceIdentifier;
- }
-
- /**
- * @deprecated Use {@link org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils} instead
- * @param path
- * @return
- */
- @Deprecated
- public static NormalizedNodeMessages.InstanceIdentifier toSerializable(YangInstanceIdentifier path){
- return org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils.toSerializable(path);
- }
-
- /**
- * @deprecated Use {@link org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils} instead
- * @param path
- * @return
- */
- @Deprecated
- public static YangInstanceIdentifier fromSerializable(NormalizedNodeMessages.InstanceIdentifier path){
- return org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils.fromSerializable(path);
- }
-}
props = new ConfigProperties();
}
- DatastoreContext datastoreContext = new DatastoreContext("DistributedConfigDatastore",
- InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue()),
- Duration.create(props.getShardTransactionIdleTimeoutInMinutes().getValue(),
- TimeUnit.MINUTES),
- props.getOperationTimeoutInSeconds().getValue());
+ DatastoreContext datastoreContext = DatastoreContext.newBuilder()
+ .dataStoreMXBeanType("DistributedConfigDatastore")
+ .dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
+ props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
+ props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
+ props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
+ props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()))
+ .shardTransactionIdleTimeout(Duration.create(
+ props.getShardTransactionIdleTimeoutInMinutes().getValue(), TimeUnit.MINUTES))
+ .operationTimeoutInSeconds(props.getOperationTimeoutInSeconds().getValue())
+ .shardJournalRecoveryLogBatchSize(props.getShardJournalRecoveryLogBatchSize().
+ getValue().intValue())
+ .shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
+ .shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
+ .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
+ TimeUnit.SECONDS)
+ .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
+ TimeUnit.SECONDS)
+ .shardTransactionCommitTimeoutInSeconds(
+ props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
+ .shardTransactionCommitQueueCapacity(
+ props.getShardTransactionCommitQueueCapacity().getValue().intValue())
+ .persistent(props.getPersistent().booleanValue())
+ .build();
return DistributedDataStoreFactory.createInstance("config", getConfigSchemaServiceDependency(),
datastoreContext, bundleContext);
props = new OperationalProperties();
}
- DatastoreContext datastoreContext = new DatastoreContext("DistributedOperationalDatastore",
- InMemoryDOMDataStoreConfigProperties.create(
- props.getMaxShardDataChangeExecutorPoolSize().getValue(),
- props.getMaxShardDataChangeExecutorQueueSize().getValue(),
- props.getMaxShardDataChangeListenerQueueSize().getValue(),
- props.getMaxShardDataStoreExecutorQueueSize().getValue()),
- Duration.create(props.getShardTransactionIdleTimeoutInMinutes().getValue(),
- TimeUnit.MINUTES),
- props.getOperationTimeoutInSeconds().getValue());
+ DatastoreContext datastoreContext = DatastoreContext.newBuilder()
+ .dataStoreMXBeanType("DistributedOperationalDatastore")
+ .dataStoreProperties(InMemoryDOMDataStoreConfigProperties.create(
+ props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
+ props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
+ props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
+ props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()))
+ .shardTransactionIdleTimeout(Duration.create(
+ props.getShardTransactionIdleTimeoutInMinutes().getValue(), TimeUnit.MINUTES))
+ .operationTimeoutInSeconds(props.getOperationTimeoutInSeconds().getValue())
+ .shardJournalRecoveryLogBatchSize(props.getShardJournalRecoveryLogBatchSize().
+ getValue().intValue())
+ .shardSnapshotBatchCount(props.getShardSnapshotBatchCount().getValue().intValue())
+ .shardHeartbeatIntervalInMillis(props.getShardHearbeatIntervalInMillis().getValue())
+ .shardInitializationTimeout(props.getShardInitializationTimeoutInSeconds().getValue(),
+ TimeUnit.SECONDS)
+ .shardLeaderElectionTimeout(props.getShardLeaderElectionTimeoutInSeconds().getValue(),
+ TimeUnit.SECONDS)
+ .shardTransactionCommitTimeoutInSeconds(
+ props.getShardTransactionCommitTimeoutInSeconds().getValue().intValue())
+ .shardTransactionCommitQueueCapacity(
+ props.getShardTransactionCommitQueueCapacity().getValue().intValue())
+ .persistent(props.getPersistent().booleanValue())
+ .build();
return DistributedDataStoreFactory.createInstance("operational",
getOperationalSchemaServiceDependency(), datastoreContext, bundleContext);
config:java-name-prefix DistributedOperationalDataStoreProvider;
}
- typedef non-zero-uint16-type {
- type uint16 {
+ typedef non-zero-uint32-type {
+ type uint32 {
range "1..max";
}
}
}
}
+ typedef heartbeat-interval-type {
+ type uint16 {
+ range "100..max";
+ }
+ }
+
grouping data-store-properties {
leaf max-shard-data-change-executor-queue-size {
default 1000;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum queue size for each shard's data store data change notification executor.";
}
leaf max-shard-data-change-executor-pool-size {
default 20;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum thread pool size for each shard's data store data change notification executor.";
}
leaf max-shard-data-change-listener-queue-size {
default 1000;
- type non-zero-uint16-type;
- description "The maximum queue size for each shard's data store data change listeners.";
+ type non-zero-uint32-type;
+ description "The maximum queue size for each shard's data store data change listener.";
}
leaf max-shard-data-store-executor-queue-size {
default 5000;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum queue size for each shard's data store executor.";
}
leaf shard-transaction-idle-timeout-in-minutes {
default 10;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.";
}
+ leaf shard-snapshot-batch-count {
+ default 20000;
+ type non-zero-uint32-type;
+ description "The minimum number of entries to be present in the in-memory journal log before a snapshot to be taken.";
+ }
+
+ leaf shard-hearbeat-interval-in-millis {
+ default 500;
+ type heartbeat-interval-type;
+ description "The interval at which a shard will send a heart beat message to its remote shard.";
+ }
+
leaf operation-timeout-in-seconds {
default 5;
type operation-timeout-type;
description "The maximum amount of time for akka operations (remote or local) to complete before failing.";
}
+ leaf shard-journal-recovery-log-batch-size {
+ default 5000;
+ type non-zero-uint32-type;
+ description "The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.";
+ }
+
+ leaf shard-transaction-commit-timeout-in-seconds {
+ default 30;
+ type non-zero-uint32-type;
+ description "The maximum amount of time a shard transaction three-phase commit can be idle without receiving the next messages before it aborts the transaction";
+ }
+
+ leaf shard-transaction-commit-queue-capacity {
+ default 20000;
+ type non-zero-uint32-type;
+ description "The maximum allowed capacity for each shard's transaction commit queue.";
+ }
+
+ leaf shard-initialization-timeout-in-seconds {
+ default 300; // 5 minutes
+ type non-zero-uint32-type;
+ description "The maximum amount of time to wait for a shard to initialize from persistence
+ on startup before failing an operation (eg transaction create and change
+ listener registration).";
+ }
+
+ leaf shard-leader-election-timeout-in-seconds {
+ default 30;
+ type non-zero-uint32-type;
+ description "The maximum amount of time to wait for a shard to elect a leader before failing
+ an operation (eg transaction create).";
+ }
+
leaf enable-metric-capture {
default false;
type boolean;
leaf bounded-mailbox-capacity {
default 1000;
- type non-zero-uint16-type;
+ type non-zero-uint32-type;
description "Max queue size that an actor's mailbox can reach";
}
+
+ leaf persistent {
+ default true;
+ type boolean;
+ description "Enable or disable data persistence";
+ }
}
// Augments the 'configuration' choice node under modules/module.
import akka.actor.ActorSystem;
import akka.testkit.JavaTestKit;
-import org.apache.commons.io.FileUtils;
+
import org.junit.AfterClass;
import org.junit.BeforeClass;
-import java.io.File;
import java.io.IOException;
public abstract class AbstractActorTest {
System.setProperty("shard.persistent", "false");
system = ActorSystem.create("test");
-
- deletePersistenceFiles();
}
@AfterClass
public static void tearDownClass() throws IOException {
JavaTestKit.shutdownActorSystem(system);
system = null;
-
- deletePersistenceFiles();
- }
-
- protected static void deletePersistenceFiles() throws IOException {
- File journal = new File("journal");
-
- if(journal.exists()) {
- FileUtils.deleteDirectory(journal);
- }
-
- File snapshots = new File("snapshots");
-
- if(snapshots.exists()){
- FileUtils.deleteDirectory(snapshots);
- }
-
}
protected ActorSystem getSystem() {
return system;
}
-
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorPath;
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.Props;
-import akka.event.Logging;
-import akka.testkit.JavaTestKit;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
-import org.opendaylight.controller.cluster.datastore.messages.WriteData;
-import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-import java.util.Collections;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-
-public class BasicIntegrationTest extends AbstractActorTest {
-
- @Test
- public void integrationTest() throws Exception{
- // System.setProperty("shard.persistent", "true");
- // This test will
- // - create a Shard
- // - initiate a transaction
- // - write something
- // - read the transaction for commit
- // - commit the transaction
-
-
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
- final SchemaContext schemaContext = TestModel.createTestContext();
- DatastoreContext datastoreContext = new DatastoreContext();
-
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, datastoreContext, TestModel.createTestContext());
- final ActorRef shard = getSystem().actorOf(props);
-
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
- shard.tell(new UpdateSchemaContext(schemaContext), getRef());
-
-
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(shard.path().toString())
- .message("Switching from state Candidate to Leader")
- .occurrences(1).exec();
-
- assertEquals(true, result);
-
- // Create a transaction on the shard
- shard.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.WRITE_ONLY.ordinal() ).toSerializable(), getRef());
-
- final ActorSelection transaction =
- new ExpectMsg<ActorSelection>(duration("3 seconds"), "CreateTransactionReply") {
- @Override
- protected ActorSelection match(Object in) {
- if (CreateTransactionReply.SERIALIZABLE_CLASS.equals(in.getClass())) {
- CreateTransactionReply reply = CreateTransactionReply.fromSerializable(in);
- return getSystem()
- .actorSelection(reply
- .getTransactionPath());
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertNotNull(transaction);
-
- System.out.println("Successfully created transaction");
-
- // 3. Write some data
- transaction.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), schemaContext).toSerializable(),
- getRef());
-
- Boolean writeDone = new ExpectMsg<Boolean>(duration("3 seconds"), "WriteDataReply") {
- @Override
- protected Boolean match(Object in) {
- if (in.getClass().equals(WriteDataReply.SERIALIZABLE_CLASS)) {
- return true;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertTrue(writeDone);
-
- System.out.println("Successfully wrote data");
-
- // 4. Ready the transaction for commit
-
- transaction.tell(new ReadyTransaction().toSerializable(), getRef());
-
- final ActorSelection cohort =
- new ExpectMsg<ActorSelection>(duration("3 seconds"), "ReadyTransactionReply") {
- @Override
- protected ActorSelection match(Object in) {
- if (in.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
- ActorPath cohortPath =
- ReadyTransactionReply.fromSerializable(getSystem(),in)
- .getCohortPath();
- return getSystem()
- .actorSelection(cohortPath);
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertNotNull(cohort);
-
- System.out.println("Successfully readied the transaction");
-
- // 5. PreCommit the transaction
-
- cohort.tell(new PreCommitTransaction().toSerializable(), getRef());
-
- Boolean preCommitDone =
- new ExpectMsg<Boolean>(duration("3 seconds"), "PreCommitTransactionReply") {
- @Override
- protected Boolean match(Object in) {
- if (in.getClass().equals(PreCommitTransactionReply.SERIALIZABLE_CLASS)) {
- return true;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertTrue(preCommitDone);
-
- System.out.println("Successfully pre-committed the transaction");
-
- // 6. Commit the transaction
- cohort.tell(new CommitTransaction().toSerializable(), getRef());
-
- // FIXME : Add assertions that the commit worked and that the cohort and transaction actors were terminated
-
- System.out.println("TODO : Check Successfully committed the transaction");
- }
-
-
- };
- }
-
- private ActorRef watchActor(ActorSelection actor) {
- Future<ActorRef> future = actor
- .resolveOne(FiniteDuration.apply(100, "milliseconds"));
-
- try {
- ActorRef actorRef = Await.result(future,
- FiniteDuration.apply(100, "milliseconds"));
-
- watch(actorRef);
-
- return actorRef;
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
-
- }
- };
-
-
- }
-}
ActorContext
testContext = new ActorContext(getSystem(), getSystem().actorOf(Props.create(DoNothingActor.class)), new MockClusterWrapper(), new MockConfiguration());
Object messages = testContext
- .executeLocalOperation(actorRef, "messages");
+ .executeOperation(actorRef, "messages");
Assert.assertNotNull(messages);
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
package org.opendaylight.controller.cluster.datastore;
+import java.util.concurrent.TimeUnit;
import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
import akka.actor.Props;
-import junit.framework.Assert;
+import akka.actor.Terminated;
+import akka.dispatch.ExecutionContexts;
+import akka.dispatch.Futures;
+import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import org.junit.Assert;
import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
-import org.opendaylight.controller.cluster.datastore.utils.MessageCollectorActor;
-import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
-import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.Uninterruptibles;
+import scala.concurrent.ExecutionContextExecutor;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.eq;
-import java.util.List;
+/**
+ * Unit tests for DataChangeListenerRegistrationProxy.
+ *
+ * @author Thomas Pantelis
+ */
+public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest {
-import static junit.framework.TestCase.assertEquals;
-import static junit.framework.TestCase.assertNotNull;
-import static junit.framework.TestCase.assertTrue;
+ @SuppressWarnings("unchecked")
+ private final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> mockListener =
+ Mockito.mock(AsyncDataChangeListener.class);
-public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest{
+ @Test
+ public void testGetInstance() throws Exception {
+ DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ "shard", Mockito.mock(ActorContext.class), mockListener);
+
+ Assert.assertEquals(mockListener, proxy.getInstance());
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test(timeout=10000)
+ public void testSuccessfulRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
+
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ "shard-1", actorContext, mockListener);
+
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final DataChangeScope scope = AsyncDataBroker.DataChangeScope.ONE;
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init(path, scope);
+ }
+
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new LocalShardFound(getRef()));
+
+ RegisterChangeListener registerMsg = expectMsgClass(timeout, RegisterChangeListener.class);
+ Assert.assertEquals("getPath", path, registerMsg.getPath());
+ Assert.assertEquals("getScope", scope, registerMsg.getScope());
+
+ reply(new RegisterChangeListenerReply(getRef().path()));
+
+ for(int i = 0; (i < 20 * 5) && proxy.getListenerRegistrationActor() == null; i++) {
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.assertEquals("getListenerRegistrationActor", getSystem().actorSelection(getRef().path()),
+ proxy.getListenerRegistrationActor());
+
+ watch(proxy.getDataChangeListenerActor());
- private ActorRef dataChangeListenerActor = getSystem().actorOf(Props.create(DoNothingActor.class));
+ proxy.close();
- private static class MockDataChangeListener implements
- AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> {
+ // The listener registration actor should get a Close message
+ expectMsgClass(timeout, CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS);
- @Override public void onDataChanged(
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
- throw new UnsupportedOperationException("onDataChanged");
- }
+ // The DataChangeListener actor should be terminated
+ expectMsgClass(timeout, Terminated.class);
+
+ proxy.close();
+
+ expectNoMsg();
+ }};
}
- @Test
- public void testGetInstance() throws Exception {
- final Props props = Props.create(MessageCollectorActor.class);
- final ActorRef actorRef = getSystem().actorOf(props);
+ @Test(timeout=10000)
+ public void testLocalShardNotFound() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
- MockDataChangeListener listener =
- new MockDataChangeListener();
- DataChangeListenerRegistrationProxy proxy =
- new DataChangeListenerRegistrationProxy(
- getSystem().actorSelection(actorRef.path()),
- listener, dataChangeListenerActor);
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ "shard-1", actorContext, mockListener);
- Assert.assertEquals(listener, proxy.getInstance());
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final DataChangeScope scope = AsyncDataBroker.DataChangeScope.ONE;
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init(path, scope);
+ }
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new LocalShardNotFound("shard-1"));
+
+ expectNoMsg(duration("1 seconds"));
+ }};
}
- @Test
- public void testClose() throws Exception {
- final Props props = Props.create(MessageCollectorActor.class);
- final ActorRef actorRef = getSystem().actorOf(props);
+ @Test(timeout=10000)
+ public void testLocalShardNotInitialized() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = new ActorContext(getSystem(), getRef(),
+ mock(ClusterWrapper.class), mock(Configuration.class));
- DataChangeListenerRegistrationProxy proxy =
- new DataChangeListenerRegistrationProxy(
- getSystem().actorSelection(actorRef.path()),
- new MockDataChangeListener(), dataChangeListenerActor);
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ "shard-1", actorContext, mockListener);
- proxy.close();
+ final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+ final DataChangeScope scope = AsyncDataBroker.DataChangeScope.ONE;
+ new Thread() {
+ @Override
+ public void run() {
+ proxy.init(path, scope);
+ }
+
+ }.start();
+
+ FiniteDuration timeout = duration("5 seconds");
+ FindLocalShard findLocalShard = expectMsgClass(timeout, FindLocalShard.class);
+ Assert.assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+
+ reply(new ActorNotInitialized());
+
+ new Within(duration("1 seconds")) {
+ @Override
+ protected void run() {
+ expectNoMsg();
+ }
+ };
+ }};
+ }
+
+ @Test
+ public void testFailedRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorSystem mockActorSystem = mock(ActorSystem.class);
- //Check if it was received by the remote actor
- ActorContext
- testContext = new ActorContext(getSystem(), getSystem().actorOf(Props.create(DoNothingActor.class)),new MockClusterWrapper(), new MockConfiguration());
- Object messages = testContext
- .executeLocalOperation(actorRef, "messages");
+ ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class),
+ "testFailedRegistration");
+ doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
+ ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(
+ MoreExecutors.sameThreadExecutor());
+ doReturn(executor).when(mockActorSystem).dispatcher();
- assertNotNull(messages);
+ ActorContext actorContext = mock(ActorContext.class);
- assertTrue(messages instanceof List);
+ String shardName = "shard-1";
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ shardName, actorContext, mockListener);
- List<Object> listMessages = (List<Object>) messages;
+ doReturn(mockActorSystem).when(actorContext).getActorSystem();
+ doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
+ doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
+ doReturn(Futures.failed(new RuntimeException("mock"))).
+ when(actorContext).executeOperationAsync(any(ActorRef.class),
+ any(Object.class), any(Timeout.class));
- assertEquals(1, listMessages.size());
+ proxy.init(YangInstanceIdentifier.of(TestModel.TEST_QNAME),
+ AsyncDataBroker.DataChangeScope.ONE);
- assertTrue(listMessages.get(0).getClass()
- .equals(CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS));
+ Assert.assertEquals("getListenerRegistrationActor", null,
+ proxy.getListenerRegistrationActor());
+ }};
}
+ @SuppressWarnings("unchecked")
@Test
- public void testCloseWhenRegistrationIsNull() throws Exception {
- final Props props = Props.create(MessageCollectorActor.class);
- final ActorRef actorRef = getSystem().actorOf(props);
+ public void testCloseBeforeRegistration() {
+ new JavaTestKit(getSystem()) {{
+ ActorContext actorContext = mock(ActorContext.class);
- DataChangeListenerRegistrationProxy proxy =
- new DataChangeListenerRegistrationProxy(
- new MockDataChangeListener(), dataChangeListenerActor);
+ String shardName = "shard-1";
+ final DataChangeListenerRegistrationProxy proxy = new DataChangeListenerRegistrationProxy(
+ shardName, actorContext, mockListener);
- proxy.close();
+ doReturn(DatastoreContext.newBuilder().build()).when(actorContext).getDatastoreContext();
+ doReturn(getSystem()).when(actorContext).getActorSystem();
+ doReturn(getSystem().actorSelection(getRef().path())).
+ when(actorContext).actorSelection(getRef().path());
+ doReturn(duration("5 seconds")).when(actorContext).getOperationDuration();
+ doReturn(Futures.successful(getRef())).when(actorContext).findLocalShardAsync(eq(shardName));
- //Check if it was received by the remote actor
- ActorContext
- testContext = new ActorContext(getSystem(), getSystem().actorOf(Props.create(DoNothingActor.class)),new MockClusterWrapper(), new MockConfiguration());
- Object messages = testContext
- .executeLocalOperation(actorRef, "messages");
+ Answer<Future<Object>> answer = new Answer<Future<Object>>() {
+ @Override
+ public Future<Object> answer(InvocationOnMock invocation) {
+ proxy.close();
+ return Futures.successful((Object)new RegisterChangeListenerReply(getRef().path()));
+ }
+ };
- assertNotNull(messages);
+ doAnswer(answer).when(actorContext).executeOperationAsync(any(ActorRef.class),
+ any(Object.class), any(Timeout.class));
- assertTrue(messages instanceof List);
+ proxy.init(YangInstanceIdentifier.of(TestModel.TEST_QNAME),
+ AsyncDataBroker.DataChangeScope.ONE);
- List<Object> listMessages = (List<Object>) messages;
+ expectMsgClass(duration("5 seconds"), CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS);
- assertEquals(0, listMessages.size());
+ Assert.assertEquals("getListenerRegistrationActor", null,
+ proxy.getListenerRegistrationActor());
+ }};
}
}
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
+import akka.actor.DeadLetter;
import akka.actor.Props;
import akka.testkit.JavaTestKit;
import org.junit.Test;
+import org.mockito.Mockito;
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
import org.opendaylight.controller.cluster.datastore.messages.DataChangedReply;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.md.cluster.datastore.model.CompositeModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
public class DataChangeListenerTest extends AbstractActorTest {
- private static class MockDataChangedEvent implements AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> {
- Map<YangInstanceIdentifier,NormalizedNode<?,?>> createdData = new HashMap<>();
- Map<YangInstanceIdentifier,NormalizedNode<?,?>> updatedData = new HashMap<>();
- Map<YangInstanceIdentifier,NormalizedNode<?,?>> originalData = new HashMap<>();
-
-
-
- @Override
- public Map<YangInstanceIdentifier, NormalizedNode<?, ?>> getCreatedData() {
- createdData.put(CompositeModel.FAMILY_PATH, CompositeModel.createFamily());
- return createdData;
- }
-
- @Override
- public Map<YangInstanceIdentifier, NormalizedNode<?, ?>> getUpdatedData() {
- updatedData.put(CompositeModel.FAMILY_PATH, CompositeModel.createFamily());
- return updatedData;
-
- }
-
- @Override
- public Set<YangInstanceIdentifier> getRemovedPaths() {
- Set<YangInstanceIdentifier>ids = new HashSet();
- ids.add( CompositeModel.TEST_PATH);
- return ids;
- }
-
- @Override
- public Map<YangInstanceIdentifier, NormalizedNode<?, ?>> getOriginalData() {
- originalData.put(CompositeModel.FAMILY_PATH, CompositeModel.createFamily());
- return originalData;
- }
-
- @Override public NormalizedNode<?, ?> getOriginalSubtree() {
-
-
- return originalData.put(CompositeModel.FAMILY_PATH, CompositeModel.createFamily());
- }
-
- @Override public NormalizedNode<?, ?> getUpdatedSubtree() {
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ @Test
+ public void testDataChangedWhenNotificationsAreEnabled(){
+ new JavaTestKit(getSystem()) {{
+ final AsyncDataChangeEvent mockChangeEvent = Mockito.mock(AsyncDataChangeEvent.class);
+ final AsyncDataChangeListener mockListener = Mockito.mock(AsyncDataChangeListener.class);
+ final Props props = DataChangeListener.props(mockListener);
+ final ActorRef subject = getSystem().actorOf(props, "testDataChangedNotificationsEnabled");
- //fixme: need to have some valid data here
- return originalData.put(CompositeModel.FAMILY_PATH, CompositeModel.createFamily());
- }
- }
+ // Let the DataChangeListener know that notifications should be enabled
+ subject.tell(new EnableNotification(true), getRef());
- private class MockDataChangeListener implements AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> {
- private boolean gotIt = false;
- private AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change;
+ subject.tell(new DataChanged(CompositeModel.createTestContext(), mockChangeEvent),
+ getRef());
- @Override public void onDataChanged(
- AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
- gotIt = true;this.change=change;
- }
+ expectMsgClass(DataChangedReply.class);
- public boolean gotIt() {
- return gotIt;
- }
- public AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> getChange(){
- return change;
- }
+ Mockito.verify(mockListener).onDataChanged(mockChangeEvent);
+ }};
}
+ @SuppressWarnings({ "rawtypes", "unchecked" })
@Test
- public void testDataChangedWhenNotificationsAreEnabled(){
+ public void testDataChangedWhenNotificationsAreDisabled(){
new JavaTestKit(getSystem()) {{
- final MockDataChangeListener listener = new MockDataChangeListener();
- final Props props = DataChangeListener.props(listener);
+ final AsyncDataChangeEvent mockChangeEvent = Mockito.mock(AsyncDataChangeEvent.class);
+ final AsyncDataChangeListener mockListener = Mockito.mock(AsyncDataChangeListener.class);
+ final Props props = DataChangeListener.props(mockListener);
final ActorRef subject =
- getSystem().actorOf(props, "testDataChangedNotificationsEnabled");
+ getSystem().actorOf(props, "testDataChangedNotificationsDisabled");
+
+ subject.tell(new DataChanged(CompositeModel.createTestContext(), mockChangeEvent),
+ getRef());
new Within(duration("1 seconds")) {
@Override
protected void run() {
-
- // Let the DataChangeListener know that notifications should
- // be enabled
- subject.tell(new EnableNotification(true), getRef());
-
- subject.tell(
- new DataChanged(CompositeModel.createTestContext(),new MockDataChangedEvent()),
- getRef());
-
- final Boolean out = new ExpectMsg<Boolean>(duration("800 millis"), "dataChanged") {
- // do not put code outside this method, will run afterwards
- @Override
- protected Boolean match(Object in) {
- if (in != null && in.getClass().equals(DataChangedReply.class)) {
-
- return true;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertTrue(out);
- assertTrue(listener.gotIt());
- assertNotNull(listener.getChange().getCreatedData());
-
expectNoMsg();
+
+ Mockito.verify(mockListener, Mockito.never()).onDataChanged(
+ Mockito.any(AsyncDataChangeEvent.class));
}
};
}};
}
+ @SuppressWarnings({ "rawtypes", "unchecked" })
@Test
- public void testDataChangedWhenNotificationsAreDisabled(){
+ public void testDataChangedWithNoSender(){
new JavaTestKit(getSystem()) {{
- final MockDataChangeListener listener = new MockDataChangeListener();
- final Props props = DataChangeListener.props(listener);
- final ActorRef subject =
- getSystem().actorOf(props, "testDataChangedNotificationsDisabled");
+ final AsyncDataChangeEvent mockChangeEvent = Mockito.mock(AsyncDataChangeEvent.class);
+ final AsyncDataChangeListener mockListener = Mockito.mock(AsyncDataChangeListener.class);
+ final Props props = DataChangeListener.props(mockListener);
+ final ActorRef subject = getSystem().actorOf(props, "testDataChangedWithNoSender");
+ // Let the DataChangeListener know that notifications should be enabled
+ subject.tell(new EnableNotification(true), ActorRef.noSender());
+
+ subject.tell(new DataChanged(CompositeModel.createTestContext(), mockChangeEvent),
+ ActorRef.noSender());
+
+ getSystem().eventStream().subscribe(getRef(), DeadLetter.class);
new Within(duration("1 seconds")) {
@Override
protected void run() {
-
- subject.tell(
- new DataChanged(CompositeModel.createTestContext(),new MockDataChangedEvent()),
- getRef());
-
expectNoMsg();
}
};
package org.opendaylight.controller.cluster.datastore;
+import akka.actor.ActorRef;
import akka.actor.ActorSystem;
-import akka.event.Logging;
-import akka.testkit.JavaTestKit;
-
+import akka.actor.PoisonPill;
import com.google.common.base.Optional;
-import com.google.common.util.concurrent.ListenableFuture;
-
-import junit.framework.Assert;
-
-import org.apache.commons.io.FileUtils;
-import org.junit.After;
-import org.junit.Before;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
+import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-
-import java.io.File;
-import java.io.IOException;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
-public class DistributedDataStoreIntegrationTest {
+public class DistributedDataStoreIntegrationTest extends AbstractActorTest {
- private static ActorSystem system;
+ private final DatastoreContext.Builder datastoreContextBuilder =
+ DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100);
- @Before
- public void setUp() throws IOException {
- File journal = new File("journal");
+ @Test
+ public void testWriteTransactionWithSingleShard() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore =
+ setupDistributedDataStore("transactionIntegrationTest", "test-1");
- if(journal.exists()) {
- FileUtils.deleteDirectory(journal);
- }
+ testWriteTransaction(dataStore, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
- System.setProperty("shard.persistent", "false");
- system = ActorSystem.create("test");
+ cleanup(dataStore);
+ }};
}
- @After
- public void tearDown() {
- JavaTestKit.shutdownActorSystem(system);
- system = null;
+ @Test
+ public void testWriteTransactionWithMultipleShards() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore =
+ setupDistributedDataStore("testWriteTransactionWithMultipleShards", "cars-1", "people-1");
+
+ DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newWriteOnlyTransaction returned null", writeTx);
+
+ YangInstanceIdentifier nodePath1 = CarsModel.BASE_PATH;
+ NormalizedNode<?, ?> nodeToWrite1 = CarsModel.emptyContainer();
+ writeTx.write(nodePath1, nodeToWrite1);
+
+ YangInstanceIdentifier nodePath2 = PeopleModel.BASE_PATH;
+ NormalizedNode<?, ?> nodeToWrite2 = PeopleModel.emptyContainer();
+ writeTx.write(nodePath2, nodeToWrite2);
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+
+ Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
+ assertEquals("canCommit", true, canCommit);
+ cohort.preCommit().get(5, TimeUnit.SECONDS);
+ cohort.commit().get(5, TimeUnit.SECONDS);
+
+ // Verify the data in the store
+
+ DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
+
+ Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath1).get();
+ assertEquals("isPresent", true, optional.isPresent());
+ assertEquals("Data node", nodeToWrite1, optional.get());
+
+ optional = readTx.read(nodePath2).get();
+ assertEquals("isPresent", true, optional.isPresent());
+ assertEquals("Data node", nodeToWrite2, optional.get());
+
+ cleanup(dataStore);
+ }};
+ }
+
+ @Test
+ public void testReadWriteTransaction() throws Exception{
+ System.setProperty("shard.persistent", "true");
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore =
+ setupDistributedDataStore("testReadWriteTransaction", "test-1");
+
+ // 1. Create a read-write Tx
+
+ DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
+ assertNotNull("newReadWriteTransaction returned null", readWriteTx);
+
+ // 2. Write some data
+
+ YangInstanceIdentifier nodePath = TestModel.TEST_PATH;
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ readWriteTx.write(nodePath, nodeToWrite );
+
+ // 3. Read the data from Tx
+
+ Boolean exists = readWriteTx.exists(nodePath).checkedGet(5, TimeUnit.SECONDS);
+ assertEquals("exists", true, exists);
+
+ Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+ assertEquals("Data node", nodeToWrite, optional.get());
+
+ // 4. Ready the Tx for commit
+
+ DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready();
+
+ // 5. Commit the Tx
+
+ Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
+ assertEquals("canCommit", true, canCommit);
+ cohort.preCommit().get(5, TimeUnit.SECONDS);
+ cohort.commit().get(5, TimeUnit.SECONDS);
+
+ // 6. Verify the data in the store
+
+ DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
+
+ optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+ assertEquals("Data node", nodeToWrite, optional.get());
+
+ cleanup(dataStore);
+ }};
}
- protected ActorSystem getSystem() {
- return system;
+ @Test
+ public void testTransactionWritesWithShardNotInitiallyReady() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionWritesWithShardNotInitiallyReady";
+ String shardName = "test-1";
+
+ // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
+ // initialized until we create and submit the write the Tx.
+ String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
+ CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
+ InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
+
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
+
+ // Create the write Tx
+
+ final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newReadWriteTransaction returned null", writeTx);
+
+ // Do some modification operations and ready the Tx on a separate thread.
+
+ final YangInstanceIdentifier listEntryPath = YangInstanceIdentifier.builder(
+ TestModel.OUTER_LIST_PATH).nodeWithKey(TestModel.OUTER_LIST_QNAME,
+ TestModel.ID_QNAME, 1).build();
+
+ final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReady = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ writeTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ writeTx.merge(TestModel.OUTER_LIST_PATH, ImmutableNodes.mapNodeBuilder(
+ TestModel.OUTER_LIST_QNAME).build());
+
+ writeTx.write(listEntryPath, ImmutableNodes.mapEntry(
+ TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
+
+ writeTx.delete(listEntryPath);
+
+ txCohort.set(writeTx.ready());
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReady.countDown();
+ }
+ }
+ };
+
+ txThread.start();
+
+ // Wait for the Tx operations to complete.
+
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Tx ready", true, done);
+
+ // At this point the Tx operations should be waiting for the shard to initialize so
+ // trigger the latch to let the shard recovery to continue.
+
+ blockRecoveryLatch.countDown();
+
+ // Wait for the Tx commit to complete.
+
+ assertEquals("canCommit", true, txCohort.get().canCommit().get(5, TimeUnit.SECONDS));
+ txCohort.get().preCommit().get(5, TimeUnit.SECONDS);
+ txCohort.get().commit().get(5, TimeUnit.SECONDS);
+
+ // Verify the data in the store
+
+ DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
+
+ Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).
+ get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+
+ optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+
+ optional = readTx.read(listEntryPath).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", false, optional.isPresent());
+
+ cleanup(dataStore);
+ }};
}
@Test
- public void integrationTest() throws Exception {
- final Configuration configuration = new ConfigurationImpl("module-shards.conf", "modules.conf");
- ShardStrategyFactory.setConfiguration(configuration);
+ public void testTransactionReadsWithShardNotInitiallyReady() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionReadsWithShardNotInitiallyReady";
+ String shardName = "test-1";
+
+ // Setup the InMemoryJournal to block shard recovery to ensure the shard isn't
+ // initialized until we create the Tx.
+ String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
+ CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
+ InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
+
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
+
+ // Create the read-write Tx
+
+ final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
+ assertNotNull("newReadWriteTransaction returned null", readWriteTx);
+
+ // Do some reads on the Tx on a separate thread.
+
+ final AtomicReference<CheckedFuture<Boolean, ReadFailedException>> txExistsFuture =
+ new AtomicReference<>();
+ final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
+ txReadFuture = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReadsDone = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ readWriteTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ txExistsFuture.set(readWriteTx.exists(TestModel.TEST_PATH));
+
+ txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReadsDone.countDown();
+ }
+ }
+ };
+ txThread.start();
+ // Wait for the Tx operations to complete.
- new JavaTestKit(getSystem()) {
- {
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReadsDone, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
- try {
- final DistributedDataStore distributedDataStore =
- new DistributedDataStore(getSystem(), "config",
- new MockClusterWrapper(), configuration,
- new DatastoreContext());
+ assertEquals("Tx reads done", true, done);
- distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
+ // At this point the Tx operations should be waiting for the shard to initialize so
+ // trigger the latch to let the shard recovery to continue.
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from("akka://test/user/shardmanager-config/member-1-shard-test-1-config")
- .message("Switching from state Candidate to Leader")
- .occurrences(1).exec();
+ blockRecoveryLatch.countDown();
- assertEquals(true, result);
+ // Wait for the reads to complete and verify.
- DOMStoreReadWriteTransaction transaction =
- distributedDataStore.newReadWriteTransaction();
+ assertEquals("exists", true, txExistsFuture.get().checkedGet(5, TimeUnit.SECONDS));
+ assertEquals("read", true, txReadFuture.get().checkedGet(5, TimeUnit.SECONDS).isPresent());
- transaction
- .write(TestModel.TEST_PATH, ImmutableNodes
- .containerNode(TestModel.TEST_QNAME));
+ readWriteTx.close();
- ListenableFuture<Optional<NormalizedNode<?, ?>>>
- future =
- transaction.read(TestModel.TEST_PATH);
+ cleanup(dataStore);
+ }};
+ }
- Optional<NormalizedNode<?, ?>> optional =
- future.get();
+ @Test(expected=NotInitializedException.class)
+ public void testTransactionCommitFailureWithShardNotInitialized() throws Throwable{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionCommitFailureWithShardNotInitialized";
+ String shardName = "test-1";
- Assert.assertTrue("Node not found", optional.isPresent());
+ // Set the shard initialization timeout low for the test.
- NormalizedNode<?, ?> normalizedNode =
- optional.get();
+ datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
- assertEquals(TestModel.TEST_QNAME,
- normalizedNode.getNodeType());
+ // Setup the InMemoryJournal to block shard recovery indefinitely.
- DOMStoreThreePhaseCommitCohort ready =
- transaction.ready();
+ String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
+ CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
+ InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
- ListenableFuture<Boolean> canCommit =
- ready.canCommit();
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
- assertTrue(canCommit.get(5, TimeUnit.SECONDS));
+ // Create the write Tx
- ListenableFuture<Void> preCommit =
- ready.preCommit();
+ final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newReadWriteTransaction returned null", writeTx);
- preCommit.get(5, TimeUnit.SECONDS);
+ // Do some modifications and ready the Tx on a separate thread.
- ListenableFuture<Void> commit = ready.commit();
+ final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReady = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ writeTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- commit.get(5, TimeUnit.SECONDS);
- } catch (ExecutionException | TimeoutException | InterruptedException e){
- fail(e.getMessage());
- }
+ txCohort.set(writeTx.ready());
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReady.countDown();
}
- };
+ }
+ };
+
+ txThread.start();
+
+ // Wait for the Tx operations to complete.
+
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
}
- };
+ assertEquals("Tx ready", true, done);
+
+ // Wait for the commit to complete. Since the shard never initialized, the Tx should
+ // have timed out and throw an appropriate exception cause.
+
+ try {
+ txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
+ } catch(ExecutionException e) {
+ throw e.getCause();
+ } finally {
+ blockRecoveryLatch.countDown();
+ cleanup(dataStore);
+ }
+ }};
}
- @Test
- public void transactionChainIntegrationTest() throws Exception {
- final Configuration configuration = new ConfigurationImpl("module-shards.conf", "modules.conf");
- ShardStrategyFactory.setConfiguration(configuration);
+ @Test(expected=NotInitializedException.class)
+ public void testTransactionReadFailureWithShardNotInitialized() throws Throwable{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionReadFailureWithShardNotInitialized";
+ String shardName = "test-1";
+
+ // Set the shard initialization timeout low for the test.
+ datastoreContextBuilder.shardInitializationTimeout(300, TimeUnit.MILLISECONDS);
+ // Setup the InMemoryJournal to block shard recovery indefinitely.
- new JavaTestKit(getSystem()) {
- {
+ String persistentID = String.format("member-1-shard-%s-%s", shardName, testName);
+ CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
+ InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
- try {
- final DistributedDataStore distributedDataStore =
- new DistributedDataStore(getSystem(), "config",
- new MockClusterWrapper(), configuration,
- new DatastoreContext());
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
- distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
+ // Create the read-write Tx
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from("akka://test/user/shardmanager-config/member-1-shard-test-1-config")
- .message("Switching from state Candidate to Leader")
- .occurrences(1).exec();
+ final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
+ assertNotNull("newReadWriteTransaction returned null", readWriteTx);
- assertEquals(true, result);
+ // Do a read on the Tx on a separate thread.
- DOMStoreTransactionChain transactionChain =
- distributedDataStore.createTransactionChain();
+ final AtomicReference<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>>
+ txReadFuture = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReadDone = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ readWriteTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
- DOMStoreReadWriteTransaction transaction =
- transactionChain.newReadWriteTransaction();
+ txReadFuture.set(readWriteTx.read(TestModel.TEST_PATH));
+
+ readWriteTx.close();
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReadDone.countDown();
+ }
+ }
+ };
- transaction
- .write(TestModel.TEST_PATH, ImmutableNodes
- .containerNode(TestModel.TEST_QNAME));
+ txThread.start();
- ListenableFuture<Optional<NormalizedNode<?, ?>>>
- future =
- transaction.read(TestModel.TEST_PATH);
+ // Wait for the Tx operations to complete.
- Optional<NormalizedNode<?, ?>> optional =
- future.get();
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReadDone, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
- Assert.assertTrue("Node not found", optional.isPresent());
+ assertEquals("Tx read done", true, done);
- NormalizedNode<?, ?> normalizedNode =
- optional.get();
+ // Wait for the read to complete. Since the shard never initialized, the Tx should
+ // have timed out and throw an appropriate exception cause.
- assertEquals(TestModel.TEST_QNAME,
- normalizedNode.getNodeType());
+ try {
+ txReadFuture.get().checkedGet(5, TimeUnit.SECONDS);
+ } catch(ReadFailedException e) {
+ throw e.getCause();
+ } finally {
+ blockRecoveryLatch.countDown();
+ cleanup(dataStore);
+ }
+ }};
+ }
- DOMStoreThreePhaseCommitCohort ready =
- transaction.ready();
+ @Test(expected=NoShardLeaderException.class)
+ public void testTransactionCommitFailureWithNoShardLeader() throws Throwable{
+ new IntegrationTestKit(getSystem()) {{
+ String testName = "testTransactionCommitFailureWithNoShardLeader";
+ String shardName = "test-1";
- ListenableFuture<Boolean> canCommit =
- ready.canCommit();
+ // We don't want the shard to become the leader so prevent shard election from completing
+ // by setting the election timeout, which is based on the heartbeat interval, really high.
- assertTrue(canCommit.get(5, TimeUnit.SECONDS));
+ datastoreContextBuilder.shardHeartbeatIntervalInMillis(30000);
- ListenableFuture<Void> preCommit =
- ready.preCommit();
+ // Set the leader election timeout low for the test.
- preCommit.get(5, TimeUnit.SECONDS);
+ datastoreContextBuilder.shardLeaderElectionTimeout(1, TimeUnit.MILLISECONDS);
- ListenableFuture<Void> commit = ready.commit();
+ DistributedDataStore dataStore = setupDistributedDataStore(testName, false, shardName);
- commit.get(5, TimeUnit.SECONDS);
+ // Create the write Tx.
- transactionChain.close();
- } catch (ExecutionException | TimeoutException | InterruptedException e){
- fail(e.getMessage());
- }
+ final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newReadWriteTransaction returned null", writeTx);
+
+ // Do some modifications and ready the Tx on a separate thread.
+
+ final AtomicReference<DOMStoreThreePhaseCommitCohort> txCohort = new AtomicReference<>();
+ final AtomicReference<Exception> caughtEx = new AtomicReference<>();
+ final CountDownLatch txReady = new CountDownLatch(1);
+ Thread txThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ writeTx.write(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ txCohort.set(writeTx.ready());
+ } catch(Exception e) {
+ caughtEx.set(e);
+ return;
+ } finally {
+ txReady.countDown();
}
- };
+ }
+ };
+
+ txThread.start();
+
+ // Wait for the Tx operations to complete.
+
+ boolean done = Uninterruptibles.awaitUninterruptibly(txReady, 5, TimeUnit.SECONDS);
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
}
- };
+ assertEquals("Tx ready", true, done);
+
+ // Wait for the commit to complete. Since no shard leader was elected in time, the Tx
+ // should have timed out and throw an appropriate exception cause.
+
+ try {
+ txCohort.get().canCommit().get(5, TimeUnit.SECONDS);
+ } catch(ExecutionException e) {
+ throw e.getCause();
+ } finally {
+ cleanup(dataStore);
+ }
+ }};
}
+ @Test
+ public void testTransactionAbort() throws Exception{
+ System.setProperty("shard.persistent", "true");
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore =
+ setupDistributedDataStore("transactionAbortIntegrationTest", "test-1");
+
+ DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newWriteOnlyTransaction returned null", writeTx);
+
+ writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+
+ cohort.canCommit().get(5, TimeUnit.SECONDS);
+
+ cohort.abort().get(5, TimeUnit.SECONDS);
+
+ testWriteTransaction(dataStore, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ cleanup(dataStore);
+ }};
+ }
+
+ @Test
+ public void testTransactionChain() throws Exception{
+ System.setProperty("shard.persistent", "true");
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore =
+ setupDistributedDataStore("transactionChainIntegrationTest", "test-1");
+
+ // 1. Create a Tx chain and write-only Tx
+
+ DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
+
+ DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+ assertNotNull("newWriteOnlyTransaction returned null", writeTx);
- //FIXME : Disabling test because it's flaky
- //@Test
- public void integrationTestWithMultiShardConfiguration()
- throws ExecutionException, InterruptedException, TimeoutException {
- final Configuration configuration = new ConfigurationImpl("module-shards.conf", "modules.conf");
+ // 2. Write some data
- ShardStrategyFactory.setConfiguration(configuration);
+ NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ writeTx.write(TestModel.TEST_PATH, containerNode);
- new JavaTestKit(getSystem()) {
- {
+ // 3. Ready the Tx for commit
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
- try {
- final DistributedDataStore distributedDataStore =
- new DistributedDataStore(getSystem(), "config",
- new MockClusterWrapper(), configuration, null);
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
- distributedDataStore.onGlobalContextUpdated(
- SchemaContextHelper.full());
+ // 4. Commit the Tx
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(
- Logging.Info.class
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(
- "akka://test/user/shardmanager-config/member-1-shard-cars-1-config")
- .message(
- "Switching from state Candidate to Leader")
- .occurrences(1)
- .exec();
+ Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
+ assertEquals("canCommit", true, canCommit);
+ cohort.preCommit().get(5, TimeUnit.SECONDS);
+ cohort.commit().get(5, TimeUnit.SECONDS);
- Thread.sleep(1000);
+ // 5. Verify the data in the store
+ DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
- DOMStoreReadWriteTransaction transaction =
- distributedDataStore.newReadWriteTransaction();
+ Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+ assertEquals("Data node", containerNode, optional.get());
- transaction.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
- transaction.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
+ txChain.close();
- DOMStoreThreePhaseCommitCohort ready = transaction.ready();
+ cleanup(dataStore);
+ }};
+ }
+
+ @Test
+ public void testChangeListenerRegistration() throws Exception{
+ new IntegrationTestKit(getSystem()) {{
+ DistributedDataStore dataStore =
+ setupDistributedDataStore("testChangeListenerRegistration", "test-1");
+
+ MockDataChangeListener listener = new MockDataChangeListener(3);
+
+ ListenerRegistration<MockDataChangeListener>
+ listenerReg = dataStore.registerChangeListener(TestModel.TEST_PATH, listener,
+ DataChangeScope.SUBTREE);
+
+ assertNotNull("registerChangeListener returned null", listenerReg);
+
+ testWriteTransaction(dataStore, TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
- ListenableFuture<Boolean> canCommit = ready.canCommit();
+ YangInstanceIdentifier listPath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
+ nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build();
+ testWriteTransaction(dataStore, listPath,
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
- assertTrue(canCommit.get(5, TimeUnit.SECONDS));
+ listener.waitForChangeEvents(TestModel.TEST_PATH, TestModel.OUTER_LIST_PATH, listPath );
- ListenableFuture<Void> preCommit = ready.preCommit();
+ listenerReg.close();
- preCommit.get(5, TimeUnit.SECONDS);
+ testWriteTransaction(dataStore, YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH).
+ nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2));
- ListenableFuture<Void> commit = ready.commit();
+ listener.expectNoMoreChanges("Received unexpected change after close");
- commit.get(5, TimeUnit.SECONDS);
+ cleanup(dataStore);
+ }};
+ }
+
+ class IntegrationTestKit extends ShardTestKit {
+
+ IntegrationTestKit(ActorSystem actorSystem) {
+ super(actorSystem);
+ }
- assertEquals(true, result);
- } catch(ExecutionException | TimeoutException | InterruptedException e){
- fail(e.getMessage());
+ DistributedDataStore setupDistributedDataStore(String typeName, String... shardNames) {
+ return setupDistributedDataStore(typeName, true, shardNames);
+ }
+
+ DistributedDataStore setupDistributedDataStore(String typeName, boolean waitUntilLeader,
+ String... shardNames) {
+ MockClusterWrapper cluster = new MockClusterWrapper();
+ Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
+ ShardStrategyFactory.setConfiguration(config);
+
+ DatastoreContext datastoreContext = datastoreContextBuilder.build();
+ DistributedDataStore dataStore = new DistributedDataStore(getSystem(), typeName, cluster,
+ config, datastoreContext);
+
+ SchemaContext schemaContext = SchemaContextHelper.full();
+ dataStore.onGlobalContextUpdated(schemaContext);
+
+ if(waitUntilLeader) {
+ for(String shardName: shardNames) {
+ ActorRef shard = null;
+ for(int i = 0; i < 20 * 5 && shard == null; i++) {
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ Optional<ActorRef> shardReply = dataStore.getActorContext().findLocalShard(shardName);
+ if(shardReply.isPresent()) {
+ shard = shardReply.get();
}
}
- };
+
+ assertNotNull("Shard was not created", shard);
+
+ waitUntilLeader(shard);
+ }
}
- };
+ return dataStore;
+ }
+
+ void testWriteTransaction(DistributedDataStore dataStore, YangInstanceIdentifier nodePath,
+ NormalizedNode<?, ?> nodeToWrite) throws Exception {
+
+ // 1. Create a write-only Tx
+
+ DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+ assertNotNull("newWriteOnlyTransaction returned null", writeTx);
+
+ // 2. Write some data
+
+ writeTx.write(nodePath, nodeToWrite);
+
+ // 3. Ready the Tx for commit
+
+ DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+ // 4. Commit the Tx
+
+ Boolean canCommit = cohort.canCommit().get(5, TimeUnit.SECONDS);
+ assertEquals("canCommit", true, canCommit);
+ cohort.preCommit().get(5, TimeUnit.SECONDS);
+ cohort.commit().get(5, TimeUnit.SECONDS);
+
+ // 5. Verify the data in the store
+
+ DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
+
+ Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
+ assertEquals("isPresent", true, optional.isPresent());
+ assertEquals("Data node", nodeToWrite, optional.get());
+ }
+
+ void cleanup(DistributedDataStore dataStore) {
+ dataStore.getActorContext().getShardManager().tell(PoisonPill.getInstance(), null);
+ }
}
}
+++ /dev/null
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorPath;
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.ExecutionContexts;
-import akka.dispatch.Futures;
-import akka.util.Timeout;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
-import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
-import org.opendaylight.controller.cluster.datastore.utils.MockActorContext;
-import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
-import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.ExecutionContextExecutor;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-import java.util.concurrent.TimeUnit;
-
-import static junit.framework.TestCase.assertEquals;
-import static junit.framework.TestCase.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-public class DistributedDataStoreTest extends AbstractActorTest{
-
- private DistributedDataStore distributedDataStore;
- private MockActorContext mockActorContext;
- private ActorRef doNothingActorRef;
-
- @Before
- public void setUp() throws Exception {
- ShardStrategyFactory.setConfiguration(new MockConfiguration());
- final Props props = Props.create(DoNothingActor.class);
-
- doNothingActorRef = getSystem().actorOf(props);
-
- mockActorContext = new MockActorContext(getSystem(), doNothingActorRef);
- distributedDataStore = new DistributedDataStore(mockActorContext);
- distributedDataStore.onGlobalContextUpdated(
- TestModel.createTestContext());
-
- // Make CreateTransactionReply as the default response. Will need to be
- // tuned if a specific test requires some other response
- mockActorContext.setExecuteShardOperationResponse(
- CreateTransactionReply.newBuilder()
- .setTransactionActorPath(doNothingActorRef.path().toString())
- .setTransactionId("txn-1 ")
- .build());
- }
-
- @After
- public void tearDown() throws Exception {
-
- }
-
- @SuppressWarnings("resource")
- @Test
- public void testConstructor(){
- ActorSystem actorSystem = mock(ActorSystem.class);
-
- new DistributedDataStore(actorSystem, "config",
- mock(ClusterWrapper.class), mock(Configuration.class),
- new DatastoreContext());
-
- verify(actorSystem).actorOf(any(Props.class), eq("shardmanager-config"));
- }
-
- @Test
- public void testRegisterChangeListenerWhenShardIsNotLocal() throws Exception {
-
- ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH, new AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>() {
- @Override
- public void onDataChanged(AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
- throw new UnsupportedOperationException("onDataChanged");
- }
- }, AsyncDataBroker.DataChangeScope.BASE);
-
- // Since we do not expect the shard to be local registration will return a NoOpRegistration
- assertTrue(registration instanceof NoOpDataChangeListenerRegistration);
-
- assertNotNull(registration);
- }
-
- @Test
- public void testRegisterChangeListenerWhenShardIsLocal() throws Exception {
- ActorContext actorContext = mock(ActorContext.class);
-
- distributedDataStore = new DistributedDataStore(actorContext);
- distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
-
- Future future = mock(Future.class);
- when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
- when(actorContext.getActorSystem()).thenReturn(getSystem());
- when(actorContext
- .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(future);
-
- ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
- mock(AsyncDataChangeListener.class),
- AsyncDataBroker.DataChangeScope.BASE);
-
- assertNotNull(registration);
-
- assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
- }
-
- @Test
- public void testRegisterChangeListenerWhenSuccessfulReplyReceived() throws Exception {
- ActorContext actorContext = mock(ActorContext.class);
-
- distributedDataStore = new DistributedDataStore(actorContext);
- distributedDataStore.onGlobalContextUpdated(
- TestModel.createTestContext());
-
- ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor());
-
- // Make Future successful
- Future f = Futures.successful(new RegisterChangeListenerReply(doNothingActorRef.path()));
-
- // Setup the mocks
- ActorSystem actorSystem = mock(ActorSystem.class);
- ActorSelection actorSelection = mock(ActorSelection.class);
-
- when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
- when(actorSystem.dispatcher()).thenReturn(executor);
- when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef);
- when(actorContext.getActorSystem()).thenReturn(actorSystem);
- when(actorContext
- .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(f);
- when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection);
-
- ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
- mock(AsyncDataChangeListener.class),
- AsyncDataBroker.DataChangeScope.BASE);
-
- assertNotNull(registration);
-
- assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
-
- ActorSelection listenerRegistrationActor =
- ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor();
-
- assertNotNull(listenerRegistrationActor);
-
- assertEquals(actorSelection, listenerRegistrationActor);
- }
-
- @Test
- public void testRegisterChangeListenerWhenSuccessfulReplyFailed() throws Exception {
- ActorContext actorContext = mock(ActorContext.class);
-
- distributedDataStore = new DistributedDataStore(actorContext);
- distributedDataStore.onGlobalContextUpdated(
- TestModel.createTestContext());
-
- ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor());
-
- // Make Future fail
- Future f = Futures.failed(new IllegalArgumentException());
-
- // Setup the mocks
- ActorSystem actorSystem = mock(ActorSystem.class);
- ActorSelection actorSelection = mock(ActorSelection.class);
-
- when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
- when(actorSystem.dispatcher()).thenReturn(executor);
- when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef);
- when(actorContext.getActorSystem()).thenReturn(actorSystem);
- when(actorContext
- .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(f);
- when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection);
-
- ListenerRegistration registration =
- distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
- mock(AsyncDataChangeListener.class),
- AsyncDataBroker.DataChangeScope.BASE);
-
- assertNotNull(registration);
-
- assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
-
- ActorSelection listenerRegistrationActor =
- ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor();
-
- assertNull(listenerRegistrationActor);
-
- }
-
-
- @Test
- public void testCreateTransactionChain() throws Exception {
- final DOMStoreTransactionChain transactionChain = distributedDataStore.createTransactionChain();
- assertNotNull(transactionChain);
- }
-
- @Test
- public void testNewReadOnlyTransaction() throws Exception {
- final DOMStoreReadTransaction transaction = distributedDataStore.newReadOnlyTransaction();
- assertNotNull(transaction);
- }
-
- @Test
- public void testNewWriteOnlyTransaction() throws Exception {
- final DOMStoreWriteTransaction transaction = distributedDataStore.newWriteOnlyTransaction();
- assertNotNull(transaction);
- }
-
- @Test
- public void testNewReadWriteTransaction() throws Exception {
- final DOMStoreReadWriteTransaction transaction = distributedDataStore.newReadWriteTransaction();
- assertNotNull(transaction);
- }
-}
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
import akka.actor.Props;
+import akka.japi.Creator;
+import akka.pattern.Patterns;
+import akka.persistence.RecoveryCompleted;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
-import junit.framework.Assert;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import akka.util.Timeout;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
+import org.opendaylight.controller.cluster.datastore.messages.ActorNotInitialized;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import scala.concurrent.duration.Duration;
-
-import static junit.framework.Assert.assertEquals;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+
+import java.net.URI;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class ShardManagerTest extends AbstractActorTest {
+ private static int ID_COUNTER = 1;
+
+ private final String shardMrgIDSuffix = "config" + ID_COUNTER++;
+ private final String shardMgrID = "shard-manager-" + shardMrgIDSuffix;
+
+ private static ActorRef mockShardActor;
-public class ShardManagerTest {
- private static ActorSystem system;
+ @Before
+ public void setUp() {
+ InMemoryJournal.clear();
- @BeforeClass
- public static void setUp() {
- system = ActorSystem.create("test");
+ if(mockShardActor == null) {
+ String name = new ShardIdentifier(Shard.DEFAULT_NAME, "member-1","config").toString();
+ mockShardActor = getSystem().actorOf(Props.create(DoNothingActor.class), name);
+ }
}
- @AfterClass
- public static void tearDown() {
- JavaTestKit.shutdownActorSystem(system);
- system = null;
+ @After
+ public void tearDown() {
+ InMemoryJournal.clear();
+ }
+
+ private Props newShardMgrProps() {
+ return ShardManager.props(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().build());
}
@Test
public void testOnReceiveFindPrimaryForNonExistentShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- new JavaTestKit(system) {{
- final Props props = ShardManager
- .props("config", new MockClusterWrapper(),
- new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
+ shardManager.tell(new FindPrimary("non-existent", false).toSerializable(), getRef());
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
+ expectMsgEquals(duration("5 seconds"),
+ new PrimaryNotFound("non-existent").toSerializable());
+ }};
+ }
- subject.tell(new FindPrimary("inventory").toSerializable(), getRef());
+ @Test
+ public void testOnReceiveFindPrimaryForExistentShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
- expectMsgEquals(Duration.Zero(),
- new PrimaryNotFound("inventory").toSerializable());
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new ActorInitialized(), mockShardActor);
- expectNoMsg();
- }
- };
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false).toSerializable(), getRef());
+
+ expectMsgClass(duration("5 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
}};
}
@Test
- public void testOnReceiveFindPrimaryForExistentShard() throws Exception {
+ public void testOnReceiveFindPrimaryForNotInitializedShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
- new JavaTestKit(system) {{
- final Props props = ShardManager
- .props("config", new MockClusterWrapper(),
- new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false).toSerializable(), getRef());
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
+ expectMsgClass(duration("5 seconds"), ActorNotInitialized.class);
+ }};
+ }
- subject.tell(new FindPrimary(Shard.DEFAULT_NAME).toSerializable(), getRef());
+ @Test
+ public void testOnReceiveFindPrimaryWaitForShardInitialized() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
- expectMsgClass(duration("1 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- expectNoMsg();
- }
- };
+ // We're passing waitUntilInitialized = true to FindPrimary so the response should be
+ // delayed until we send ActorInitialized.
+ Future<Object> future = Patterns.ask(shardManager, new FindPrimary(Shard.DEFAULT_NAME, true),
+ new Timeout(5, TimeUnit.SECONDS));
+
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+
+ Object resp = Await.result(future, duration("5 seconds"));
+ assertTrue("Expected: PrimaryFound, Actual: " + resp, resp instanceof PrimaryFound);
}};
}
@Test
public void testOnReceiveFindLocalShardForNonExistentShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
- new JavaTestKit(system) {{
- final Props props = ShardManager
- .props("config", new MockClusterWrapper(),
- new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
-
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new FindLocalShard("inventory"), getRef());
-
- final String out = new ExpectMsg<String>(duration("10 seconds"), "find local") {
- @Override
- protected String match(Object in) {
- if (in instanceof LocalShardNotFound) {
- return ((LocalShardNotFound) in).getShardName();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("inventory", out);
-
- expectNoMsg();
- }
- };
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ shardManager.tell(new FindLocalShard("non-existent", false), getRef());
+
+ LocalShardNotFound notFound = expectMsgClass(duration("5 seconds"), LocalShardNotFound.class);
+
+ assertEquals("getShardName", "non-existent", notFound.getShardName());
}};
}
@Test
public void testOnReceiveFindLocalShardForExistentShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
+
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+
+ shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), getRef());
+
+ LocalShardFound found = expectMsgClass(duration("5 seconds"), LocalShardFound.class);
- final MockClusterWrapper mockClusterWrapper = new MockClusterWrapper();
+ assertTrue("Found path contains " + found.getPath().path().toString(),
+ found.getPath().path().toString().contains("member-1-shard-default-config"));
+ }};
+ }
- new JavaTestKit(system) {{
- final Props props = ShardManager
- .props("config", mockClusterWrapper,
- new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
+ @Test
+ public void testOnReceiveFindLocalShardForNotInitializedShard() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
- subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
+ shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), getRef());
- subject.tell(new FindLocalShard(Shard.DEFAULT_NAME), getRef());
+ expectMsgClass(duration("5 seconds"), ActorNotInitialized.class);
+ }};
+ }
- final ActorRef out = new ExpectMsg<ActorRef>(duration("10 seconds"), "find local") {
- @Override
- protected ActorRef match(Object in) {
- if (in instanceof LocalShardFound) {
- return ((LocalShardFound) in).getPath();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ @Test
+ public void testOnReceiveFindLocalShardWaitForShardInitialized() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
- assertTrue(out.path().toString(), out.path().toString().contains("member-1-shard-default-config"));
+ shardManager.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+ // We're passing waitUntilInitialized = true to FindLocalShard so the response should be
+ // delayed until we send ActorInitialized.
+ Future<Object> future = Patterns.ask(shardManager, new FindLocalShard(Shard.DEFAULT_NAME, true),
+ new Timeout(5, TimeUnit.SECONDS));
- expectNoMsg();
- }
- };
+ shardManager.tell(new ActorInitialized(), mockShardActor);
+
+ Object resp = Await.result(future, duration("5 seconds"));
+ assertTrue("Expected: LocalShardFound, Actual: " + resp, resp instanceof LocalShardFound);
}};
}
@Test
public void testOnReceiveMemberUp() throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
- new JavaTestKit(system) {{
- final Props props = ShardManager
- .props("config", new MockClusterWrapper(),
- new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
-
- // the run() method needs to finish within 3 seconds
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
-
- MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
-
- subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "primary found") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
- PrimaryFound f = PrimaryFound.fromSerializable(in);
- return f.getPrimaryPath();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- Assert.assertTrue(out, out.contains("member-2-shard-astronauts-config"));
-
- expectNoMsg();
- }
- };
+ MockClusterWrapper.sendMemberUp(shardManager, "member-2", getRef().path().toString());
+
+ shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
+
+ PrimaryFound found = PrimaryFound.fromSerializable(expectMsgClass(duration("5 seconds"),
+ PrimaryFound.SERIALIZABLE_CLASS));
+ String path = found.getPrimaryPath();
+ assertTrue("Found path contains " + path, path.contains("member-2-shard-astronauts-config"));
}};
}
@Test
public void testOnReceiveMemberDown() throws Exception {
- new JavaTestKit(system) {{
- final Props props = ShardManager
- .props("config", new MockClusterWrapper(),
- new MockConfiguration(), new DatastoreContext());
- final TestActorRef<ShardManager> subject =
- TestActorRef.create(system, props);
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shardManager = getSystem().actorOf(newShardMgrProps());
- // the run() method needs to finish within 3 seconds
- new Within(duration("10 seconds")) {
- @Override
- protected void run() {
+ MockClusterWrapper.sendMemberUp(shardManager, "member-2", getRef().path().toString());
- MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
+ shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
- subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+ expectMsgClass(duration("5 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
- expectMsgClass(duration("1 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+ MockClusterWrapper.sendMemberRemoved(shardManager, "member-2", getRef().path().toString());
- MockClusterWrapper.sendMemberRemoved(subject, "member-2", getRef().path().toString());
+ shardManager.tell(new FindPrimary("astronauts", false).toSerializable(), getRef());
- subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+ expectMsgClass(duration("5 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+ }};
+ }
- expectMsgClass(duration("1 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+ @Test
+ public void testOnRecoveryJournalIsCleaned() {
+ InMemoryJournal.addEntry(shardMgrID, 1L, new ShardManager.SchemaContextModules(
+ ImmutableSet.of("foo")));
+ InMemoryJournal.addEntry(shardMgrID, 2L, new ShardManager.SchemaContextModules(
+ ImmutableSet.of("bar")));
+ InMemoryJournal.addDeleteMessagesCompleteLatch(shardMgrID);
+
+ new JavaTestKit(getSystem()) {{
+ TestActorRef<TestShardManager> shardManager = TestActorRef.create(getSystem(),
+ Props.create(new TestShardManagerCreator(shardMrgIDSuffix)));
+
+ shardManager.underlyingActor().waitForRecoveryComplete();
+ InMemoryJournal.waitForDeleteMessagesComplete(shardMgrID);
+
+ // Journal entries up to the last one should've been deleted
+ Map<Long, Object> journal = InMemoryJournal.get(shardMgrID);
+ synchronized (journal) {
+ assertEquals("Journal size", 1, journal.size());
+ assertEquals("Journal entry seq #", Long.valueOf(2), journal.keySet().iterator().next());
+ }
+ }};
+ }
+
+ @Test
+ public void testOnRecoveryPreviouslyKnownModulesAreDiscovered() throws Exception {
+ final ImmutableSet<String> persistedModules = ImmutableSet.of("foo", "bar");
+ InMemoryJournal.addEntry(shardMgrID, 1L, new ShardManager.SchemaContextModules(
+ persistedModules));
+ new JavaTestKit(getSystem()) {{
+ TestActorRef<TestShardManager> shardManager = TestActorRef.create(getSystem(),
+ Props.create(new TestShardManagerCreator(shardMrgIDSuffix)));
+
+ shardManager.underlyingActor().waitForRecoveryComplete();
+
+ Collection<String> knownModules = shardManager.underlyingActor().getKnownModules();
+
+ assertEquals("getKnownModules", persistedModules, Sets.newHashSet(knownModules));
+ }};
+ }
+
+ @Test
+ public void testOnUpdateSchemaContextUpdateKnownModulesIfTheyContainASuperSetOfTheKnownModules()
+ throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final TestActorRef<ShardManager> shardManager =
+ TestActorRef.create(getSystem(), newShardMgrProps());
+
+ assertEquals("getKnownModules size", 0, shardManager.underlyingActor().getKnownModules().size());
+
+ ModuleIdentifier foo = mock(ModuleIdentifier.class);
+ when(foo.getNamespace()).thenReturn(new URI("foo"));
+
+ Set<ModuleIdentifier> moduleIdentifierSet = new HashSet<>();
+ moduleIdentifierSet.add(foo);
+
+ SchemaContext schemaContext = mock(SchemaContext.class);
+ when(schemaContext.getAllModuleIdentifiers()).thenReturn(moduleIdentifierSet);
+
+ shardManager.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertEquals("getKnownModules", Sets.newHashSet("foo"),
+ Sets.newHashSet(shardManager.underlyingActor().getKnownModules()));
+
+ ModuleIdentifier bar = mock(ModuleIdentifier.class);
+ when(bar.getNamespace()).thenReturn(new URI("bar"));
+
+ moduleIdentifierSet.add(bar);
+
+ shardManager.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertEquals("getKnownModules", Sets.newHashSet("foo", "bar"),
+ Sets.newHashSet(shardManager.underlyingActor().getKnownModules()));
+ }};
+ }
+
+ @Test
+ public void testOnUpdateSchemaContextDoNotUpdateKnownModulesIfTheyDoNotContainASuperSetOfKnownModules()
+ throws Exception {
+ new JavaTestKit(getSystem()) {{
+ final TestActorRef<ShardManager> shardManager =
+ TestActorRef.create(getSystem(), newShardMgrProps());
+
+ SchemaContext schemaContext = mock(SchemaContext.class);
+ Set<ModuleIdentifier> moduleIdentifierSet = new HashSet<>();
+
+ ModuleIdentifier foo = mock(ModuleIdentifier.class);
+ when(foo.getNamespace()).thenReturn(new URI("foo"));
+
+ moduleIdentifierSet.add(foo);
+
+ when(schemaContext.getAllModuleIdentifiers()).thenReturn(moduleIdentifierSet);
+
+ shardManager.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertEquals("getKnownModules", Sets.newHashSet("foo"),
+ Sets.newHashSet(shardManager.underlyingActor().getKnownModules()));
+
+ //Create a completely different SchemaContext with only the bar module in it
+ //schemaContext = mock(SchemaContext.class);
+ moduleIdentifierSet.clear();
+ ModuleIdentifier bar = mock(ModuleIdentifier.class);
+ when(bar.getNamespace()).thenReturn(new URI("bar"));
+
+ moduleIdentifierSet.add(bar);
+
+ shardManager.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertEquals("getKnownModules", Sets.newHashSet("foo"),
+ Sets.newHashSet(shardManager.underlyingActor().getKnownModules()));
+
+ }};
+ }
+
+ @Test
+ public void testRecoveryApplicable(){
+ new JavaTestKit(getSystem()) {
+ {
+ final Props persistentProps = ShardManager.props(shardMrgIDSuffix,
+ new MockClusterWrapper(),
+ new MockConfiguration(),
+ DatastoreContext.newBuilder().persistent(true).build());
+ final TestActorRef<ShardManager> persistentShardManager =
+ TestActorRef.create(getSystem(), persistentProps);
+
+ DataPersistenceProvider dataPersistenceProvider1 = persistentShardManager.underlyingActor().getDataPersistenceProvider();
+
+ assertTrue("Recovery Applicable", dataPersistenceProvider1.isRecoveryApplicable());
+
+ final Props nonPersistentProps = ShardManager.props(shardMrgIDSuffix,
+ new MockClusterWrapper(),
+ new MockConfiguration(),
+ DatastoreContext.newBuilder().persistent(false).build());
+ final TestActorRef<ShardManager> nonPersistentShardManager =
+ TestActorRef.create(getSystem(), nonPersistentProps);
+
+ DataPersistenceProvider dataPersistenceProvider2 = nonPersistentShardManager.underlyingActor().getDataPersistenceProvider();
+
+ assertFalse("Recovery Not Applicable", dataPersistenceProvider2.isRecoveryApplicable());
+
+
+ }};
+
+ }
+
+ @Test
+ public void testOnUpdateSchemaContextUpdateKnownModulesCallsDataPersistenceProvider()
+ throws Exception {
+ final CountDownLatch persistLatch = new CountDownLatch(1);
+ final Creator<ShardManager> creator = new Creator<ShardManager>() {
+ @Override
+ public ShardManager create() throws Exception {
+ return new ShardManager(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(), DatastoreContext.newBuilder().build()) {
+ @Override
+ protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
+ DataPersistenceProviderMonitor dataPersistenceProviderMonitor
+ = new DataPersistenceProviderMonitor();
+ dataPersistenceProviderMonitor.setPersistLatch(persistLatch);
+ return dataPersistenceProviderMonitor;
+ }
+ };
+ }
+ };
+
+ new JavaTestKit(getSystem()) {{
+
+ final TestActorRef<ShardManager> shardManager =
+ TestActorRef.create(getSystem(), Props.create(new DelegatingShardManagerCreator(creator)));
+
+ ModuleIdentifier foo = mock(ModuleIdentifier.class);
+ when(foo.getNamespace()).thenReturn(new URI("foo"));
+
+ Set<ModuleIdentifier> moduleIdentifierSet = new HashSet<>();
+ moduleIdentifierSet.add(foo);
+
+ SchemaContext schemaContext = mock(SchemaContext.class);
+ when(schemaContext.getAllModuleIdentifiers()).thenReturn(moduleIdentifierSet);
+
+ shardManager.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+ assertEquals("Persisted", true,
+ Uninterruptibles.awaitUninterruptibly(persistLatch, 5, TimeUnit.SECONDS));
- expectNoMsg();
- }
- };
}};
}
+
+ private static class TestShardManager extends ShardManager {
+ private final CountDownLatch recoveryComplete = new CountDownLatch(1);
+
+ TestShardManager(String shardMrgIDSuffix) {
+ super(shardMrgIDSuffix, new MockClusterWrapper(), new MockConfiguration(),
+ DatastoreContext.newBuilder().build());
+ }
+
+ @Override
+ public void handleRecover(Object message) throws Exception {
+ try {
+ super.handleRecover(message);
+ } finally {
+ if(message instanceof RecoveryCompleted) {
+ recoveryComplete.countDown();
+ }
+ }
+ }
+
+ void waitForRecoveryComplete() {
+ assertEquals("Recovery complete", true,
+ Uninterruptibles.awaitUninterruptibly(recoveryComplete, 5, TimeUnit.SECONDS));
+ }
+ }
+
+ @SuppressWarnings("serial")
+ static class TestShardManagerCreator implements Creator<TestShardManager> {
+ String shardMrgIDSuffix;
+
+ TestShardManagerCreator(String shardMrgIDSuffix) {
+ this.shardMrgIDSuffix = shardMrgIDSuffix;
+ }
+
+ @Override
+ public TestShardManager create() throws Exception {
+ return new TestShardManager(shardMrgIDSuffix);
+ }
+
+ }
+
+ private static class DelegatingShardManagerCreator implements Creator<ShardManager> {
+ private Creator<ShardManager> delegate;
+
+ public DelegatingShardManagerCreator(Creator<ShardManager> delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public ShardManager create() throws Exception {
+ return delegate.create();
+ }
+ }
}
package org.opendaylight.controller.cluster.datastore;
import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
import akka.actor.Props;
-import akka.event.Logging;
-import akka.testkit.JavaTestKit;
+import akka.dispatch.Dispatchers;
+import akka.dispatch.OnComplete;
+import akka.japi.Creator;
+import akka.pattern.Patterns;
import akka.testkit.TestActorRef;
+import akka.util.Timeout;
+import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
-import org.junit.Assert;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
+import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.datastore.utils.MockDataChangeListener;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
import java.io.IOException;
import java.util.Collections;
-import java.util.HashMap;
+import java.util.HashSet;
import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+
public class ShardTest extends AbstractActorTest {
- private static final DatastoreContext DATA_STORE_CONTEXT = new DatastoreContext();
+ private static final SchemaContext SCHEMA_CONTEXT = TestModel.createTestContext();
+
+ private static final AtomicInteger NEXT_SHARD_NUM = new AtomicInteger();
+
+ private final ShardIdentifier shardID = ShardIdentifier.builder().memberName("member-1")
+ .shardName("inventory").type("config" + NEXT_SHARD_NUM.getAndIncrement()).build();
+
+ private DatastoreContext dataStoreContext = DatastoreContext.newBuilder().
+ shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).
+ shardHeartbeatIntervalInMillis(100).build();
+
+ @Before
+ public void setUp() {
+ InMemorySnapshotStore.clear();
+ InMemoryJournal.clear();
+ }
+
+ @After
+ public void tearDown() {
+ InMemorySnapshotStore.clear();
+ InMemoryJournal.clear();
+ }
+
+ private Props newShardProps() {
+ return Shard.props(shardID, Collections.<ShardIdentifier,String>emptyMap(),
+ dataStoreContext, SCHEMA_CONTEXT);
+ }
@Test
- public void testOnReceiveRegisterListener() throws Exception {
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
+ public void testRegisterChangeListener() throws Exception {
+ new ShardTestKit(getSystem()) {{
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps(), "testRegisterChangeListener");
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testRegisterChangeListener");
+ waitUntilLeader(shard);
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ shard.tell(new UpdateSchemaContext(SchemaContextHelper.full()), ActorRef.noSender());
- subject.tell(
- new UpdateSchemaContext(SchemaContextHelper.full()),
- getRef());
+ MockDataChangeListener listener = new MockDataChangeListener(1);
+ ActorRef dclActor = getSystem().actorOf(DataChangeListener.props(listener),
+ "testRegisterChangeListener-DataChangeListener");
- subject.tell(new RegisterChangeListener(TestModel.TEST_PATH,
- getRef().path(), AsyncDataBroker.DataChangeScope.BASE),
- getRef());
+ shard.tell(new RegisterChangeListener(TestModel.TEST_PATH,
+ dclActor.path(), AsyncDataBroker.DataChangeScope.BASE), getRef());
- final Boolean notificationEnabled = new ExpectMsg<Boolean>(
- duration("3 seconds"), "enable notification") {
- // do not put code outside this method, will run afterwards
- @Override
- protected Boolean match(Object in) {
- if(in instanceof EnableNotification){
- return ((EnableNotification) in).isEnabled();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ RegisterChangeListenerReply reply = expectMsgClass(duration("3 seconds"),
+ RegisterChangeListenerReply.class);
+ String replyPath = reply.getListenerRegistrationPath().toString();
+ assertTrue("Incorrect reply path: " + replyPath, replyPath.matches(
+ "akka:\\/\\/test\\/user\\/testRegisterChangeListener\\/\\$.*"));
+
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ listener.waitForChangeEvents(path);
- assertFalse(notificationEnabled);
+ dclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @SuppressWarnings("serial")
+ @Test
+ public void testChangeListenerNotifiedWhenNotTheLeaderOnRegistration() throws Exception {
+ // This test tests the timing window in which a change listener is registered before the
+ // shard becomes the leader. We verify that the listener is registered and notified of the
+ // existing data when the shard becomes the leader.
+ new ShardTestKit(getSystem()) {{
+ // For this test, we want to send the RegisterChangeListener message after the shard
+ // has recovered from persistence and before it becomes the leader. So we subclass
+ // Shard to override onReceiveCommand and, when the first ElectionTimeout is received,
+ // we know that the shard has been initialized to a follower and has started the
+ // election process. The following 2 CountDownLatches are used to coordinate the
+ // ElectionTimeout with the sending of the RegisterChangeListener message.
+ final CountDownLatch onFirstElectionTimeout = new CountDownLatch(1);
+ final CountDownLatch onChangeListenerRegistered = new CountDownLatch(1);
+ Creator<Shard> creator = new Creator<Shard>() {
+ boolean firstElectionTimeout = true;
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
+ @Override
+ public Shard create() throws Exception {
+ return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
+ dataStoreContext, SCHEMA_CONTEXT) {
@Override
- protected String match(Object in) {
- if (in.getClass().equals(RegisterChangeListenerReply.class)) {
- RegisterChangeListenerReply reply =
- (RegisterChangeListenerReply) in;
- return reply.getListenerRegistrationPath()
- .toString();
+ public void onReceiveCommand(final Object message) throws Exception {
+ if(message instanceof ElectionTimeout && firstElectionTimeout) {
+ // Got the first ElectionTimeout. We don't forward it to the
+ // base Shard yet until we've sent the RegisterChangeListener
+ // message. So we signal the onFirstElectionTimeout latch to tell
+ // the main thread to send the RegisterChangeListener message and
+ // start a thread to wait on the onChangeListenerRegistered latch,
+ // which the main thread signals after it has sent the message.
+ // After the onChangeListenerRegistered is triggered, we send the
+ // original ElectionTimeout message to proceed with the election.
+ firstElectionTimeout = false;
+ final ActorRef self = getSelf();
+ new Thread() {
+ @Override
+ public void run() {
+ Uninterruptibles.awaitUninterruptibly(
+ onChangeListenerRegistered, 5, TimeUnit.SECONDS);
+ self.tell(message, self);
+ }
+ }.start();
+
+ onFirstElectionTimeout.countDown();
} else {
- throw noMatch();
+ super.onReceiveCommand(message);
}
}
- }.get(); // this extracts the received message
-
- assertTrue(out.matches(
- "akka:\\/\\/test\\/user\\/testRegisterChangeListener\\/\\$.*"));
+ };
}
+ };
+ MockDataChangeListener listener = new MockDataChangeListener(1);
+ ActorRef dclActor = getSystem().actorOf(DataChangeListener.props(listener),
+ "testRegisterChangeListenerWhenNotLeaderInitially-DataChangeListener");
- };
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(creator)),
+ "testRegisterChangeListenerWhenNotLeaderInitially");
+
+ // Write initial data into the in-memory store.
+ YangInstanceIdentifier path = TestModel.TEST_PATH;
+ writeToStore(shard, path, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ // Wait until the shard receives the first ElectionTimeout message.
+ assertEquals("Got first ElectionTimeout", true,
+ onFirstElectionTimeout.await(5, TimeUnit.SECONDS));
+
+ // Now send the RegisterChangeListener and wait for the reply.
+ shard.tell(new RegisterChangeListener(path, dclActor.path(),
+ AsyncDataBroker.DataChangeScope.SUBTREE), getRef());
+
+ RegisterChangeListenerReply reply = expectMsgClass(duration("5 seconds"),
+ RegisterChangeListenerReply.class);
+ assertNotNull("getListenerRegistrationPath", reply.getListenerRegistrationPath());
+
+ // Sanity check - verify the shard is not the leader yet.
+ shard.tell(new FindLeader(), getRef());
+ FindLeaderReply findLeadeReply =
+ expectMsgClass(duration("5 seconds"), FindLeaderReply.class);
+ assertNull("Expected the shard not to be the leader", findLeadeReply.getLeaderActor());
+
+ // Signal the onChangeListenerRegistered latch to tell the thread above to proceed
+ // with the election process.
+ onChangeListenerRegistered.countDown();
+
+ // Wait for the shard to become the leader and notify our listener with the existing
+ // data in the store.
+ listener.waitForChangeEvents(path);
+
+ dclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
public void testCreateTransaction(){
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testCreateTransaction");
-
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(subject.path().toString())
- .message("Switching from state Candidate to Leader")
- .occurrences(1).exec();
+ new ShardTestKit(getSystem()) {{
+ ActorRef shard = getSystem().actorOf(newShardProps(), "testCreateTransaction");
- Assert.assertEquals(true, result);
+ waitUntilLeader(shard);
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ shard.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
+
+ shard.tell(new CreateTransaction("txn-1",
+ TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(), getRef());
+
+ CreateTransactionReply reply = expectMsgClass(duration("3 seconds"),
+ CreateTransactionReply.class);
+
+ String path = reply.getTransactionActorPath().toString();
+ assertTrue("Unexpected transaction path " + path,
+ path.contains("akka://test/user/testCreateTransaction/shard-txn-1"));
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testCreateTransactionOnChain(){
+ new ShardTestKit(getSystem()) {{
+ final ActorRef shard = getSystem().actorOf(newShardProps(), "testCreateTransactionOnChain");
+
+ waitUntilLeader(shard);
+
+ shard.tell(new CreateTransaction("txn-1",
+ TransactionProxy.TransactionType.READ_ONLY.ordinal() , "foobar").toSerializable(),
+ getRef());
+
+ CreateTransactionReply reply = expectMsgClass(duration("3 seconds"),
+ CreateTransactionReply.class);
- subject.tell(
- new UpdateSchemaContext(TestModel.createTestContext()),
- getRef());
+ String path = reply.getTransactionActorPath().toString();
+ assertTrue("Unexpected transaction path " + path,
+ path.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
- subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(),
- getRef());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testPeerAddressResolved() throws Exception {
+ new ShardTestKit(getSystem()) {{
+ final CountDownLatch recoveryComplete = new CountDownLatch(1);
+ class TestShard extends Shard {
+ TestShard() {
+ super(shardID, Collections.<ShardIdentifier, String>singletonMap(shardID, null),
+ dataStoreContext, SCHEMA_CONTEXT);
+ }
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
+ Map<String, String> getPeerAddresses() {
+ return getRaftActorContext().getPeerAddresses();
+ }
+
+ @Override
+ protected void onRecoveryComplete() {
+ try {
+ super.onRecoveryComplete();
+ } finally {
+ recoveryComplete.countDown();
+ }
+ }
+ }
+
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(new Creator<Shard>() {
@Override
- protected String match(Object in) {
- if (in instanceof CreateTransactionReply) {
- CreateTransactionReply reply =
- (CreateTransactionReply) in;
- return reply.getTransactionActorPath()
- .toString();
- } else {
- throw noMatch();
- }
+ public TestShard create() throws Exception {
+ return new TestShard();
}
- }.get(); // this extracts the received message
+ })), "testPeerAddressResolved");
- assertTrue("Unexpected transaction path " + out,
- out.contains("akka://test/user/testCreateTransaction/shard-txn-1"));
- expectNoMsg();
- }
- };
+ //waitUntilLeader(shard);
+ assertEquals("Recovery complete", true,
+ Uninterruptibles.awaitUninterruptibly(recoveryComplete, 5, TimeUnit.SECONDS));
+
+ String address = "akka://foobar";
+ shard.underlyingActor().onReceiveCommand(new PeerAddressResolved(shardID, address));
+
+ assertEquals("getPeerAddresses", address,
+ ((TestShard)shard.underlyingActor()).getPeerAddresses().get(shardID.toString()));
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
- public void testCreateTransactionOnChain(){
- new JavaTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testCreateTransactionOnChain");
-
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
- ) {
+ public void testApplySnapshot() throws Exception {
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(),
+ "testApplySnapshot");
+
+ NormalizedNodeToNodeCodec codec =
+ new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT);
+
+ writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+
+ YangInstanceIdentifier root = YangInstanceIdentifier.builder().build();
+ NormalizedNode<?,?> expected = readStore(shard, root);
+
+ NormalizedNodeMessages.Container encode = codec.encode(expected);
+
+ ApplySnapshot applySnapshot = new ApplySnapshot(Snapshot.create(
+ encode.getNormalizedNode().toByteString().toByteArray(),
+ Collections.<ReplicatedLogEntry>emptyList(), 1, 2, 3, 4));
+
+ shard.underlyingActor().onReceiveCommand(applySnapshot);
+
+ NormalizedNode<?,?> actual = readStore(shard, root);
+
+ assertEquals(expected, actual);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
+
+ @Test
+ public void testApplyState() throws Exception {
+
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(), "testApplyState");
+
+ NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ MutableCompositeModification compMod = new MutableCompositeModification();
+ compMod.addModification(new WriteModification(TestModel.TEST_PATH, node, SCHEMA_CONTEXT));
+ Payload payload = new CompositeModificationPayload(compMod.toSerializable());
+ ApplyState applyState = new ApplyState(null, "test",
+ new ReplicatedLogImplEntry(1, 2, payload));
+
+ shard.underlyingActor().onReceiveCommand(applyState);
+
+ NormalizedNode<?,?> actual = readStore(shard, TestModel.TEST_PATH);
+ assertEquals("Applied state", node, actual);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
+
+ @SuppressWarnings("serial")
+ @Test
+ public void testRecovery() throws Exception {
+
+ // Set up the InMemorySnapshotStore.
+
+ InMemoryDOMDataStore testStore = InMemoryDOMDataStoreFactory.create("Test", null, null);
+ testStore.onGlobalContextUpdated(SCHEMA_CONTEXT);
+
+ DOMStoreWriteTransaction writeTx = testStore.newWriteOnlyTransaction();
+ writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ DOMStoreThreePhaseCommitCohort commitCohort = writeTx.ready();
+ commitCohort.preCommit().get();
+ commitCohort.commit().get();
+
+ DOMStoreReadTransaction readTx = testStore.newReadOnlyTransaction();
+ NormalizedNode<?, ?> root = readTx.read(YangInstanceIdentifier.builder().build()).get().get();
+
+ InMemorySnapshotStore.addSnapshot(shardID.toString(), Snapshot.create(
+ new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT).encode(
+ root).
+ getNormalizedNode().toByteString().toByteArray(),
+ Collections.<ReplicatedLogEntry>emptyList(), 0, 1, -1, -1));
+
+ // Set up the InMemoryJournal.
+
+ InMemoryJournal.addEntry(shardID.toString(), 0, new ReplicatedLogImplEntry(0, 1, newPayload(
+ new WriteModification(TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+ SCHEMA_CONTEXT))));
+
+ int nListEntries = 11;
+ Set<Integer> listEntryKeys = new HashSet<>();
+ for(int i = 1; i <= nListEntries; i++) {
+ listEntryKeys.add(Integer.valueOf(i));
+ YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i).build();
+ Modification mod = new MergeModification(path,
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i),
+ SCHEMA_CONTEXT);
+ InMemoryJournal.addEntry(shardID.toString(), i, new ReplicatedLogImplEntry(i, 1,
+ newPayload(mod)));
+ }
+
+ InMemoryJournal.addEntry(shardID.toString(), nListEntries + 1,
+ new ApplyLogEntries(nListEntries));
+
+ // Create the actor and wait for recovery complete.
+
+ final CountDownLatch recoveryComplete = new CountDownLatch(1);
+
+ Creator<Shard> creator = new Creator<Shard>() {
+ @Override
+ public Shard create() throws Exception {
+ return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
+ dataStoreContext, SCHEMA_CONTEXT) {
@Override
- protected Boolean run() {
- return true;
+ protected void onRecoveryComplete() {
+ try {
+ super.onRecoveryComplete();
+ } finally {
+ recoveryComplete.countDown();
+ }
}
- }.from(subject.path().toString())
- .message("Switching from state Candidate to Leader")
- .occurrences(1).exec();
+ };
+ }
+ };
- Assert.assertEquals(true, result);
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(creator)), "testRecovery");
+
+ assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
+
+ // Verify data in the data store.
+
+ NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
+ assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
+ outerList.getValue() instanceof Iterable);
+ for(Object entry: (Iterable<?>) outerList.getValue()) {
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
+ entry instanceof MapEntryNode);
+ MapEntryNode mapEntry = (MapEntryNode)entry;
+ Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
+ mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+ assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
+ Object value = idLeaf.get().getValue();
+ assertTrue("Unexpected value for leaf "+ TestModel.ID_QNAME.getLocalName() + ": " + value,
+ listEntryKeys.remove(value));
+ }
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ if(!listEntryKeys.isEmpty()) {
+ fail("Missing " + TestModel.OUTER_LIST_QNAME.getLocalName() + " entries with keys: " +
+ listEntryKeys);
+ }
- subject.tell(
- new UpdateSchemaContext(TestModel.createTestContext()),
- getRef());
+ assertEquals("Last log index", nListEntries,
+ shard.underlyingActor().getShardMBean().getLastLogIndex());
+ assertEquals("Commit index", nListEntries,
+ shard.underlyingActor().getShardMBean().getCommitIndex());
+ assertEquals("Last applied", nListEntries,
+ shard.underlyingActor().getShardMBean().getLastApplied());
- subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() , "foobar").toSerializable(),
- getRef());
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in instanceof CreateTransactionReply) {
- CreateTransactionReply reply =
- (CreateTransactionReply) in;
- return reply.getTransactionActorPath()
- .toString();
- } else {
- throw noMatch();
- }
+ private CompositeModificationPayload newPayload(Modification... mods) {
+ MutableCompositeModification compMod = new MutableCompositeModification();
+ for(Modification mod: mods) {
+ compMod.addModification(mod);
+ }
+
+ return new CompositeModificationPayload(compMod.toSerializable());
+ }
+
+ private DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(String cohortName,
+ InMemoryDOMDataStore dataStore, YangInstanceIdentifier path, NormalizedNode data,
+ MutableCompositeModification modification) {
+ return setupMockWriteTransaction(cohortName, dataStore, path, data, modification, null);
+ }
+
+ private DOMStoreThreePhaseCommitCohort setupMockWriteTransaction(String cohortName,
+ InMemoryDOMDataStore dataStore, YangInstanceIdentifier path, NormalizedNode data,
+ MutableCompositeModification modification,
+ final Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit) {
+
+ DOMStoreWriteTransaction tx = dataStore.newWriteOnlyTransaction();
+ tx.write(path, data);
+ final DOMStoreThreePhaseCommitCohort realCohort = tx.ready();
+ DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, cohortName);
+
+ doAnswer(new Answer<ListenableFuture<Boolean>>() {
+ @Override
+ public ListenableFuture<Boolean> answer(InvocationOnMock invocation) {
+ return realCohort.canCommit();
+ }
+ }).when(cohort).canCommit();
+
+ doAnswer(new Answer<ListenableFuture<Void>>() {
+ @Override
+ public ListenableFuture<Void> answer(InvocationOnMock invocation) throws Throwable {
+ if(preCommit != null) {
+ return preCommit.apply(realCohort);
+ } else {
+ return realCohort.preCommit();
+ }
+ }
+ }).when(cohort).preCommit();
+
+ doAnswer(new Answer<ListenableFuture<Void>>() {
+ @Override
+ public ListenableFuture<Void> answer(InvocationOnMock invocation) throws Throwable {
+ return realCohort.commit();
+ }
+ }).when(cohort).commit();
+
+ doAnswer(new Answer<ListenableFuture<Void>>() {
+ @Override
+ public ListenableFuture<Void> answer(InvocationOnMock invocation) throws Throwable {
+ return realCohort.abort();
+ }
+ }).when(cohort).abort();
+
+ modification.addModification(new WriteModification(path, data, SCHEMA_CONTEXT));
+
+ return cohort;
+ }
+
+ @SuppressWarnings({ "unchecked" })
+ @Test
+ public void testConcurrentThreePhaseCommits() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testConcurrentThreePhaseCommits");
+
+ waitUntilLeader(shard);
+
+ // Setup 3 simulated transactions with mock cohorts backed by real cohorts.
+
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
+
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+ modification2);
+
+ String transactionID3 = "tx3";
+ MutableCompositeModification modification3 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+ modification3);
+
+ long timeoutSec = 5;
+ final FiniteDuration duration = FiniteDuration.create(timeoutSec, TimeUnit.SECONDS);
+ final Timeout timeout = new Timeout(duration);
+
+ // Simulate the ForwardedReadyTransaction message for the first Tx that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1, true), getRef());
+ ReadyTransactionReply readyReply = ReadyTransactionReply.fromSerializable(
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
+
+ // Send the CanCommitTransaction message for the first Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the ForwardedReadyTransaction for the next 2 Tx's.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID3, cohort3, modification3, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ // Send the CanCommitTransaction message for the next 2 Tx's. These should get queued and
+ // processed after the first Tx completes.
+
+ Future<Object> canCommitFuture1 = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID2).toSerializable(), timeout);
+
+ Future<Object> canCommitFuture2 = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID3).toSerializable(), timeout);
+
+ // Send the CommitTransaction message for the first Tx. After it completes, it should
+ // trigger the 2nd Tx to proceed which should in turn then trigger the 3rd.
+
+ shard.tell(new CommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // Wait for the next 2 Tx's to complete.
+
+ final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
+ final CountDownLatch commitLatch = new CountDownLatch(2);
+
+ class OnFutureComplete extends OnComplete<Object> {
+ private final Class<?> expRespType;
+
+ OnFutureComplete(Class<?> expRespType) {
+ this.expRespType = expRespType;
+ }
+
+ @Override
+ public void onComplete(Throwable error, Object resp) {
+ if(error != null) {
+ caughtEx.set(new AssertionError(getClass().getSimpleName() + " failure", error));
+ } else {
+ try {
+ assertEquals("Commit response type", expRespType, resp.getClass());
+ onSuccess(resp);
+ } catch (Exception e) {
+ caughtEx.set(e);
}
- }.get(); // this extracts the received message
+ }
+ }
- assertTrue("Unexpected transaction path " + out,
- out.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
- expectNoMsg();
+ void onSuccess(Object resp) throws Exception {
}
- };
+ }
+
+ class OnCommitFutureComplete extends OnFutureComplete {
+ OnCommitFutureComplete() {
+ super(CommitTransactionReply.SERIALIZABLE_CLASS);
+ }
+
+ @Override
+ public void onComplete(Throwable error, Object resp) {
+ super.onComplete(error, resp);
+ commitLatch.countDown();
+ }
+ }
+
+ class OnCanCommitFutureComplete extends OnFutureComplete {
+ private final String transactionID;
+
+ OnCanCommitFutureComplete(String transactionID) {
+ super(CanCommitTransactionReply.SERIALIZABLE_CLASS);
+ this.transactionID = transactionID;
+ }
+
+ @Override
+ void onSuccess(Object resp) throws Exception {
+ CanCommitTransactionReply canCommitReply =
+ CanCommitTransactionReply.fromSerializable(resp);
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ Future<Object> commitFuture = Patterns.ask(shard,
+ new CommitTransaction(transactionID).toSerializable(), timeout);
+ commitFuture.onComplete(new OnCommitFutureComplete(), getSystem().dispatcher());
+ }
+ }
+
+ canCommitFuture1.onComplete(new OnCanCommitFutureComplete(transactionID2),
+ getSystem().dispatcher());
+
+ canCommitFuture2.onComplete(new OnCanCommitFutureComplete(transactionID3),
+ getSystem().dispatcher());
+
+ boolean done = commitLatch.await(timeoutSec, TimeUnit.SECONDS);
+
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Commits complete", true, done);
+
+ InOrder inOrder = inOrder(cohort1, cohort2, cohort3);
+ inOrder.verify(cohort1).canCommit();
+ inOrder.verify(cohort1).preCommit();
+ inOrder.verify(cohort1).commit();
+ inOrder.verify(cohort2).canCommit();
+ inOrder.verify(cohort2).preCommit();
+ inOrder.verify(cohort2).commit();
+ inOrder.verify(cohort3).canCommit();
+ inOrder.verify(cohort3).preCommit();
+ inOrder.verify(cohort3).commit();
+
+ // Verify data in the data store.
+
+ NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
+ assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
+ outerList.getValue() instanceof Iterable);
+ Object entry = ((Iterable<Object>)outerList.getValue()).iterator().next();
+ assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
+ entry instanceof MapEntryNode);
+ MapEntryNode mapEntry = (MapEntryNode)entry;
+ Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
+ mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+ assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
+ assertEquals(TestModel.ID_QNAME.getLocalName() + " value", 1, idLeaf.get().getValue());
+
+ for(int i = 0; i < 20 * 5; i++) {
+ long lastLogIndex = shard.underlyingActor().getShardMBean().getLastLogIndex();
+ if(lastLogIndex == 2) {
+ break;
+ }
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ assertEquals("Last log index", 2, shard.underlyingActor().getShardMBean().getLastLogIndex());
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
- public void testPeerAddressResolved(){
- new JavaTestKit(getSystem()) {{
- Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
+ public void testCommitPhaseFailure() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCommitPhaseFailure");
+
+ waitUntilLeader(shard);
+
+ // Setup 2 simulated transactions with mock cohorts. The first one fails in the
+ // commit phase.
+
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort1).preCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort1).commit();
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
- peerAddresses.put(identifier, null);
- final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testPeerAddressResolved");
+ FiniteDuration duration = duration("5 seconds");
+ final Timeout timeout = new Timeout(duration);
- new Within(duration("3 seconds")) {
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ // Send the CanCommitTransaction message for the first Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CanCommitTransaction message for the 2nd Tx. This should get queued and
+ // processed after the first Tx completes.
+
+ Future<Object> canCommitFuture = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID2).toSerializable(), timeout);
+
+ // Send the CommitTransaction message for the first Tx. This should send back an error
+ // and trigger the 2nd Tx to proceed.
+
+ shard.tell(new CommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ // Wait for the 2nd Tx to complete the canCommit phase.
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ canCommitFuture.onComplete(new OnComplete<Object>() {
@Override
- protected void run() {
+ public void onComplete(Throwable t, Object resp) {
+ latch.countDown();
+ }
+ }, getSystem().dispatcher());
- subject.tell(
- new PeerAddressResolved(identifier, "akka://foobar"),
- getRef());
+ assertEquals("2nd CanCommit complete", true, latch.await(5, TimeUnit.SECONDS));
+
+ InOrder inOrder = inOrder(cohort1, cohort2);
+ inOrder.verify(cohort1).canCommit();
+ inOrder.verify(cohort1).preCommit();
+ inOrder.verify(cohort1).commit();
+ inOrder.verify(cohort2).canCommit();
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testPreCommitPhaseFailure() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testPreCommitPhaseFailure");
+
+ waitUntilLeader(shard);
+
+ String transactionID = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort).canCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).preCommit();
+
+ FiniteDuration duration = duration("5 seconds");
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CommitTransaction message. This should send back an error
+ // for preCommit failure.
+
+ shard.tell(new CommitTransaction(transactionID).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ InOrder inOrder = inOrder(cohort);
+ inOrder.verify(cohort).canCommit();
+ inOrder.verify(cohort).preCommit();
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testCanCommitPhaseFailure() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCanCommitPhaseFailure");
- expectNoMsg();
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+
+ String transactionID = "tx1";
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock"))).when(cohort).canCommit();
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ // Send the CanCommitTransaction message.
+
+ shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testAbortBeforeFinishCommit() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testAbortBeforeFinishCommit");
+
+ waitUntilLeader(shard);
+
+ final FiniteDuration duration = duration("5 seconds");
+ final Timeout timeout = new Timeout(duration);
+
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
+ final String transactionID = "tx1";
+ final CountDownLatch abortComplete = new CountDownLatch(1);
+ Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>> preCommit =
+ new Function<DOMStoreThreePhaseCommitCohort,ListenableFuture<Void>>() {
+ @Override
+ public ListenableFuture<Void> apply(final DOMStoreThreePhaseCommitCohort cohort) {
+ ListenableFuture<Void> preCommitFuture = cohort.preCommit();
+
+ Future<Object> abortFuture = Patterns.ask(shard,
+ new AbortTransaction(transactionID).toSerializable(), timeout);
+ abortFuture.onComplete(new OnComplete<Object>() {
+ @Override
+ public void onComplete(Throwable e, Object resp) {
+ abortComplete.countDown();
+ }
+ }, getSystem().dispatcher());
+
+ return preCommitFuture;
}
};
+
+ MutableCompositeModification modification = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME),
+ modification, preCommit);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID, cohort, modification, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ shard.tell(new CanCommitTransaction(transactionID).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ Future<Object> commitFuture = Patterns.ask(shard,
+ new CommitTransaction(transactionID).toSerializable(), timeout);
+
+ assertEquals("Abort complete", true, abortComplete.await(5, TimeUnit.SECONDS));
+
+ Await.result(commitFuture, duration);
+
+ NormalizedNode<?, ?> node = readStore(shard, TestModel.TEST_PATH);
+ assertNotNull(TestModel.TEST_QNAME.getLocalName() + " not found", node);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
@Test
- public void testApplySnapshot() throws ExecutionException, InterruptedException {
- Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
+ public void testTransactionCommitTimeout() throws Throwable {
+ dataStoreContext = DatastoreContext.newBuilder().shardTransactionCommitTimeoutInSeconds(1).build();
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testTransactionCommitTimeout");
- peerAddresses.put(identifier, null);
- final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT, TestModel.createTestContext());
+ waitUntilLeader(shard);
- TestActorRef<Shard> ref = TestActorRef.create(getSystem(), props);
+ final FiniteDuration duration = duration("5 seconds");
- ref.underlyingActor().updateSchemaContext(TestModel.createTestContext());
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
- NormalizedNodeToNodeCodec codec =
- new NormalizedNodeToNodeCodec(TestModel.createTestContext());
+ writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ writeToStore(shard, TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
- ref.underlyingActor().writeToStore(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+ // Create 1st Tx - will timeout
- NormalizedNode expected = ref.underlyingActor().readStore();
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1).build(),
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+ modification1);
- NormalizedNodeMessages.Container encode = codec
- .encode(YangInstanceIdentifier.builder().build(), expected);
+ // Create 2nd Tx
+ String transactionID2 = "tx3";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ YangInstanceIdentifier listNodePath = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+ .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2).build();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort3", dataStore,
+ listNodePath,
+ ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2),
+ modification2);
- ref.underlyingActor().applySnapshot(encode.getNormalizedNode().toByteString());
+ // Ready the Tx's
- NormalizedNode actual = ref.underlyingActor().readStore();
+ shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
- assertEquals(expected, actual);
+ shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ // canCommit 1st Tx. We don't send the commit so it should timeout.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // canCommit the 2nd Tx - it should complete after the 1st Tx times out.
+
+ shard.tell(new CanCommitTransaction(transactionID2).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // Commit the 2nd Tx.
+
+ shard.tell(new CommitTransaction(transactionID2).toSerializable(), getRef());
+ expectMsgClass(duration, CommitTransactionReply.SERIALIZABLE_CLASS);
+
+ NormalizedNode<?, ?> node = readStore(shard, listNodePath);
+ assertNotNull(listNodePath + " not found", node);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
}
- private static class ShardTestKit extends JavaTestKit {
+ @Test
+ public void testTransactionCommitQueueCapacityExceeded() throws Throwable {
+ dataStoreContext = DatastoreContext.newBuilder().shardTransactionCommitQueueCapacity(1).build();
- private ShardTestKit(ActorSystem actorSystem) {
- super(actorSystem);
- }
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testTransactionCommitQueueCapacityExceeded");
- protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
- // Wait for a specific log message to show up
- final boolean result =
- new JavaTestKit.EventFilter<Boolean>(logLevel
- ) {
- @Override
- protected Boolean run() {
- return true;
- }
- }.from(subject.path().toString())
- .message(logMessage)
- .occurrences(1).exec();
+ waitUntilLeader(shard);
- Assert.assertEquals(true, result);
+ final FiniteDuration duration = duration("5 seconds");
- }
+ InMemoryDOMDataStore dataStore = shard.underlyingActor().getDataStore();
+
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = setupMockWriteTransaction("cohort1", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification1);
+
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = setupMockWriteTransaction("cohort2", dataStore,
+ TestModel.OUTER_LIST_PATH,
+ ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+ modification2);
+
+ String transactionID3 = "tx3";
+ MutableCompositeModification modification3 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort3 = setupMockWriteTransaction("cohort3", dataStore,
+ TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), modification3);
+
+ // Ready the Tx's
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID3, cohort3, modification3, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ // canCommit 1st Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS);
+
+ // canCommit the 2nd Tx - it should get queued.
+ shard.tell(new CanCommitTransaction(transactionID2).toSerializable(), getRef());
+
+ // canCommit the 3rd Tx - should exceed queue capacity and fail.
+
+ shard.tell(new CanCommitTransaction(transactionID3).toSerializable(), getRef());
+ expectMsgClass(duration, akka.actor.Status.Failure.class);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
}
@Test
- public void testCreateSnapshot() throws IOException, InterruptedException {
+ public void testCanCommitBeforeReadyFailure() throws Throwable {
+ new ShardTestKit(getSystem()) {{
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testCanCommitBeforeReadyFailure");
+
+ shard.tell(new CanCommitTransaction("tx").toSerializable(), getRef());
+ expectMsgClass(duration("5 seconds"), akka.actor.Status.Failure.class);
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
+
+ @Test
+ public void testAbortTransaction() throws Throwable {
new ShardTestKit(getSystem()) {{
- final ShardIdentifier identifier =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
+ final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ newShardProps().withDispatcher(Dispatchers.DefaultDispatcherId()),
+ "testAbortTransaction");
+
+ waitUntilLeader(shard);
+
+ // Setup 2 simulated transactions with mock cohorts. The first one will be aborted.
+
+ String transactionID1 = "tx1";
+ MutableCompositeModification modification1 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort1 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort1");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(cohort1).abort();
+
+ String transactionID2 = "tx2";
+ MutableCompositeModification modification2 = new MutableCompositeModification();
+ DOMStoreThreePhaseCommitCohort cohort2 = mock(DOMStoreThreePhaseCommitCohort.class, "cohort2");
+ doReturn(Futures.immediateFuture(Boolean.TRUE)).when(cohort2).canCommit();
+
+ FiniteDuration duration = duration("5 seconds");
+ final Timeout timeout = new Timeout(duration);
+
+ // Simulate the ForwardedReadyTransaction messages that would be sent
+ // by the ShardTransaction.
+
+ shard.tell(new ForwardedReadyTransaction(transactionID1, cohort1, modification1, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ shard.tell(new ForwardedReadyTransaction(transactionID2, cohort2, modification2, true), getRef());
+ expectMsgClass(duration, ReadyTransactionReply.SERIALIZABLE_CLASS);
+
+ // Send the CanCommitTransaction message for the first Tx.
+
+ shard.tell(new CanCommitTransaction(transactionID1).toSerializable(), getRef());
+ CanCommitTransactionReply canCommitReply = CanCommitTransactionReply.fromSerializable(
+ expectMsgClass(duration, CanCommitTransactionReply.SERIALIZABLE_CLASS));
+ assertEquals("Can commit", true, canCommitReply.getCanCommit());
+
+ // Send the CanCommitTransaction message for the 2nd Tx. This should get queued and
+ // processed after the first Tx completes.
- final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
- final ActorRef subject =
- getSystem().actorOf(props, "testCreateSnapshot");
+ Future<Object> canCommitFuture = Patterns.ask(shard,
+ new CanCommitTransaction(transactionID2).toSerializable(), timeout);
- // Wait for a specific log message to show up
- this.waitForLogMessage(Logging.Info.class, subject, "Switching from state Candidate to Leader");
+ // Send the AbortTransaction message for the first Tx. This should trigger the 2nd
+ // Tx to proceed.
+ shard.tell(new AbortTransaction(transactionID1).toSerializable(), getRef());
+ expectMsgClass(duration, AbortTransactionReply.SERIALIZABLE_CLASS);
- new Within(duration("3 seconds")) {
+ // Wait for the 2nd Tx to complete the canCommit phase.
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ canCommitFuture.onComplete(new OnComplete<Object>() {
@Override
- protected void run() {
+ public void onComplete(Throwable t, Object resp) {
+ latch.countDown();
+ }
+ }, getSystem().dispatcher());
- subject.tell(
- new UpdateSchemaContext(TestModel.createTestContext()),
- getRef());
+ assertEquals("2nd CanCommit complete", true, latch.await(5, TimeUnit.SECONDS));
- subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
- getRef());
+ InOrder inOrder = inOrder(cohort1, cohort2);
+ inOrder.verify(cohort1).canCommit();
+ inOrder.verify(cohort2).canCommit();
- waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+ }};
+ }
- subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
- getRef());
+ @Test
+ public void testCreateSnapshot() throws IOException, InterruptedException {
+ testCreateSnapshot(true, "testCreateSnapshot");
+ }
- waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+ @Test
+ public void testCreateSnapshotWithNonPersistentData() throws IOException, InterruptedException {
+ testCreateSnapshot(false, "testCreateSnapshotWithNonPersistentData");
+ }
+ public void testCreateSnapshot(boolean persistent, final String shardActorName) throws IOException, InterruptedException {
+ final DatastoreContext dataStoreContext = DatastoreContext.newBuilder().
+ shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(persistent).build();
+
+ new ShardTestKit(getSystem()) {{
+ final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
+ Creator<Shard> creator = new Creator<Shard>() {
+ @Override
+ public Shard create() throws Exception {
+ return new Shard(shardID, Collections.<ShardIdentifier,String>emptyMap(),
+ dataStoreContext, SCHEMA_CONTEXT) {
+ @Override
+ protected void commitSnapshot(long sequenceNumber) {
+ super.commitSnapshot(sequenceNumber);
+ latch.get().countDown();
+ }
+ };
}
};
- deletePersistenceFiles();
+ TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+ Props.create(new DelegatingShardCreator(creator)), shardActorName);
+
+ waitUntilLeader(shard);
+
+ shard.tell(new CaptureSnapshot(-1,-1,-1,-1), getRef());
+
+ assertEquals("Snapshot saved", true, latch.get().await(5, TimeUnit.SECONDS));
+
+ latch.set(new CountDownLatch(1));
+ shard.tell(new CaptureSnapshot(-1,-1,-1,-1), getRef());
+
+ assertEquals("Snapshot saved", true, latch.get().await(5, TimeUnit.SECONDS));
+
+ shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
}};
}
InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", MoreExecutors.listeningDecorator(
MoreExecutors.sameThreadExecutor()), MoreExecutors.sameThreadExecutor());
- store.onGlobalContextUpdated(TestModel.createTestContext());
+ store.onGlobalContextUpdated(SCHEMA_CONTEXT);
DOMStoreWriteTransaction putTransaction = store.newWriteOnlyTransaction();
putTransaction.write(TestModel.TEST_PATH,
}
+ @Test
+ public void testRecoveryApplicable(){
+
+ final DatastoreContext persistentContext = DatastoreContext.newBuilder().
+ shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(true).build();
+
+ final Props persistentProps = Shard.props(shardID, Collections.<ShardIdentifier, String>emptyMap(),
+ persistentContext, SCHEMA_CONTEXT);
+
+ final DatastoreContext nonPersistentContext = DatastoreContext.newBuilder().
+ shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000).persistent(false).build();
+
+ final Props nonPersistentProps = Shard.props(shardID, Collections.<ShardIdentifier, String>emptyMap(),
+ nonPersistentContext, SCHEMA_CONTEXT);
+
+ new ShardTestKit(getSystem()) {{
+ TestActorRef<Shard> shard1 = TestActorRef.create(getSystem(),
+ persistentProps, "testPersistence1");
+
+ assertTrue("Recovery Applicable", shard1.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+
+ shard1.tell(PoisonPill.getInstance(), ActorRef.noSender());
+
+ TestActorRef<Shard> shard2 = TestActorRef.create(getSystem(),
+ nonPersistentProps, "testPersistence2");
+
+ assertFalse("Recovery Not Applicable", shard2.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+
+ shard2.tell(PoisonPill.getInstance(), ActorRef.noSender());
+
+ }};
+
+ }
+
+
private NormalizedNode readStore(InMemoryDOMDataStore store) throws ReadFailedException {
DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read =
}
};
}
+
+ private NormalizedNode<?,?> readStore(TestActorRef<Shard> shard, YangInstanceIdentifier id)
+ throws ExecutionException, InterruptedException {
+ DOMStoreReadTransaction transaction = shard.underlyingActor().getDataStore().newReadOnlyTransaction();
+
+ CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
+ transaction.read(id);
+
+ Optional<NormalizedNode<?, ?>> optional = future.get();
+ NormalizedNode<?, ?> node = optional.isPresent()? optional.get() : null;
+
+ transaction.close();
+
+ return node;
+ }
+
+ private void writeToStore(TestActorRef<Shard> shard, YangInstanceIdentifier id, NormalizedNode<?,?> node)
+ throws ExecutionException, InterruptedException {
+ DOMStoreWriteTransaction transaction = shard.underlyingActor().getDataStore().newWriteOnlyTransaction();
+
+ transaction.write(id, node);
+
+ DOMStoreThreePhaseCommitCohort commitCohort = transaction.ready();
+ commitCohort.preCommit().get();
+ commitCohort.commit().get();
+ }
+
+ private static final class DelegatingShardCreator implements Creator<Shard> {
+ private final Creator<Shard> delegate;
+
+ DelegatingShardCreator(Creator<Shard> delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public Shard create() throws Exception {
+ return delegate.create();
+ }
+ }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.pattern.Patterns;
+import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.Assert;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
+import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+class ShardTestKit extends JavaTestKit {
+
+ ShardTestKit(ActorSystem actorSystem) {
+ super(actorSystem);
+ }
+
+ protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
+ // Wait for a specific log message to show up
+ final boolean result =
+ new JavaTestKit.EventFilter<Boolean>(logLevel
+ ) {
+ @Override
+ protected Boolean run() {
+ return true;
+ }
+ }.from(subject.path().toString())
+ .message(logMessage)
+ .occurrences(1).exec();
+
+ Assert.assertEquals(true, result);
+
+ }
+
+ protected void waitUntilLeader(ActorRef shard) {
+ FiniteDuration duration = Duration.create(100, TimeUnit.MILLISECONDS);
+ for(int i = 0; i < 20 * 5; i++) {
+ Future<Object> future = Patterns.ask(shard, new FindLeader(), new Timeout(duration));
+ try {
+ FindLeaderReply resp = (FindLeaderReply)Await.result(future, duration);
+ if(resp.getLeaderActor() != null) {
+ return;
+ }
+ } catch(TimeoutException e) {
+ } catch(Exception e) {
+ System.err.println("FindLeader threw ex");
+ e.printStackTrace();
+ }
+
+
+ Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+ }
+
+ Assert.fail("Leader not found for shard " + shard.path());
+ }
+
+}
\ No newline at end of file
import akka.actor.ActorRef;
import akka.actor.Props;
+import akka.pattern.AskTimeoutException;
import akka.testkit.TestActorRef;
-
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
-
import org.junit.BeforeClass;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
-
import java.util.Collections;
import java.util.concurrent.TimeUnit;
ShardIdentifier.builder().memberName("member-1")
.shardName("inventory").type("operational").build();
- private final DatastoreContext datastoreContext = new DatastoreContext();
+ private final DatastoreContext datastoreContext = DatastoreContext.newBuilder().build();
private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
}
private ActorRef createShard(){
- return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, new DatastoreContext(), TestModel.createTestContext()));
+ return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, datastoreContext,
+ TestModel.createTestContext()));
}
@Test(expected = ReadFailedException.class)
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
+ testSchemaContext, datastoreContext, shardStats, "txn");
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
+ testSchemaContext, datastoreContext, shardStats, "txn");
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
+ testSchemaContext, datastoreContext, shardStats, "txn");
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
}
- @Test(expected = IllegalStateException.class)
+ @Test(expected = AskTimeoutException.class)
public void testNegativeWriteWithTransactionReady() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
+ testSchemaContext, datastoreContext, shardStats, "txn");
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
Await.result(future, Duration.create(3, TimeUnit.SECONDS));
}
- @Test(expected = IllegalStateException.class)
+ @Test(expected = AskTimeoutException.class)
public void testNegativeReadWriteWithTransactionReady() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
+ testSchemaContext, datastoreContext, shardStats, "txn");
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
.serialize(Builders.containerBuilder().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).build());
}
- @Test(expected = IllegalStateException.class)
+ @Test(expected = AskTimeoutException.class)
public void testNegativeMergeTransactionReady() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
+ testSchemaContext, datastoreContext, shardStats, "txn");
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props, "testNegativeMergeTransactionReady");
}
- @Test(expected = IllegalStateException.class)
+ @Test(expected = AskTimeoutException.class)
public void testNegativeDeleteDataWhenTransactionReady() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
+ testSchemaContext, datastoreContext, shardStats, "txn");
final TestActorRef<ShardTransaction> subject = TestActorRef
.create(getSystem(), props,
import akka.actor.Terminated;
import akka.testkit.JavaTestKit;
import akka.testkit.TestActorRef;
-
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
-
import org.junit.BeforeClass;
import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.ShardWriteTransaction.GetCompositeModificationReply;
import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
import scala.concurrent.duration.Duration;
-
import java.util.Collections;
import java.util.concurrent.TimeUnit;
-
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class ShardTransactionTest extends AbstractActorTest {
ShardIdentifier.builder().memberName("member-1")
.shardName("inventory").type("config").build();
- private DatastoreContext datastoreContext = new DatastoreContext();
+ private DatastoreContext datastoreContext = DatastoreContext.newBuilder().build();
private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
private ActorRef createShard(){
return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER,
- Collections.EMPTY_MAP, new DatastoreContext(), TestModel.createTestContext()));
+ Collections.EMPTY_MAP, datastoreContext, TestModel.createTestContext()));
}
@Test
public void testOnReceiveReadData() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject = getSystem().actorOf(props, "testReadData");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(
- new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
- if (ReadDataReply.fromSerializable(testSchemaContext,YangInstanceIdentifier.builder().build(), in)
- .getNormalizedNode()!= null) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- expectNoMsg();
- }
-
-
- };
+ Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+
+ testOnReceiveReadData(getSystem().actorOf(props, "testReadDataRO"));
+
+ props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+
+ testOnReceiveReadData(getSystem().actorOf(props, "testReadDataRW"));
+ }
+
+ private void testOnReceiveReadData(final ActorRef transaction) {
+ //serialized read
+ transaction.tell(new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(),
+ getRef());
+
+ ShardTransactionMessages.ReadDataReply replySerialized =
+ expectMsgClass(duration("5 seconds"), ReadDataReply.SERIALIZABLE_CLASS);
+
+ assertNotNull(ReadDataReply.fromSerializable(
+ testSchemaContext,YangInstanceIdentifier.builder().build(), replySerialized)
+ .getNormalizedNode());
+
+ // unserialized read
+ transaction.tell(new ReadData(YangInstanceIdentifier.builder().build()),getRef());
+
+ ReadDataReply reply = expectMsgClass(duration("5 seconds"), ReadDataReply.class);
+
+ assertNotNull(reply.getNormalizedNode());
}};
}
public void testOnReceiveReadDataWhenDataNotFound() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props( store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject = getSystem().actorOf(props, "testReadDataWhenDataNotFound");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(
- new ReadData(TestModel.TEST_PATH).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
- if (ReadDataReply.fromSerializable(testSchemaContext,TestModel.TEST_PATH, in)
- .getNormalizedNode()
- == null) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- expectNoMsg();
- }
-
-
- };
+ Props props = ShardTransaction.props( store.newReadOnlyTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+
+ testOnReceiveReadDataWhenDataNotFound(getSystem().actorOf(
+ props, "testReadDataWhenDataNotFoundRO"));
+
+ props = ShardTransaction.props( store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+
+ testOnReceiveReadDataWhenDataNotFound(getSystem().actorOf(
+ props, "testReadDataWhenDataNotFoundRW"));
+ }
+
+ private void testOnReceiveReadDataWhenDataNotFound(final ActorRef transaction) {
+ // serialized read
+ transaction.tell(new ReadData(TestModel.TEST_PATH).toSerializable(), getRef());
+
+ ShardTransactionMessages.ReadDataReply replySerialized =
+ expectMsgClass(duration("5 seconds"), ReadDataReply.SERIALIZABLE_CLASS);
+
+ assertTrue(ReadDataReply.fromSerializable(
+ testSchemaContext, TestModel.TEST_PATH, replySerialized).getNormalizedNode() == null);
+
+ // unserialized read
+ transaction.tell(new ReadData(TestModel.TEST_PATH),getRef());
+
+ ReadDataReply reply = expectMsgClass(duration("5 seconds"), ReadDataReply.class);
+
+ assertTrue(reply.getNormalizedNode() == null);
}};
}
public void testOnReceiveDataExistsPositive() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject = getSystem().actorOf(props, "testDataExistsPositive");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(
- new DataExists(YangInstanceIdentifier.builder().build()).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
- if (DataExistsReply.fromSerializable(in)
- .exists()) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- expectNoMsg();
- }
-
-
- };
+ Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+
+ testOnReceiveDataExistsPositive(getSystem().actorOf(props, "testDataExistsPositiveRO"));
+
+ props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+
+ testOnReceiveDataExistsPositive(getSystem().actorOf(props, "testDataExistsPositiveRW"));
+ }
+
+ private void testOnReceiveDataExistsPositive(final ActorRef transaction) {
+ transaction.tell(new DataExists(YangInstanceIdentifier.builder().build()).toSerializable(),
+ getRef());
+
+ ShardTransactionMessages.DataExistsReply replySerialized =
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.DataExistsReply.class);
+
+ assertTrue(DataExistsReply.fromSerializable(replySerialized).exists());
+
+ // unserialized read
+ transaction.tell(new DataExists(YangInstanceIdentifier.builder().build()),getRef());
+
+ DataExistsReply reply = expectMsgClass(duration("5 seconds"), DataExistsReply.class);
+
+ assertTrue(reply.exists());
}};
}
public void testOnReceiveDataExistsNegative() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
- final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject = getSystem().actorOf(props, "testDataExistsNegative");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(
- new DataExists(TestModel.TEST_PATH).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
- if (!DataExistsReply.fromSerializable(in)
- .exists()) {
- return "match";
- }
- return null;
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- expectNoMsg();
- }
-
-
- };
+ Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+
+ testOnReceiveDataExistsNegative(getSystem().actorOf(props, "testDataExistsNegativeRO"));
+
+ props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+
+ testOnReceiveDataExistsNegative(getSystem().actorOf(props, "testDataExistsNegativeRW"));
+ }
+
+ private void testOnReceiveDataExistsNegative(final ActorRef transaction) {
+ transaction.tell(new DataExists(TestModel.TEST_PATH).toSerializable(), getRef());
+
+ ShardTransactionMessages.DataExistsReply replySerialized =
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.DataExistsReply.class);
+
+ assertFalse(DataExistsReply.fromSerializable(replySerialized).exists());
+
+ // unserialized read
+ transaction.tell(new DataExists(TestModel.TEST_PATH),getRef());
+
+ DataExistsReply reply = expectMsgClass(duration("5 seconds"), DataExistsReply.class);
+
+ assertFalse(reply.exists());
}};
}
private void assertModification(final ActorRef subject,
final Class<? extends Modification> modificationType) {
new JavaTestKit(getSystem()) {{
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
- subject
- .tell(new ShardTransaction.GetCompositedModification(),
- getRef());
-
- final CompositeModification compositeModification =
- new ExpectMsg<CompositeModification>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected CompositeModification match(Object in) {
- if (in instanceof ShardTransaction.GetCompositeModificationReply) {
- return ((ShardTransaction.GetCompositeModificationReply) in)
- .getModification();
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertTrue(
- compositeModification.getModifications().size() == 1);
- assertEquals(modificationType,
- compositeModification.getModifications().get(0)
- .getClass());
-
- }
- };
+ subject.tell(new ShardWriteTransaction.GetCompositedModification(), getRef());
+
+ CompositeModification compositeModification = expectMsgClass(duration("3 seconds"),
+ GetCompositeModificationReply.class).getModification();
+
+ assertTrue(compositeModification.getModifications().size() == 1);
+ assertEquals(modificationType, compositeModification.getModifications().get(0).getClass());
}};
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject =
- getSystem().actorOf(props, "testWriteData");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new WriteData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext()).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(WriteDataReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- assertModification(subject, WriteModification.class);
- expectNoMsg();
- }
-
-
- };
+ testSchemaContext, datastoreContext, shardStats, "txn");
+ final ActorRef transaction = getSystem().actorOf(props, "testWriteData");
+
+ transaction.tell(new WriteData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext()).toSerializable(),
+ getRef());
+
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.WriteDataReply.class);
+
+ assertModification(transaction, WriteModification.class);
+
+ //unserialized write
+ transaction.tell(new WriteData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME),
+ TestModel.createTestContext()),
+ getRef());
+
+ expectMsgClass(duration("5 seconds"), WriteDataReply.class);
}};
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject =
- getSystem().actorOf(props, "testMergeData");
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new MergeData(TestModel.TEST_PATH,
- ImmutableNodes.containerNode(TestModel.TEST_QNAME), testSchemaContext).toSerializable(),
- getRef());
-
- final String out = new ExpectMsg<String>(duration("500 milliseconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(MergeDataReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ testSchemaContext, datastoreContext, shardStats, "txn");
+ final ActorRef transaction = getSystem().actorOf(props, "testMergeData");
- assertEquals("match", out);
+ transaction.tell(new MergeData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), testSchemaContext).toSerializable(),
+ getRef());
- assertModification(subject, MergeModification.class);
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.MergeDataReply.class);
- expectNoMsg();
- }
+ assertModification(transaction, MergeModification.class);
+ //unserialized merge
+ transaction.tell(new MergeData(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), testSchemaContext),
+ getRef());
- };
+ expectMsgClass(duration("5 seconds"), MergeDataReply.class);
}};
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props( store.newWriteOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject =
- getSystem().actorOf(props, "testDeleteData");
+ testSchemaContext, datastoreContext, shardStats, "txn");
+ final ActorRef transaction = getSystem().actorOf(props, "testDeleteData");
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
+ transaction.tell(new DeleteData(TestModel.TEST_PATH).toSerializable(), getRef());
- subject.tell(new DeleteData(TestModel.TEST_PATH).toSerializable(), getRef());
+ expectMsgClass(duration("5 seconds"), ShardTransactionMessages.DeleteDataReply.class);
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(DeleteDataReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ assertModification(transaction, DeleteModification.class);
- assertEquals("match", out);
+ //unserialized merge
+ transaction.tell(new DeleteData(TestModel.TEST_PATH), getRef());
- assertModification(subject, DeleteModification.class);
- expectNoMsg();
- }
-
-
- };
+ expectMsgClass(duration("5 seconds"), DeleteDataReply.class);
}};
}
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props( store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject =
- getSystem().actorOf(props, "testReadyTransaction");
+ testSchemaContext, datastoreContext, shardStats, "txn");
+ final ActorRef transaction = getSystem().actorOf(props, "testReadyTransaction");
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
+ watch(transaction);
- subject.tell(new ReadyTransaction().toSerializable(), getRef());
+ transaction.tell(new ReadyTransaction().toSerializable(), getRef());
- final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
+ Terminated.class);
+ expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.SERIALIZABLE_CLASS,
+ Terminated.class);
+ }};
- assertEquals("match", out);
+ // test
+ new JavaTestKit(getSystem()) {{
+ final ActorRef shard = createShard();
+ final Props props = ShardTransaction.props( store.newReadWriteTransaction(), shard,
+ testSchemaContext, datastoreContext, shardStats, "txn");
+ final ActorRef transaction = getSystem().actorOf(props, "testReadyTransaction2");
- expectNoMsg();
- }
+ watch(transaction);
+ transaction.tell(new ReadyTransaction(), getRef());
- };
+ expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
+ Terminated.class);
+ expectMsgAnyClassOf(duration("5 seconds"), ReadyTransactionReply.class,
+ Terminated.class);
}};
}
+ @SuppressWarnings("unchecked")
@Test
public void testOnReceiveCloseTransaction() throws Exception {
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject =
- getSystem().actorOf(props, "testCloseTransaction");
-
- watch(subject);
-
- new Within(duration("6 seconds")) {
- @Override
- protected void run() {
-
- subject.tell(new CloseTransaction().toSerializable(), getRef());
-
- final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- System.out.println("!!!IN match 1: "+(in!=null?in.getClass():"NULL"));
- if (in.getClass().equals(CloseTransactionReply.SERIALIZABLE_CLASS)) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", out);
-
- final String termination = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- System.out.println("!!!IN match 2: "+(in!=null?in.getClass():"NULL"));
- if (in instanceof Terminated) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
-
- assertEquals("match", termination);
- }
- };
+ testSchemaContext, datastoreContext, shardStats, "txn");
+ final ActorRef transaction = getSystem().actorOf(props, "testCloseTransaction");
+
+ watch(transaction);
+
+ transaction.tell(new CloseTransaction().toSerializable(), getRef());
+
+ expectMsgClass(duration("3 seconds"), CloseTransactionReply.SERIALIZABLE_CLASS);
+ expectTerminated(duration("3 seconds"), transaction);
}};
}
public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadOnlyTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final TestActorRef subject = TestActorRef.apply(props,getSystem());
+ testSchemaContext, datastoreContext, shardStats, "txn");
+ final TestActorRef<ShardTransaction> transaction = TestActorRef.apply(props,getSystem());
- subject.receive(new DeleteData(TestModel.TEST_PATH).toSerializable(), ActorRef.noSender());
+ transaction.receive(new DeleteData(TestModel.TEST_PATH).toSerializable(), ActorRef.noSender());
}
@Test
public void testShardTransactionInactivity() {
- datastoreContext = new DatastoreContext("Test",
- InMemoryDOMDataStoreConfigProperties.getDefault(),
- Duration.create(500, TimeUnit.MILLISECONDS), 5);
+ datastoreContext = DatastoreContext.newBuilder().shardTransactionIdleTimeout(
+ Duration.create(500, TimeUnit.MILLISECONDS)).build();
new JavaTestKit(getSystem()) {{
final ActorRef shard = createShard();
final Props props = ShardTransaction.props(store.newReadWriteTransaction(), shard,
- testSchemaContext, datastoreContext, shardStats);
- final ActorRef subject =
+ testSchemaContext, datastoreContext, shardStats, "txn");
+ final ActorRef transaction =
getSystem().actorOf(props, "testShardTransactionInactivity");
- watch(subject);
-
- // The shard Tx actor should receive a ReceiveTimeout message and self-destruct.
-
- final String termination = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
- // do not put code outside this method, will run afterwards
- @Override
- protected String match(Object in) {
- if (in instanceof Terminated) {
- return "match";
- } else {
- throw noMatch();
- }
- }
- }.get(); // this extracts the received message
+ watch(transaction);
- assertEquals("match", termination);
+ expectMsgClass(duration("3 seconds"), Terminated.class);
}};
}
}
+++ /dev/null
-/*
- *
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import akka.testkit.TestActorRef;
-import akka.util.Timeout;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
-import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
-import org.opendaylight.controller.cluster.datastore.modification.CompositeModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
-import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.Duration;
-import scala.concurrent.duration.FiniteDuration;
-
-import java.util.Collections;
-import java.util.concurrent.TimeUnit;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.when;
-
-
-public class ThreePhaseCommitCohortFailureTest extends AbstractActorTest {
-
- private static ListeningExecutorService storeExecutor =
- MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
-
- private static final InMemoryDOMDataStore store =
- new InMemoryDOMDataStore("OPER", storeExecutor,
- MoreExecutors.sameThreadExecutor());
-
- private static final SchemaContext testSchemaContext =
- TestModel.createTestContext();
-
- private static final ShardIdentifier SHARD_IDENTIFIER =
- ShardIdentifier.builder().memberName("member-1")
- .shardName("inventory").type("config").build();
-
- private final DatastoreContext datastoreContext = new DatastoreContext();
-
- private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
-
- @BeforeClass
- public static void staticSetup() {
- store.onGlobalContextUpdated(testSchemaContext);
- }
-
- private final FiniteDuration ASK_RESULT_DURATION = Duration.create(5000, TimeUnit.MILLISECONDS);
-
- private ActorRef createShard(){
- return getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, datastoreContext, TestModel.createTestContext()));
- }
-
- @Test(expected = TestException.class)
- public void testNegativeAbortResultsInException() throws Exception {
-
- final ActorRef shard = createShard();
- final DOMStoreThreePhaseCommitCohort mockCohort = Mockito
- .mock(DOMStoreThreePhaseCommitCohort.class);
- final CompositeModification mockComposite =
- Mockito.mock(CompositeModification.class);
- final Props props =
- ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite, shardStats);
-
- final TestActorRef<ThreePhaseCommitCohort> subject = TestActorRef
- .create(getSystem(), props,
- "testNegativeAbortResultsInException");
-
- when(mockCohort.abort()).thenReturn(
- Futures.<Void>immediateFailedFuture(new TestException()));
-
- Future<Object> future =
- akka.pattern.Patterns.ask(subject,
- ThreePhaseCommitCohortMessages.AbortTransaction.newBuilder()
- .build(), 3000);
- assertTrue(future.isCompleted());
-
- Await.result(future, ASK_RESULT_DURATION);
- }
-
-
- @Test(expected = OptimisticLockFailedException.class)
- public void testNegativeCanCommitResultsInException() throws Exception {
-
- final ActorRef shard = createShard();
- final DOMStoreThreePhaseCommitCohort mockCohort = Mockito
- .mock(DOMStoreThreePhaseCommitCohort.class);
- final CompositeModification mockComposite =
- Mockito.mock(CompositeModification.class);
- final Props props =
- ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite, shardStats);
-
- final TestActorRef<ThreePhaseCommitCohort> subject = TestActorRef
- .create(getSystem(), props,
- "testNegativeCanCommitResultsInException");
-
- when(mockCohort.canCommit()).thenReturn(
- Futures
- .<Boolean>immediateFailedFuture(
- new OptimisticLockFailedException("some exception")));
-
- Future<Object> future =
- akka.pattern.Patterns.ask(subject,
- ThreePhaseCommitCohortMessages.CanCommitTransaction.newBuilder()
- .build(), 3000);
-
-
- Await.result(future, ASK_RESULT_DURATION);
-
- }
-
-
- @Test(expected = TestException.class)
- public void testNegativePreCommitResultsInException() throws Exception {
-
- final ActorRef shard = createShard();
- final DOMStoreThreePhaseCommitCohort mockCohort = Mockito
- .mock(DOMStoreThreePhaseCommitCohort.class);
- final CompositeModification mockComposite =
- Mockito.mock(CompositeModification.class);
- final Props props =
- ThreePhaseCommitCohort.props(mockCohort, shard, mockComposite, shardStats);
-
- final TestActorRef<ThreePhaseCommitCohort> subject = TestActorRef
- .create(getSystem(), props,
- "testNegativePreCommitResultsInException");
-
- when(mockCohort.preCommit()).thenReturn(
- Futures
- .<Void>immediateFailedFuture(
- new TestException()));
-
- Future<Object> future =
- akka.pattern.Patterns.ask(subject,
- ThreePhaseCommitCohortMessages.PreCommitTransaction.newBuilder()
- .build(), 3000);
-
- Await.result(future, ASK_RESULT_DURATION);
-
- }
-
- @Test(expected = TestException.class)
- public void testNegativeCommitResultsInException() throws Exception {
-
- final TestActorRef<Shard> subject = TestActorRef.create(getSystem(),
- Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP, datastoreContext, TestModel.createTestContext()),
- "testNegativeCommitResultsInException");
-
- final ActorRef shardTransaction =
- getSystem().actorOf(ShardTransaction.props(store.newReadWriteTransaction(), subject,
- testSchemaContext, datastoreContext, shardStats));
-
- ShardTransactionMessages.WriteData writeData =
- ShardTransactionMessages.WriteData.newBuilder()
- .setInstanceIdentifierPathArguments(
- NormalizedNodeMessages.InstanceIdentifier.newBuilder()
- .build()).setNormalizedNode(
- NormalizedNodeMessages.Node.newBuilder().build()
-
- ).build();
-
- Timeout askTimeout = new Timeout(ASK_RESULT_DURATION);
-
- //This is done so that Modification list is updated which is used during commit
- Future<Object> future = akka.pattern.Patterns.ask(shardTransaction, writeData, askTimeout);
-
- //ready transaction creates the cohort so that we get into the
- //block where in commmit is done
- ShardTransactionMessages.ReadyTransaction readyTransaction =
- ShardTransactionMessages.ReadyTransaction.newBuilder().build();
-
- future = akka.pattern.Patterns.ask(shardTransaction, readyTransaction, askTimeout);
-
- //but when the message is sent it will have the MockCommit object
- //so that we can simulate throwing of exception
- ForwardedCommitTransaction mockForwardCommitTransaction =
- Mockito.mock(ForwardedCommitTransaction.class);
- DOMStoreThreePhaseCommitCohort mockThreePhaseCommitTransaction =
- Mockito.mock(DOMStoreThreePhaseCommitCohort.class);
- when(mockForwardCommitTransaction.getCohort())
- .thenReturn(mockThreePhaseCommitTransaction);
- when(mockThreePhaseCommitTransaction.commit()).thenReturn(Futures
- .<Void>immediateFailedFuture(
- new TestException()));
- Modification mockModification = Mockito.mock(
- Modification.class);
- when(mockForwardCommitTransaction.getModification())
- .thenReturn(mockModification);
-
- when(mockModification.toSerializable()).thenReturn(
- PersistentMessages.CompositeModification.newBuilder().build());
-
- future = akka.pattern.Patterns.ask(subject, mockForwardCommitTransaction, askTimeout);
- Await.result(future, ASK_RESULT_DURATION);
- }
-
- private class TestException extends Exception {
- }
-}
import akka.actor.ActorSelection;
import akka.actor.Props;
import akka.dispatch.Futures;
-
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ListenableFuture;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.isA;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.times;
-
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.opendaylight.controller.cluster.datastore.messages.SerializableMessage;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
-
import scala.concurrent.Future;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.isA;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
@SuppressWarnings("serial")
doReturn(getSystem()).when(actorContext).getActorSystem();
}
- private Future<ActorPath> newCohortPath() {
+ private Future<ActorSelection> newCohort() {
ActorPath path = getSystem().actorOf(Props.create(DoNothingActor.class)).path();
- doReturn(mock(ActorSelection.class)).when(actorContext).actorSelection(path);
- return Futures.successful(path);
+ ActorSelection actorSelection = getSystem().actorSelection(path);
+ return Futures.successful(actorSelection);
}
private final ThreePhaseCommitCohortProxy setupProxy(int nCohorts) throws Exception {
- List<Future<ActorPath>> cohortPathFutures = Lists.newArrayList();
+ List<Future<ActorSelection>> cohortFutures = Lists.newArrayList();
for(int i = 1; i <= nCohorts; i++) {
- cohortPathFutures.add(newCohortPath());
+ cohortFutures.add(newCohort());
}
- return new ThreePhaseCommitCohortProxy(actorContext, cohortPathFutures, "txn-1");
+ return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures, "txn-1");
}
private ThreePhaseCommitCohortProxy setupProxyWithFailedCohortPath()
throws Exception {
- List<Future<ActorPath>> cohortPathFutures = Lists.newArrayList();
- cohortPathFutures.add(newCohortPath());
- cohortPathFutures.add(Futures.<ActorPath>failed(new TestException()));
+ List<Future<ActorSelection>> cohortFutures = Lists.newArrayList();
+ cohortFutures.add(newCohort());
+ cohortFutures.add(Futures.<ActorSelection>failed(new TestException()));
- return new ThreePhaseCommitCohortProxy(actorContext, cohortPathFutures, "txn-1");
+ return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures, "txn-1");
}
private void setupMockActorContext(Class<?> requestType, Object... responses) {
.successful(((SerializableMessage) responses[i]).toSerializable()));
}
- stubber.when(actorContext).executeRemoteOperationAsync(any(ActorSelection.class),
+ stubber.when(actorContext).executeOperationAsync(any(ActorSelection.class),
isA(requestType));
}
private void verifyCohortInvocations(int nCohorts, Class<?> requestType) {
- verify(actorContext, times(nCohorts)).executeRemoteOperationAsync(
+ verify(actorContext, times(nCohorts)).executeOperationAsync(
any(ActorSelection.class), isA(requestType));
}
@Test
public void testPreCommit() throws Exception {
+ // Precommit is currently a no-op
ThreePhaseCommitCohortProxy proxy = setupProxy(1);
setupMockActorContext(PreCommitTransaction.SERIALIZABLE_CLASS,
new PreCommitTransactionReply());
proxy.preCommit().get(5, TimeUnit.SECONDS);
-
- verifyCohortInvocations(1, PreCommitTransaction.SERIALIZABLE_CLASS);
- }
-
- @Test(expected = ExecutionException.class)
- public void testPreCommitWithFailure() throws Exception {
- ThreePhaseCommitCohortProxy proxy = setupProxy(2);
-
- setupMockActorContext(PreCommitTransaction.SERIALIZABLE_CLASS,
- new PreCommitTransactionReply(), new RuntimeException("mock"));
-
- proxy.preCommit().get(5, TimeUnit.SECONDS);
}
@Test
proxy.commit().get(5, TimeUnit.SECONDS);
verifyCohortInvocations(2, CanCommitTransaction.SERIALIZABLE_CLASS);
- verifyCohortInvocations(2, PreCommitTransaction.SERIALIZABLE_CLASS);
verifyCohortInvocations(2, CommitTransaction.SERIALIZABLE_CLASS);
}
}
package org.opendaylight.controller.cluster.datastore;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import akka.actor.ActorPath;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
+import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.dispatch.Futures;
-
+import akka.testkit.JavaTestKit;
import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.CheckedFuture;
-
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import org.junit.AfterClass;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
-
-import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_ONLY;
-import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.WRITE_ONLY;
-import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_WRITE;
-
import org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType;
import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
+import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
-
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.argThat;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.isA;
import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_ONLY;
+import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_WRITE;
+import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.WRITE_ONLY;
@SuppressWarnings("resource")
-public class TransactionProxyTest extends AbstractActorTest {
+public class TransactionProxyTest {
@SuppressWarnings("serial")
static class TestException extends RuntimeException {
CheckedFuture<?, ReadFailedException> invoke(TransactionProxy proxy) throws Exception;
}
+ private static ActorSystem system;
+
private final Configuration configuration = new MockConfiguration();
@Mock
private SchemaContext schemaContext;
+ @Mock
+ private ClusterWrapper mockClusterWrapper;
+
String memberName = "mock-member";
+ @BeforeClass
+ public static void setUpClass() throws IOException {
+
+ Config config = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder().
+ put("akka.actor.default-dispatcher.type",
+ "akka.testkit.CallingThreadDispatcherConfigurator").build()).
+ withFallback(ConfigFactory.load());
+ system = ActorSystem.create("test", config);
+ }
+
+ @AfterClass
+ public static void tearDownClass() throws IOException {
+ JavaTestKit.shutdownActorSystem(system);
+ system = null;
+ }
+
@Before
public void setUp(){
MockitoAnnotations.initMocks(this);
schemaContext = TestModel.createTestContext();
+ DatastoreContext dataStoreContext = DatastoreContext.newBuilder().build();
+
doReturn(getSystem()).when(mockActorContext).getActorSystem();
doReturn(memberName).when(mockActorContext).getCurrentMemberName();
doReturn(schemaContext).when(mockActorContext).getSchemaContext();
+ doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
+ doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
+ doReturn(dataStoreContext).when(mockActorContext).getDatastoreContext();
ShardStrategyFactory.setConfiguration(configuration);
}
+ private ActorSystem getSystem() {
+ return system;
+ }
+
private CreateTransaction eqCreateTransaction(final String memberName,
final TransactionType type) {
ArgumentMatcher<CreateTransaction> matcher = new ArgumentMatcher<CreateTransaction>() {
return argThat(matcher);
}
- private DataExists eqDataExists() {
+ private DataExists eqSerializedDataExists() {
ArgumentMatcher<DataExists> matcher = new ArgumentMatcher<DataExists>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private ReadData eqReadData() {
+ private DataExists eqDataExists() {
+ ArgumentMatcher<DataExists> matcher = new ArgumentMatcher<DataExists>() {
+ @Override
+ public boolean matches(Object argument) {
+ return (argument instanceof DataExists) &&
+ ((DataExists)argument).getPath().equals(TestModel.TEST_PATH);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private ReadData eqSerializedReadData() {
ArgumentMatcher<ReadData> matcher = new ArgumentMatcher<ReadData>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private WriteData eqWriteData(final NormalizedNode<?, ?> nodeToWrite) {
+ private ReadData eqReadData() {
+ ArgumentMatcher<ReadData> matcher = new ArgumentMatcher<ReadData>() {
+ @Override
+ public boolean matches(Object argument) {
+ return (argument instanceof ReadData) &&
+ ((ReadData)argument).getPath().equals(TestModel.TEST_PATH);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private WriteData eqSerializedWriteData(final NormalizedNode<?, ?> nodeToWrite) {
ArgumentMatcher<WriteData> matcher = new ArgumentMatcher<WriteData>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private MergeData eqMergeData(final NormalizedNode<?, ?> nodeToWrite) {
+ private WriteData eqWriteData(final NormalizedNode<?, ?> nodeToWrite) {
+ ArgumentMatcher<WriteData> matcher = new ArgumentMatcher<WriteData>() {
+ @Override
+ public boolean matches(Object argument) {
+ if(argument instanceof WriteData) {
+ WriteData obj = (WriteData) argument;
+ return obj.getPath().equals(TestModel.TEST_PATH) &&
+ obj.getData().equals(nodeToWrite);
+ }
+ return false;
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private MergeData eqSerializedMergeData(final NormalizedNode<?, ?> nodeToWrite) {
ArgumentMatcher<MergeData> matcher = new ArgumentMatcher<MergeData>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private DeleteData eqDeleteData() {
+ private MergeData eqMergeData(final NormalizedNode<?, ?> nodeToWrite) {
+ ArgumentMatcher<MergeData> matcher = new ArgumentMatcher<MergeData>() {
+ @Override
+ public boolean matches(Object argument) {
+ if(argument instanceof MergeData) {
+ MergeData obj = ((MergeData) argument);
+ return obj.getPath().equals(TestModel.TEST_PATH) &&
+ obj.getData().equals(nodeToWrite);
+ }
+
+ return false;
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private DeleteData eqSerializedDeleteData() {
ArgumentMatcher<DeleteData> matcher = new ArgumentMatcher<DeleteData>() {
@Override
public boolean matches(Object argument) {
return argThat(matcher);
}
- private Future<Object> readyTxReply(ActorPath path) {
+ private DeleteData eqDeleteData() {
+ ArgumentMatcher<DeleteData> matcher = new ArgumentMatcher<DeleteData>() {
+ @Override
+ public boolean matches(Object argument) {
+ return argument instanceof DeleteData &&
+ ((DeleteData)argument).getPath().equals(TestModel.TEST_PATH);
+ }
+ };
+
+ return argThat(matcher);
+ }
+
+ private Future<Object> readySerializedTxReply(String path) {
return Futures.successful((Object)new ReadyTransactionReply(path).toSerializable());
}
- private Future<Object> readDataReply(NormalizedNode<?, ?> data) {
+ private Future<Object> readyTxReply(String path) {
+ return Futures.successful((Object)new ReadyTransactionReply(path));
+ }
+
+
+ private Future<Object> readSerializedDataReply(NormalizedNode<?, ?> data) {
return Futures.successful(new ReadDataReply(schemaContext, data).toSerializable());
}
- private Future<Object> dataExistsReply(boolean exists) {
+ private Future<ReadDataReply> readDataReply(NormalizedNode<?, ?> data) {
+ return Futures.successful(new ReadDataReply(schemaContext, data));
+ }
+
+ private Future<Object> dataExistsSerializedReply(boolean exists) {
return Futures.successful(new DataExistsReply(exists).toSerializable());
}
- private Future<Object> writeDataReply() {
+ private Future<DataExistsReply> dataExistsReply(boolean exists) {
+ return Futures.successful(new DataExistsReply(exists));
+ }
+
+ private Future<Object> writeSerializedDataReply() {
return Futures.successful(new WriteDataReply().toSerializable());
}
- private Future<Object> mergeDataReply() {
+ private Future<WriteDataReply> writeDataReply() {
+ return Futures.successful(new WriteDataReply());
+ }
+
+ private Future<Object> mergeSerializedDataReply() {
return Futures.successful(new MergeDataReply().toSerializable());
}
- private Future<Object> deleteDataReply() {
+ private Future<MergeDataReply> mergeDataReply() {
+ return Futures.successful(new MergeDataReply());
+ }
+
+ private Future<Object> deleteSerializedDataReply() {
return Futures.successful(new DeleteDataReply().toSerializable());
}
+ private Future<DeleteDataReply> deleteDataReply() {
+ return Futures.successful(new DeleteDataReply());
+ }
+
private ActorSelection actorSelection(ActorRef actorRef) {
return getSystem().actorSelection(actorRef.path());
}
.setTransactionId("txn-1").build();
}
- private ActorRef setupActorContextWithInitialCreateTransaction(TransactionType type) {
- ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
- doReturn(getSystem().actorSelection(actorRef.path())).
+ private ActorRef setupActorContextWithInitialCreateTransaction(ActorSystem actorSystem, TransactionType type) {
+ ActorRef actorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+ doReturn(actorSystem.actorSelection(actorRef.path())).
when(mockActorContext).actorSelection(actorRef.path().toString());
- doReturn(createTransactionReply(actorRef)).when(mockActorContext).
- executeShardOperation(eq(DefaultShardStrategy.DEFAULT_SHARD),
+
+ doReturn(Futures.successful(actorSystem.actorSelection(actorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+
+ doReturn(Futures.successful(createTransactionReply(actorRef))).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(actorRef.path())),
eqCreateTransaction(memberName, type));
- doReturn(actorRef.path().toString()).when(mockActorContext).resolvePath(
- anyString(), eq(actorRef.path().toString()));
- doReturn(actorRef.path()).when(mockActorContext).actorFor(actorRef.path().toString());
+
+ doReturn(false).when(mockActorContext).isLocalPath(actorRef.path().toString());
return actorRef;
}
@Test
public void testRead() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
- doReturn(readDataReply(null)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(readDataReply(expectedNode)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
readOptional = transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
@Test(expected = ReadFailedException.class)
public void testReadWithInvalidReplyMessageType() throws Exception {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
doReturn(Futures.successful(new Object())).when(mockActorContext).
- executeRemoteOperationAsync(any(ActorSelection.class), any());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
@Test(expected = TestException.class)
public void testReadWithAsyncRemoteOperatonFailure() throws Throwable {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeRemoteOperationAsync(any(ActorSelection.class), any());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
private void testExceptionOnInitialCreateTransaction(Exception exToThrow, Invoker invoker)
throws Throwable {
+ ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
- doThrow(exToThrow).when(mockActorContext).executeShardOperation(
- anyString(), any());
+ if (exToThrow instanceof PrimaryNotFoundException) {
+ doReturn(Futures.failed(exToThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
+ } else {
+ doReturn(Futures.successful(getSystem().actorSelection(actorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(anyString());
+ }
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- READ_ONLY);
+ doReturn(Futures.failed(exToThrow)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), any());
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
propagateReadFailedExceptionCause(invoker.invoke(transactionProxy));
}
@Test(expected = TestException.class)
public void testReadWithPriorRecordingOperationFailure() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeRemoteOperationAsync(eq(actorSelection(actorRef)), eqDeleteData());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDeleteData());
- doReturn(readDataReply(null)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
try {
propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
} finally {
- verify(mockActorContext, times(0)).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ verify(mockActorContext, times(0)).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
}
}
@Test
public void testReadWithPriorRecordingOperationSuccessful() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(expectedNode));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(expectedNode));
- doReturn(readDataReply(expectedNode)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
transactionProxy.read(TestModel.TEST_PATH);
}
+ @Test(expected=IllegalArgumentException.class)
+ public void testInvalidCreateTransactionReply() throws Throwable {
+ ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(getSystem().actorSelection(actorRef.path())).when(mockActorContext).
+ actorSelection(actorRef.path().toString());
+
+ doReturn(Futures.successful(getSystem().actorSelection(actorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+
+ doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
+ eq(getSystem().actorSelection(actorRef.path())), eqCreateTransaction(memberName, READ_ONLY));
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_ONLY);
+
+ propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
+ }
+
@Test
public void testExists() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
- doReturn(dataExistsReply(false)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
assertEquals("Exists response", false, exists);
- doReturn(dataExistsReply(true)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ doReturn(dataExistsSerializedReply(true)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
@Test(expected = ReadFailedException.class)
public void testExistsWithInvalidReplyMessageType() throws Exception {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
doReturn(Futures.successful(new Object())).when(mockActorContext).
- executeRemoteOperationAsync(any(ActorSelection.class), any());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDataExists());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
@Test(expected = TestException.class)
public void testExistsWithAsyncRemoteOperatonFailure() throws Throwable {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeRemoteOperationAsync(any(ActorSelection.class), any());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDataExists());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_ONLY);
@Test(expected = TestException.class)
public void testExistsWithPriorRecordingOperationFailure() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeRemoteOperationAsync(eq(actorSelection(actorRef)), eqDeleteData());
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedDeleteData());
- doReturn(dataExistsReply(false)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
try {
propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
} finally {
- verify(mockActorContext, times(0)).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ verify(mockActorContext, times(0)).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
}
}
@Test
public void testExistsWithPriorRecordingOperationSuccessful() throws Throwable {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
- doReturn(dataExistsReply(true)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqDataExists());
+ doReturn(dataExistsSerializedReply(true)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDataExists());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
@Test
public void testWrite() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
- verify(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
WriteDataReply.SERIALIZABLE_CLASS);
@Test
public void testMerge() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqMergeData(nodeToWrite));
+ doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
- TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
- WRITE_ONLY);
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
- verify(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqMergeData(nodeToWrite));
+ verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
MergeDataReply.SERIALIZABLE_CLASS);
@Test
public void testDelete() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
- doReturn(deleteDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqDeleteData());
+ doReturn(deleteSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDeleteData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
transactionProxy.delete(TestModel.TEST_PATH);
- verify(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqDeleteData());
+ verify(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedDeleteData());
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
DeleteDataReply.SERIALIZABLE_CLASS);
}
- private void verifyCohortPathFutures(ThreePhaseCommitCohortProxy proxy,
- Object... expReplies) throws Exception {
+ private void verifyCohortFutures(ThreePhaseCommitCohortProxy proxy,
+ Object... expReplies) throws Exception {
assertEquals("getReadyOperationFutures size", expReplies.length,
- proxy.getCohortPathFutures().size());
+ proxy.getCohortFutures().size());
int i = 0;
- for( Future<ActorPath> future: proxy.getCohortPathFutures()) {
+ for( Future<ActorSelection> future: proxy.getCohortFutures()) {
assertNotNull("Ready operation Future is null", future);
Object expReply = expReplies[i++];
- if(expReply instanceof ActorPath) {
- ActorPath actual = Await.result(future, Duration.create(5, TimeUnit.SECONDS));
+ if(expReply instanceof ActorSelection) {
+ ActorSelection actual = Await.result(future, Duration.create(5, TimeUnit.SECONDS));
assertEquals("Cohort actor path", expReply, actual);
} else {
// Expecting exception.
@SuppressWarnings("unchecked")
@Test
public void testReady() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(readDataReply(null)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
- doReturn(writeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
- doReturn(readyTxReply(actorRef.path())).when(mockActorContext).executeRemoteOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
WriteDataReply.SERIALIZABLE_CLASS);
- verifyCohortPathFutures(proxy, actorRef.path());
+ verifyCohortFutures(proxy, getSystem().actorSelection(actorRef.path()));
}
@SuppressWarnings("unchecked")
@Test
public void testReadyWithRecordingOperationFailure() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqMergeData(nodeToWrite));
+ doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeRemoteOperationAsync(eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ executeOperationAsync(eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
- doReturn(readyTxReply(actorRef.path())).when(mockActorContext).executeRemoteOperationAsync(
+ doReturn(readySerializedTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
eq(actorSelection(actorRef)), isA(ReadyTransaction.SERIALIZABLE_CLASS));
+ doReturn(false).when(mockActorContext).isLocalPath(actorRef.path().toString());
+
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+ verifyCohortFutures(proxy, TestException.class);
+
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
MergeDataReply.SERIALIZABLE_CLASS, TestException.class);
-
- verifyCohortPathFutures(proxy, TestException.class);
}
@SuppressWarnings("unchecked")
@Test
public void testReadyWithReplyFailure() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(mergeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqMergeData(nodeToWrite));
+ doReturn(mergeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedMergeData(nodeToWrite));
doReturn(Futures.failed(new TestException())).when(mockActorContext).
- executeRemoteOperationAsync(eq(actorSelection(actorRef)),
+ executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
MergeDataReply.SERIALIZABLE_CLASS);
- verifyCohortPathFutures(proxy, TestException.class);
+ verifyCohortFutures(proxy, TestException.class);
}
@Test
public void testReadyWithInitialCreateTransactionFailure() throws Exception {
- doThrow(new PrimaryNotFoundException("mock")).when(mockActorContext).executeShardOperation(
- anyString(), any());
+ doReturn(Futures.failed(new PrimaryNotFoundException("mock"))).when(
+ mockActorContext).findPrimaryShardAsync(anyString());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
WRITE_ONLY);
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyCohortPathFutures(proxy, PrimaryNotFoundException.class);
+ verifyCohortFutures(proxy, PrimaryNotFoundException.class);
}
@SuppressWarnings("unchecked")
@Test
public void testReadyWithInvalidReplyMessageType() throws Exception {
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(WRITE_ONLY);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
- doReturn(writeDataReply()).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqWriteData(nodeToWrite));
+ doReturn(writeSerializedDataReply()).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedWriteData(nodeToWrite));
doReturn(Futures.successful(new Object())).when(mockActorContext).
- executeRemoteOperationAsync(eq(actorSelection(actorRef)),
+ executeOperationAsync(eq(actorSelection(actorRef)),
isA(ReadyTransaction.SERIALIZABLE_CLASS));
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
- verifyCohortPathFutures(proxy, IllegalArgumentException.class);
+ verifyCohortFutures(proxy, IllegalArgumentException.class);
}
@Test
public void testGetIdentifier() {
- setupActorContextWithInitialCreateTransaction(READ_ONLY);
+ setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
TransactionProxy.TransactionType.READ_ONLY);
@SuppressWarnings("unchecked")
@Test
public void testClose() throws Exception{
- ActorRef actorRef = setupActorContextWithInitialCreateTransaction(READ_WRITE);
+ ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
- doReturn(readDataReply(null)).when(mockActorContext).executeRemoteOperationAsync(
- eq(actorSelection(actorRef)), eqReadData());
+ doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
+ eq(actorSelection(actorRef)), eqSerializedReadData());
TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
READ_WRITE);
transactionProxy.close();
- verify(mockActorContext).sendRemoteOperationAsync(
+ verify(mockActorContext).sendOperationAsync(
eq(actorSelection(actorRef)), isA(CloseTransaction.SERIALIZABLE_CLASS));
}
+
+
+ /**
+ * Method to test a local Tx actor. The Tx paths are matched to decide if the
+ * Tx actor is local or not. This is done by mocking the Tx actor path
+ * and the caller paths and ensuring that the paths have the remote-address format
+ *
+ * Note: Since the default akka provider for test is not a RemoteActorRefProvider,
+ * the paths returned for the actors for all the tests are not qualified remote paths.
+ * Hence are treated as non-local/remote actors. In short, all tests except
+ * few below run for remote actors
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testLocalTxActorRead() throws Exception {
+ ActorSystem actorSystem = getSystem();
+ ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(actorSystem.actorSelection(shardActorRef.path())).
+ when(mockActorContext).actorSelection(shardActorRef.path().toString());
+
+ doReturn(Futures.successful(actorSystem.actorSelection(shardActorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+
+ String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
+ .setTransactionId("txn-1")
+ .setTransactionActorPath(actorPath)
+ .build();
+
+ doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ eqCreateTransaction(memberName, READ_ONLY));
+
+ doReturn(true).when(mockActorContext).isLocalPath(actorPath);
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,READ_ONLY);
+
+ // negative test case with null as the reply
+ doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqReadData());
+
+ Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
+ TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", false, readOptional.isPresent());
+
+ // test case with node as read data reply
+ NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqReadData());
+
+ readOptional = transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+
+ assertEquals("NormalizedNode isPresent", true, readOptional.isPresent());
+
+ assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
+
+ // test for local data exists
+ doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqDataExists());
+
+ boolean exists = transactionProxy.exists(TestModel.TEST_PATH).checkedGet();
+
+ assertEquals("Exists response", true, exists);
+ }
+
+ @Test
+ public void testLocalTxActorWrite() throws Exception {
+ ActorSystem actorSystem = getSystem();
+ ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
+
+ doReturn(actorSystem.actorSelection(shardActorRef.path())).
+ when(mockActorContext).actorSelection(shardActorRef.path().toString());
+
+ doReturn(Futures.successful(actorSystem.actorSelection(shardActorRef.path()))).
+ when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
+
+ String actorPath = "akka.tcp://system@127.0.0.1:2550/user/tx-actor";
+ CreateTransactionReply createTransactionReply = CreateTransactionReply.newBuilder()
+ .setTransactionId("txn-1")
+ .setTransactionActorPath(actorPath)
+ .build();
+
+ doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).
+ executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
+ eqCreateTransaction(memberName, WRITE_ONLY));
+
+ doReturn(true).when(mockActorContext).isLocalPath(actorPath);
+
+ NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+
+ doReturn(writeDataReply()).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqWriteData(nodeToWrite));
+
+ TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, WRITE_ONLY);
+ transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
+
+ verify(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqWriteData(nodeToWrite));
+
+ //testing local merge
+ doReturn(mergeDataReply()).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqMergeData(nodeToWrite));
+
+ transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
+
+ verify(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqMergeData(nodeToWrite));
+
+
+ //testing local delete
+ doReturn(deleteDataReply()).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), eqDeleteData());
+
+ transactionProxy.delete(TestModel.TEST_PATH);
+
+ verify(mockActorContext).executeOperationAsync(any(ActorSelection.class), eqDeleteData());
+
+ verifyRecordingOperationFutures(transactionProxy.getRecordedOperationFutures(),
+ WriteDataReply.class, MergeDataReply.class, DeleteDataReply.class);
+
+ // testing ready
+ doReturn(readyTxReply(shardActorRef.path().toString())).when(mockActorContext).executeOperationAsync(
+ any(ActorSelection.class), isA(ReadyTransaction.class));
+
+ DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
+
+ assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
+
+ ThreePhaseCommitCohortProxy proxy = (ThreePhaseCommitCohortProxy) ready;
+
+ verifyCohortFutures(proxy, getSystem().actorSelection(shardActorRef.path()));
+ }
}
assertEquals("member-1-shard-inventory-config", id.toString());
}
+ @Test
+ public void testFromShardIdString(){
+ String shardIdStr = "member-1-shard-inventory-config";
+
+ ShardIdentifier id = ShardIdentifier.builder().fromShardIdString(shardIdStr).build();
+ assertEquals("member-1", id.getMemberName());
+ assertEquals("inventory", id.getShardName());
+ assertEquals("config", id.getType());
+ }
}
package org.opendaylight.controller.cluster.datastore.messages;
-import junit.framework.Assert;
+import org.junit.Assert;
import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
public class MergeDataTest {
@Test
- public void testBasic(){
- MergeData mergeData = new MergeData(TestModel.TEST_PATH, ImmutableNodes
- .containerNode(TestModel.TEST_QNAME),
- TestModel.createTestContext());
-
- MergeData output = MergeData
- .fromSerializable(mergeData.toSerializable(),
- TestModel.createTestContext());
-
- }
-
- @Test
- public void testNormalizedNodeEncodeDecode(){
- NormalizedNode<?, ?> expected =
- ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-
- NormalizedNodeMessages.Container node =
- new NormalizedNodeToNodeCodec(TestModel.createTestContext())
- .encode(TestModel.TEST_PATH,
- expected);
-
- String parentPath = node.getParentPath();
-
- NormalizedNodeMessages.Node normalizedNode =
- node.getNormalizedNode();
-
- NormalizedNode<?,?> actual = new NormalizedNodeToNodeCodec(TestModel.createTestContext()).decode(TestModel.TEST_PATH,
- normalizedNode);
-
-
- Assert.assertEquals(expected, actual);
+ public void testSerialization() {
+ SchemaContext schemaContext = TestModel.createTestContext();
+ MergeData expected = new MergeData(TestModel.TEST_PATH, ImmutableNodes
+ .containerNode(TestModel.TEST_QNAME), schemaContext);
+
+ MergeData actual = MergeData.fromSerializable(expected.toSerializable(), schemaContext);
+ Assert.assertEquals("getPath", expected.getPath(), actual.getPath());
+ Assert.assertEquals("getData", expected.getData(), actual.getData());
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+/**
+ * Unit tests for WriteData.
+ *
+ * @author Thomas Pantelis
+ */
+public class WriteDataTest {
+
+ @Test
+ public void testSerialization() {
+ SchemaContext schemaContext = TestModel.createTestContext();
+ WriteData expected = new WriteData(TestModel.TEST_PATH, ImmutableNodes
+ .containerNode(TestModel.TEST_QNAME), schemaContext);
+
+ WriteData actual = WriteData.fromSerializable(expected.toSerializable(), schemaContext);
+ Assert.assertEquals("getPath", expected.getPath(), actual.getPath());
+ Assert.assertEquals("getData", expected.getData(), actual.getData());
+ }
+}
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
public class MergeModificationTest extends AbstractModificationTest{
- @Test
- public void testApply() throws Exception {
- //TODO : Need to write a better test for this
+ @Test
+ public void testApply() throws Exception {
+ //TODO : Need to write a better test for this
- //Write something into the datastore
- DOMStoreReadWriteTransaction writeTransaction = store.newReadWriteTransaction();
- MergeModification writeModification = new MergeModification(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext());
- writeModification.apply(writeTransaction);
- commitTransaction(writeTransaction);
+ //Write something into the datastore
+ DOMStoreReadWriteTransaction writeTransaction = store.newReadWriteTransaction();
+ MergeModification writeModification = new MergeModification(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext());
+ writeModification.apply(writeTransaction);
+ commitTransaction(writeTransaction);
- //Check if it's in the datastore
- Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
- Assert.assertTrue(data.isPresent());
+ //Check if it's in the datastore
+ Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
+ Assert.assertTrue(data.isPresent());
- }
+ }
+
+ @Test
+ public void testSerialization() {
+ SchemaContext schemaContext = TestModel.createTestContext();
+ NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ MergeModification mergeModification = new MergeModification(TestModel.TEST_PATH,
+ node, schemaContext);
+
+ Object serialized = mergeModification.toSerializable();
+
+ MergeModification newModification = MergeModification.fromSerializable(serialized, schemaContext);
+
+ Assert.assertEquals("getPath", TestModel.TEST_PATH, newModification.getPath());
+ Assert.assertEquals("getData", node, newModification.getData());
+ }
}
MutableCompositeModification compositeModification = new MutableCompositeModification();
compositeModification.addModification(new WriteModification(TestModel.TEST_PATH,
ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext()));
-
- assertNotEquals(compositeModification.toSerializable(), compositeModification.toSerializable());
-
+ Object one = compositeModification.toSerializable();
+ try{Thread.sleep(10);}catch(Exception err){}
+ Object two = compositeModification.toSerializable();
+ assertNotEquals(one,two);
}
}
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
public class WriteModificationTest extends AbstractModificationTest{
- @Test
- public void testApply() throws Exception {
- //Write something into the datastore
- DOMStoreReadWriteTransaction writeTransaction = store.newReadWriteTransaction();
- WriteModification writeModification = new WriteModification(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext());
- writeModification.apply(writeTransaction);
- commitTransaction(writeTransaction);
+ @Test
+ public void testApply() throws Exception {
+ //Write something into the datastore
+ DOMStoreReadWriteTransaction writeTransaction = store.newReadWriteTransaction();
+ WriteModification writeModification = new WriteModification(TestModel.TEST_PATH,
+ ImmutableNodes.containerNode(TestModel.TEST_QNAME), TestModel.createTestContext());
+ writeModification.apply(writeTransaction);
+ commitTransaction(writeTransaction);
- //Check if it's in the datastore
- Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
- Assert.assertTrue(data.isPresent());
+ //Check if it's in the datastore
+ Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
+ Assert.assertTrue(data.isPresent());
+ }
- }
+ @Test
+ public void testSerialization() {
+ SchemaContext schemaContext = TestModel.createTestContext();
+ NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+ WriteModification writeModification = new WriteModification(TestModel.TEST_PATH,
+ node, schemaContext);
+
+ Object serialized = writeModification.toSerializable();
+
+ WriteModification newModification = WriteModification.fromSerializable(serialized, schemaContext);
+
+ Assert.assertEquals("getPath", TestModel.TEST_PATH, newModification.getPath());
+ Assert.assertEquals("getData", node, newModification.getData());
+ }
}
package org.opendaylight.controller.cluster.datastore.utils;
-import java.util.concurrent.TimeUnit;
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.actor.UntypedActor;
import akka.japi.Creator;
import akka.testkit.JavaTestKit;
-
+import com.google.common.base.Optional;
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
-
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.Duration;
+import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
public class ActorContextTest extends AbstractActorTest{
- @Test
- public void testResolvePathForRemoteActor(){
- ActorContext actorContext =
- new ActorContext(mock(ActorSystem.class), mock(ActorRef.class),mock(
- ClusterWrapper.class),
- mock(Configuration.class));
-
- String actual = actorContext.resolvePath(
- "akka.tcp://system@127.0.0.1:2550/user/shardmanager/shard",
- "akka://system/user/shardmanager/shard/transaction");
-
- String expected = "akka.tcp://system@127.0.0.1:2550/user/shardmanager/shard/transaction";
-
- assertEquals(expected, actual);
- }
-
- @Test
- public void testResolvePathForLocalActor(){
- ActorContext actorContext =
- new ActorContext(getSystem(), mock(ActorRef.class), mock(ClusterWrapper.class),
- mock(Configuration.class));
-
- String actual = actorContext.resolvePath(
- "akka://system/user/shardmanager/shard",
- "akka://system/user/shardmanager/shard/transaction");
-
- String expected = "akka://system/user/shardmanager/shard/transaction";
-
- assertEquals(expected, actual);
-
- System.out.println(actorContext
- .actorFor("akka://system/user/shardmanager/shard/transaction"));
- }
-
private static class MockShardManager extends UntypedActor {
}
@Test
- public void testExecuteLocalShardOperationWithShardFound(){
+ public void testFindLocalShardWithShardFound(){
new JavaTestKit(getSystem()) {{
new Within(duration("1 seconds")) {
new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
mock(Configuration.class));
- Object out = actorContext.executeLocalShardOperation("default", "hello");
+ Optional<ActorRef> out = actorContext.findLocalShard("default");
- assertEquals("hello", out);
+ assertEquals(shardActorRef, out.get());
expectNoMsg();
}
@Test
- public void testExecuteLocalShardOperationWithShardNotFound(){
+ public void testFindLocalShardWithShardNotFound(){
new JavaTestKit(getSystem()) {{
+ ActorRef shardManagerActorRef = getSystem()
+ .actorOf(MockShardManager.props(false, null));
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- ActorRef shardManagerActorRef = getSystem()
- .actorOf(MockShardManager.props(false, null));
-
- ActorContext actorContext =
- new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
+ ActorContext actorContext =
+ new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
mock(Configuration.class));
- Object out = actorContext.executeLocalShardOperation("default", "hello");
-
- assertNull(out);
-
-
- expectNoMsg();
- }
- };
+ Optional<ActorRef> out = actorContext.findLocalShard("default");
+ assertTrue(!out.isPresent());
}};
}
-
@Test
- public void testFindLocalShardWithShardFound(){
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
-
- ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
-
- ActorRef shardManagerActorRef = getSystem()
- .actorOf(MockShardManager.props(true, shardActorRef));
-
- ActorContext actorContext =
- new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
- mock(Configuration.class));
-
- Object out = actorContext.findLocalShard("default");
-
- assertEquals(shardActorRef, out);
-
-
- expectNoMsg();
- }
- };
- }};
-
- }
-
- @Test
- public void testFindLocalShardWithShardNotFound(){
+ public void testExecuteRemoteOperation() {
new JavaTestKit(getSystem()) {{
+ ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
- new Within(duration("1 seconds")) {
- @Override
- protected void run() {
+ ActorRef shardManagerActorRef = getSystem()
+ .actorOf(MockShardManager.props(true, shardActorRef));
- ActorRef shardManagerActorRef = getSystem()
- .actorOf(MockShardManager.props(false, null));
-
- ActorContext actorContext =
- new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
+ ActorContext actorContext =
+ new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
mock(Configuration.class));
- Object out = actorContext.findLocalShard("default");
-
- assertNull(out);
+ ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
+ Object out = actorContext.executeOperation(actor, "hello");
- expectNoMsg();
- }
- };
+ assertEquals("hello", out);
}};
-
}
@Test
- public void testExecuteRemoteOperation() {
+ public void testExecuteRemoteOperationAsync() {
new JavaTestKit(getSystem()) {{
+ ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
-
- ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
-
- ActorRef shardManagerActorRef = getSystem()
- .actorOf(MockShardManager.props(true, shardActorRef));
+ ActorRef shardManagerActorRef = getSystem()
+ .actorOf(MockShardManager.props(true, shardActorRef));
- ActorContext actorContext =
- new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
+ ActorContext actorContext =
+ new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
mock(Configuration.class));
- ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
-
- Object out = actorContext.executeRemoteOperation(actor, "hello");
+ ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
- assertEquals("hello", out);
+ Future<Object> future = actorContext.executeOperationAsync(actor, "hello");
- expectNoMsg();
- }
- };
+ try {
+ Object result = Await.result(future, Duration.create(3, TimeUnit.SECONDS));
+ assertEquals("Result", "hello", result);
+ } catch(Exception e) {
+ throw new AssertionError(e);
+ }
}};
}
@Test
- public void testExecuteRemoteOperationAsync() {
- new JavaTestKit(getSystem()) {{
-
- new Within(duration("3 seconds")) {
- @Override
- protected void run() {
+ public void testIsLocalPath() {
+ MockClusterWrapper clusterWrapper = new MockClusterWrapper();
+ ActorContext actorContext =
+ new ActorContext(getSystem(), null, clusterWrapper, mock(Configuration.class));
- ActorRef shardActorRef = getSystem().actorOf(Props.create(EchoActor.class));
+ clusterWrapper.setSelfAddress("");
+ assertEquals(false, actorContext.isLocalPath(null));
+ assertEquals(false, actorContext.isLocalPath(""));
- ActorRef shardManagerActorRef = getSystem()
- .actorOf(MockShardManager.props(true, shardActorRef));
+ clusterWrapper.setSelfAddress(null);
+ assertEquals(false, actorContext.isLocalPath(""));
- ActorContext actorContext =
- new ActorContext(getSystem(), shardManagerActorRef , mock(ClusterWrapper.class),
- mock(Configuration.class));
+ clusterWrapper.setSelfAddress("akka://test/user/$b");
+ assertEquals(false, actorContext.isLocalPath("akka://test/user/$a"));
- ActorSelection actor = actorContext.actorSelection(shardActorRef.path());
+ clusterWrapper.setSelfAddress("akka.tcp://system@127.0.0.1:2550/");
+ assertEquals(true, actorContext.isLocalPath("akka.tcp://system@127.0.0.1:2550/"));
- Future<Object> future = actorContext.executeRemoteOperationAsync(actor, "hello");
+ clusterWrapper.setSelfAddress("akka.tcp://system@127.0.0.1:2550");
+ assertEquals(false, actorContext.isLocalPath("akka.tcp://system@127.0.0.1:2550/"));
- try {
- Object result = Await.result(future, Duration.create(3, TimeUnit.SECONDS));
- assertEquals("Result", "hello", result);
- } catch(Exception e) {
- throw new AssertionError(e);
- }
+ clusterWrapper.setSelfAddress("akka.tcp://system@128.0.0.1:2550/");
+ assertEquals(false, actorContext.isLocalPath("akka.tcp://system@127.0.0.1:2550/"));
- expectNoMsg();
- }
- };
- }};
+ clusterWrapper.setSelfAddress("akka.tcp://system@127.0.0.1:2551/");
+ assertEquals(false, actorContext.isLocalPath("akka.tcp://system@127.0.0.1:2550/"));
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.Uninterruptibles;
+import scala.concurrent.Future;
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
+
+public class InMemoryJournal extends AsyncWriteJournal {
+
+ private static final Map<String, Map<Long, Object>> journals = new ConcurrentHashMap<>();
+
+ private static final Map<String, CountDownLatch> deleteMessagesCompleteLatches = new ConcurrentHashMap<>();
+
+ private static final Map<String, CountDownLatch> blockReadMessagesLatches = new ConcurrentHashMap<>();
+
+ public static void addEntry(String persistenceId, long sequenceNr, Object data) {
+ Map<Long, Object> journal = journals.get(persistenceId);
+ if(journal == null) {
+ journal = Maps.newLinkedHashMap();
+ journals.put(persistenceId, journal);
+ }
+
+ synchronized (journal) {
+ journal.put(sequenceNr, data);
+ }
+ }
+
+ public static void clear() {
+ journals.clear();
+ }
+
+ public static Map<Long, Object> get(String persistenceId) {
+ Map<Long, Object> journal = journals.get(persistenceId);
+ return journal != null ? journal : Collections.<Long, Object>emptyMap();
+ }
+
+ public static void waitForDeleteMessagesComplete(String persistenceId) {
+ assertEquals("Recovery complete", true, Uninterruptibles.awaitUninterruptibly(
+ deleteMessagesCompleteLatches.get(persistenceId), 5, TimeUnit.SECONDS));
+ }
+
+ public static void addDeleteMessagesCompleteLatch(String persistenceId) {
+ deleteMessagesCompleteLatches.put(persistenceId, new CountDownLatch(1));
+ }
+
+ public static void addBlockReadMessagesLatch(String persistenceId, CountDownLatch latch) {
+ blockReadMessagesLatches.put(persistenceId, latch);
+ }
+
+ @Override
+ public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr,
+ long toSequenceNr, long max, final Procedure<PersistentRepr> replayCallback) {
+ return Futures.future(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ CountDownLatch blockLatch = blockReadMessagesLatches.remove(persistenceId);
+ if(blockLatch != null) {
+ Uninterruptibles.awaitUninterruptibly(blockLatch);
+ }
+
+ Map<Long, Object> journal = journals.get(persistenceId);
+ if(journal == null) {
+ return null;
+ }
+
+ synchronized (journal) {
+ for (Map.Entry<Long,Object> entry : journal.entrySet()) {
+ PersistentRepr persistentMessage =
+ new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId,
+ false, null, null);
+ replayCallback.apply(persistentMessage);
+ }
+ }
+
+ return null;
+ }
+ }, context().dispatcher());
+ }
+
+ @Override
+ public Future<Long> doAsyncReadHighestSequenceNr(String persistenceId, long fromSequenceNr) {
+ return Futures.successful(-1L);
+ }
+
+ @Override
+ public Future<Void> doAsyncWriteMessages(final Iterable<PersistentRepr> messages) {
+ return Futures.future(new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ for (PersistentRepr repr : messages) {
+ Map<Long, Object> journal = journals.get(repr.persistenceId());
+ if(journal == null) {
+ journal = Maps.newLinkedHashMap();
+ journals.put(repr.persistenceId(), journal);
+ }
+
+ synchronized (journal) {
+ journal.put(repr.sequenceNr(), repr.payload());
+ }
+ }
+ return null;
+ }
+ }, context().dispatcher());
+ }
+
+ @Override
+ public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> confirmations) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> messageIds, boolean permanent) {
+ return Futures.successful(null);
+ }
+
+ @Override
+ public Future<Void> doAsyncDeleteMessagesTo(String persistenceId, long toSequenceNr, boolean permanent) {
+ Map<Long, Object> journal = journals.get(persistenceId);
+ if(journal != null) {
+ synchronized (journal) {
+ Iterator<Long> iter = journal.keySet().iterator();
+ while(iter.hasNext()) {
+ Long n = iter.next();
+ if(n <= toSequenceNr) {
+ iter.remove();
+ }
+ }
+ }
+ }
+
+ CountDownLatch latch = deleteMessagesCompleteLatches.get(persistenceId);
+ if(latch != null) {
+ latch.countDown();
+ }
+
+ return Futures.successful(null);
+ }
+}
import akka.persistence.snapshot.japi.SnapshotStore;
import com.google.common.collect.Iterables;
import scala.concurrent.Future;
-
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.opendaylight.controller.cluster.raft.Snapshot;
public class InMemorySnapshotStore extends SnapshotStore {
- Map<String, List<Snapshot>> snapshots = new HashMap<>();
+ private static Map<String, List<StoredSnapshot>> snapshots = new ConcurrentHashMap<>();
+
+ public static void addSnapshot(String persistentId, Snapshot snapshot) {
+ List<StoredSnapshot> snapshotList = snapshots.get(persistentId);
+
+ if(snapshotList == null) {
+ snapshotList = new ArrayList<>();
+ snapshots.put(persistentId, snapshotList);
+ }
+
+ snapshotList.add(new StoredSnapshot(new SnapshotMetadata(persistentId, snapshotList.size(),
+ System.currentTimeMillis()), snapshot));
+ }
+
+ public static void clear() {
+ snapshots.clear();
+ }
- @Override public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
+ @Override
+ public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
SnapshotSelectionCriteria snapshotSelectionCriteria) {
- List<Snapshot> snapshotList = snapshots.get(s);
+ List<StoredSnapshot> snapshotList = snapshots.get(s);
if(snapshotList == null){
return Futures.successful(Option.<SelectedSnapshot>none());
}
- Snapshot snapshot = Iterables.getLast(snapshotList);
+ StoredSnapshot snapshot = Iterables.getLast(snapshotList);
SelectedSnapshot selectedSnapshot =
new SelectedSnapshot(snapshot.getMetadata(), snapshot.getData());
return Futures.successful(Option.some(selectedSnapshot));
}
- @Override public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
- List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+ @Override
+ public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+ List<StoredSnapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
if(snapshotList == null){
snapshotList = new ArrayList<>();
snapshots.put(snapshotMetadata.persistenceId(), snapshotList);
}
- snapshotList.add(new Snapshot(snapshotMetadata, o));
+ snapshotList.add(new StoredSnapshot(snapshotMetadata, o));
return Futures.successful(null);
}
- @Override public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+ @Override
+ public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
}
- @Override public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
- List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+ @Override
+ public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+ List<StoredSnapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
if(snapshotList == null){
return;
int deleteIndex = -1;
for(int i=0;i<snapshotList.size(); i++){
- Snapshot snapshot = snapshotList.get(i);
+ StoredSnapshot snapshot = snapshotList.get(i);
if(snapshotMetadata.equals(snapshot.getMetadata())){
deleteIndex = i;
break;
}
- @Override public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
+ @Override
+ public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
throws Exception {
- List<Snapshot> snapshotList = snapshots.get(s);
+ List<StoredSnapshot> snapshotList = snapshots.get(s);
if(snapshotList == null){
return;
snapshots.remove(s);
}
- private static class Snapshot {
+ private static class StoredSnapshot {
private final SnapshotMetadata metadata;
private final Object data;
- private Snapshot(SnapshotMetadata metadata, Object data) {
+ private StoredSnapshot(SnapshotMetadata metadata, Object data) {
this.metadata = metadata;
this.data = data;
}
import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
+import com.google.common.base.Optional;
public class MockActorContext extends ActorContext {
super(actorSystem, shardManager, new MockClusterWrapper(), new MockConfiguration());
}
-
- @Override public Object executeShardOperation(String shardName,
- Object message) {
- return executeShardOperationResponse;
- }
-
- @Override public Object executeRemoteOperation(ActorSelection actor,
- Object message) {
+ @Override public Object executeOperation(ActorSelection actor,
+ Object message) {
return executeRemoteOperationResponse;
}
- @Override public ActorSelection findPrimary(String shardName) {
- return null;
+ @Override public Optional<ActorSelection> findPrimaryShard(String shardName) {
+ return Optional.absent();
}
public void setExecuteShardOperationResponse(Object response){
}
@Override
- public Object executeLocalOperation(ActorRef actor,
- Object message) {
+ public Object executeOperation(ActorRef actor,
+ Object message) {
return this.executeLocalOperationResponse;
}
- @Override
- public Object executeLocalShardOperation(String shardName,
- Object message) {
- return this.executeLocalShardOperationResponse;
- }
}
import akka.cluster.UniqueAddress;
import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
import scala.collection.JavaConversions;
-
import java.util.HashSet;
import java.util.Set;
public class MockClusterWrapper implements ClusterWrapper{
- @Override public void subscribeToMemberEvents(ActorRef actorRef) {
+ private String selfAddress = "akka.tcp://test@127.0.0.1:2550/user/member-1-shard-test-config";
+
+ @Override
+ public void subscribeToMemberEvents(ActorRef actorRef) {
}
- @Override public String getCurrentMemberName() {
+ @Override
+ public String getCurrentMemberName() {
return "member-1";
}
+ @Override
+ public String getSelfAddress() {
+ return selfAddress;
+ }
+
+ public void setSelfAddress(String selfAddress) {
+ this.selfAddress = selfAddress;
+ }
+
public static void sendMemberUp(ActorRef to, String memberName, String address){
to.tell(createMemberUp(memberName, address), null);
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Uninterruptibles;
+
+/**
+ * A mock DataChangeListener implementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class MockDataChangeListener implements
+ AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> {
+
+ private final List<AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>>>
+ changeList = Lists.newArrayList();
+ private final CountDownLatch changeLatch;
+ private final int expChangeEventCount;
+
+ public MockDataChangeListener(int expChangeEventCount) {
+ changeLatch = new CountDownLatch(expChangeEventCount);
+ this.expChangeEventCount = expChangeEventCount;
+ }
+
+ @Override
+ public void onDataChanged(AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
+ changeList.add(change);
+ changeLatch.countDown();
+ }
+
+ public void waitForChangeEvents(YangInstanceIdentifier... expPaths) {
+ assertEquals("Change notifications complete", true,
+ Uninterruptibles.awaitUninterruptibly(changeLatch, 5, TimeUnit.SECONDS));
+
+ for(int i = 0; i < expPaths.length; i++) {
+ assertTrue(String.format("Change %d does not contain %s", (i+1), expPaths[i]),
+ changeList.get(i).getCreatedData().containsKey(expPaths[i]));
+ }
+ }
+
+ public void expectNoMoreChanges(String assertMsg) {
+ Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
+ assertEquals(assertMsg, expChangeEventCount, changeList.size());
+ }
+}
ActorContext testContext = new ActorContext(actorSystem, actorSystem.actorOf(
Props.create(DoNothingActor.class)), new MockClusterWrapper(), new MockConfiguration());
Object messages = testContext
- .executeLocalOperation(actorRef, "messages");
+ .executeOperation(actorRef, "messages");
Assert.assertNotNull(messages);
Set<QName> childAugmentations = new HashSet<>();
childAugmentations.add(AUG_QNAME);
final YangInstanceIdentifier.AugmentationIdentifier augmentationIdentifier =
- new YangInstanceIdentifier.AugmentationIdentifier(null, childAugmentations);
+ new YangInstanceIdentifier.AugmentationIdentifier(childAugmentations);
final AugmentationNode augmentationNode =
Builders.augmentationBuilder()
.withNodeIdentifier(augmentationIdentifier)
import org.junit.Test;
import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
public class SampleModelsTest {
final NormalizedNodeMessages.Container node =
new NormalizedNodeToNodeCodec(SchemaContextHelper.full())
- .encode(YangInstanceIdentifier.of(PeopleModel.BASE_QNAME),
- expected);
+ .encode(expected);
final NormalizedNodeMessages.Node normalizedNode =
node.getNormalizedNode();
- final NormalizedNode<?,?> actual = new NormalizedNodeToNodeCodec(SchemaContextHelper.full()).decode(YangInstanceIdentifier.of(PeopleModel.BASE_QNAME),
- normalizedNode);
+ final NormalizedNode<?,?> actual = new NormalizedNodeToNodeCodec(SchemaContextHelper.full()).decode(normalizedNode);
Assert.assertEquals(expected, actual);
final NormalizedNodeMessages.Container node =
new NormalizedNodeToNodeCodec(SchemaContextHelper.full())
- .encode(YangInstanceIdentifier.of(CarsModel.BASE_QNAME),
- expected);
+ .encode(expected);
final NormalizedNodeMessages.Node normalizedNode =
node.getNormalizedNode();
final NormalizedNode<?,?> actual = new NormalizedNodeToNodeCodec(SchemaContextHelper.full()).decode(
- YangInstanceIdentifier.of(CarsModel.BASE_QNAME),
normalizedNode);
akka {
persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+ persistence.journal.plugin = "in-memory-journal"
loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
}
}
+in-memory-journal {
+ class = "org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal"
+}
+
in-memory-snapshot-store {
# Class name of the plugin.
class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-core-api</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-broker-impl</artifactId>
<packaging>bundle</packaging>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-inmemory-datastore</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</dependency>
<dependency>
package org.opendaylight.controller.config.yang.md.sal.dom.impl;
import java.util.EnumMap;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
+import org.opendaylight.controller.md.sal.dom.broker.impl.DOMConcurrentDataCommitCoordinator;
import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
+import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataCommitCoordinatorImpl;
+import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataCommitExecutor;
import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl;
import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.yangtools.util.DurationStatisticsTracker;
import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import com.google.common.collect.Lists;
/**
*
datastores.put(LogicalDatastoreType.OPERATIONAL, operStore);
datastores.put(LogicalDatastoreType.CONFIGURATION, configStore);
- /*
- * We use a single-threaded executor for commits with a bounded queue capacity. If the
- * queue capacity is reached, subsequent commit tasks will be rejected and the commits will
- * fail. This is done to relieve back pressure. This should be an extreme scenario - either
- * there's deadlock(s) somewhere and the controller is unstable or some rogue component is
- * continuously hammering commits too fast or the controller is just over-capacity for the
- * system it's running on.
- */
- ExecutorService commitExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
- getMaxDataBrokerCommitQueueSize(), "WriteTxCommit");
-
/*
* We use an executor for commit ListenableFuture callbacks that favors reusing available
* threads over creating new threads at the expense of execution time. The assumption is
getMaxDataBrokerFutureCallbackPoolSize(), getMaxDataBrokerFutureCallbackQueueSize(),
"CommitFutures");
- DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores,
- new DeadlockDetectingListeningExecutorService(commitExecutor,
- TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
- listenableFutureExecutor));
+ final List<AbstractMXBean> mBeans = Lists.newArrayList();
+
+ DOMDataCommitExecutor commitCoordinator;
+ DurationStatisticsTracker commitStatsTracker = null;
+
+ if(getAllowConcurrentCommits()) {
+ DOMConcurrentDataCommitCoordinator coordinator =
+ new DOMConcurrentDataCommitCoordinator(listenableFutureExecutor);
+ commitStatsTracker = coordinator.getCommitStatsTracker();
+ commitCoordinator = coordinator;
+ } else {
+ /*
+ * We use a single-threaded executor for commits with a bounded queue capacity. If the
+ * queue capacity is reached, subsequent commit tasks will be rejected and the commits will
+ * fail. This is done to relieve back pressure. This should be an extreme scenario - either
+ * there's deadlock(s) somewhere and the controller is unstable or some rogue component is
+ * continuously hammering commits too fast or the controller is just over-capacity for the
+ * system it's running on.
+ */
+ ExecutorService commitExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
+ getMaxDataBrokerCommitQueueSize(), "WriteTxCommit");
+
+ DOMDataCommitCoordinatorImpl coordinator = new DOMDataCommitCoordinatorImpl(
+ new DeadlockDetectingListeningExecutorService(commitExecutor,
+ TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
+ listenableFutureExecutor));
+
+ commitStatsTracker = coordinator.getCommitStatsTracker();
+ commitCoordinator = coordinator;
+
+ final AbstractMXBean commitExecutorStatsMXBean =
+ ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
+ JMX_BEAN_TYPE, null);
+ if(commitExecutorStatsMXBean != null) {
+ mBeans.add(commitExecutorStatsMXBean);
+ }
+ }
- final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl(
- newDataBroker.getCommitStatsTracker(), JMX_BEAN_TYPE);
- commitStatsMXBean.registerMBean();
+ DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores, commitCoordinator);
+
+ if(commitStatsTracker != null) {
+ final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl(
+ commitStatsTracker, JMX_BEAN_TYPE);
+ commitStatsMXBean.registerMBean();
+ mBeans.add(commitStatsMXBean);
+ }
- final AbstractMXBean commitExecutorStatsMXBean =
- ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
- JMX_BEAN_TYPE, null);
final AbstractMXBean commitFutureStatsMXBean =
ThreadExecutorStatsMXBeanImpl.create(listenableFutureExecutor,
"CommitFutureExecutorStats", JMX_BEAN_TYPE, null);
+ if(commitFutureStatsMXBean != null) {
+ mBeans.add(commitFutureStatsMXBean);
+ }
newDataBroker.setCloseable(new AutoCloseable() {
@Override
public void close() {
- commitStatsMXBean.unregisterMBean();
- if (commitExecutorStatsMXBean != null) {
- commitExecutorStatsMXBean.unregisterMBean();
- }
- if (commitFutureStatsMXBean != null) {
- commitFutureStatsMXBean.unregisterMBean();
+ for(AbstractMXBean mBean: mBeans) {
+ mBean.unregisterMBean();
}
}
});
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.util.DurationStatisticsTracker;
+import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.AbstractFuture;
+import com.google.common.util.concurrent.AbstractListeningExecutorService;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Implementation of DOMDataCommitExecutor that coordinates transaction commits concurrently. The 3
+ * commit phases (canCommit, preCommit, and commit) are performed serially and non-blocking
+ * (ie async) per transaction but multiple transaction commits can run concurrent.
+ *
+ * @author Thomas Pantelis
+ */
+public class DOMConcurrentDataCommitCoordinator implements DOMDataCommitExecutor {
+
+ private static final String CAN_COMMIT = "CAN_COMMIT";
+ private static final String PRE_COMMIT = "PRE_COMMIT";
+ private static final String COMMIT = "COMMIT";
+
+ private static final Logger LOG = LoggerFactory.getLogger(DOMConcurrentDataCommitCoordinator.class);
+
+ private final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
+
+ /**
+ * This executor is used to execute Future listener callback Runnables async.
+ */
+ private final ExecutorService clientFutureCallbackExecutor;
+
+ /**
+ * This executor is re-used internally in calls to Futures#addCallback to avoid the overhead
+ * of Futures#addCallback creating a MoreExecutors#sameThreadExecutor for each call.
+ */
+ private final ExecutorService internalFutureCallbackExecutor = new SimpleSameThreadExecutor();
+
+ public DOMConcurrentDataCommitCoordinator(ExecutorService listenableFutureExecutor) {
+ this.clientFutureCallbackExecutor = Preconditions.checkNotNull(listenableFutureExecutor);
+ }
+
+ public DurationStatisticsTracker getCommitStatsTracker() {
+ return commitStatsTracker;
+ }
+
+ @Override
+ public CheckedFuture<Void, TransactionCommitFailedException> submit(DOMDataWriteTransaction transaction,
+ Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+
+ Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
+ Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
+ LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
+
+ final int cohortSize = Iterables.size(cohorts);
+ final AsyncNotifyingSettableFuture clientSubmitFuture =
+ new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
+
+ doCanCommit(clientSubmitFuture, transaction, cohorts, cohortSize);
+
+ return MappingCheckedFuture.create(clientSubmitFuture,
+ TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
+ }
+
+ private void doCanCommit(final AsyncNotifyingSettableFuture clientSubmitFuture,
+ final DOMDataWriteTransaction transaction,
+ final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+
+ final long startTime = System.nanoTime();
+
+ // Not using Futures.allAsList here to avoid its internal overhead.
+ final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ FutureCallback<Boolean> futureCallback = new FutureCallback<Boolean>() {
+ @Override
+ public void onSuccess(Boolean result) {
+ if (result == null || !result) {
+ handleException(clientSubmitFuture, transaction, cohorts, cohortSize,
+ CAN_COMMIT, new TransactionCommitFailedException(
+ "Can Commit failed, no detailed cause available."));
+ } else {
+ if(remaining.decrementAndGet() == 0) {
+ // All cohorts completed successfully - we can move on to the preCommit phase
+ doPreCommit(startTime, clientSubmitFuture, transaction, cohorts, cohortSize);
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT, t);
+ }
+ };
+
+ for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
+ ListenableFuture<Boolean> canCommitFuture = cohort.canCommit();
+ Futures.addCallback(canCommitFuture, futureCallback, internalFutureCallbackExecutor);
+ }
+ }
+
+ private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
+ final DOMDataWriteTransaction transaction,
+ final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+
+ // Not using Futures.allAsList here to avoid its internal overhead.
+ final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(Void notUsed) {
+ if(remaining.decrementAndGet() == 0) {
+ // All cohorts completed successfully - we can move on to the commit phase
+ doCommit(startTime, clientSubmitFuture, transaction, cohorts, cohortSize);
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT, t);
+ }
+ };
+
+ for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
+ ListenableFuture<Void> preCommitFuture = cohort.preCommit();
+ Futures.addCallback(preCommitFuture, futureCallback, internalFutureCallbackExecutor);
+ }
+ }
+
+ private void doCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
+ final DOMDataWriteTransaction transaction,
+ final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final int cohortSize) {
+
+ // Not using Futures.allAsList here to avoid its internal overhead.
+ final AtomicInteger remaining = new AtomicInteger(cohortSize);
+ FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(Void notUsed) {
+ if(remaining.decrementAndGet() == 0) {
+ // All cohorts completed successfully - we're done.
+ commitStatsTracker.addDuration(System.nanoTime() - startTime);
+
+ clientSubmitFuture.set();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ handleException(clientSubmitFuture, transaction, cohorts, cohortSize, CAN_COMMIT, t);
+ }
+ };
+
+ for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
+ ListenableFuture<Void> commitFuture = cohort.commit();
+ Futures.addCallback(commitFuture, futureCallback, internalFutureCallbackExecutor);
+ }
+ }
+
+ private void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
+ final DOMDataWriteTransaction transaction,
+ final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, int cohortSize,
+ final String phase, final Throwable t) {
+
+ if(clientSubmitFuture.isDone()) {
+ // We must have had failures from multiple cohorts.
+ return;
+ }
+
+ LOG.warn("Tx: {} Error during phase {}, starting Abort", transaction.getIdentifier(), phase, t);
+ Exception e;
+ if(t instanceof Exception) {
+ e = (Exception)t;
+ } else {
+ e = new RuntimeException("Unexpected error occurred", t);
+ }
+
+ final TransactionCommitFailedException clientException =
+ TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER.apply(e);
+
+ // Transaction failed - tell all cohorts to abort.
+
+ @SuppressWarnings("unchecked")
+ ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[cohortSize];
+ int i = 0;
+ for(DOMStoreThreePhaseCommitCohort cohort: cohorts) {
+ canCommitFutures[i++] = cohort.abort();
+ }
+
+ ListenableFuture<List<Void>> combinedFuture = Futures.allAsList(canCommitFutures);
+ Futures.addCallback(combinedFuture, new FutureCallback<List<Void>>() {
+ @Override
+ public void onSuccess(List<Void> notUsed) {
+ // Propagate the original exception to the client.
+ clientSubmitFuture.setException(clientException);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ LOG.error("Tx: {} Error during Abort.", transaction.getIdentifier(), t);
+
+ // Propagate the original exception as that is what caused the Tx to fail and is
+ // what's interesting to the client.
+ clientSubmitFuture.setException(clientException);
+ }
+ }, internalFutureCallbackExecutor);
+ }
+
+ /**
+ * A settable future that uses an {@link Executor} to execute listener callback Runnables,
+ * registered via {@link #addListener}, asynchronously when this future completes. This is
+ * done to guarantee listener executions are off-loaded onto another thread to avoid blocking
+ * the thread that completed this future, as a common use case is to pass an executor that runs
+ * tasks in the same thread as the caller (ie MoreExecutors#sameThreadExecutor)
+ * to {@link #addListener}.
+ *
+ * FIXME: This class should probably be moved to yangtools common utils for re-usability and
+ * unified with AsyncNotifyingListenableFutureTask.
+ */
+ private static class AsyncNotifyingSettableFuture extends AbstractFuture<Void> {
+
+ /**
+ * ThreadLocal used to detect if the task completion thread is running the future listener Runnables.
+ */
+ private static final ThreadLocal<Boolean> ON_TASK_COMPLETION_THREAD_TL = new ThreadLocal<Boolean>();
+
+ private final ExecutorService listenerExecutor;
+
+ AsyncNotifyingSettableFuture(ExecutorService listenerExecutor) {
+ this.listenerExecutor = listenerExecutor;
+ }
+
+ @Override
+ public void addListener(final Runnable listener, final Executor executor) {
+ // Wrap the listener Runnable in a DelegatingRunnable. If the specified executor is one
+ // that runs tasks in the same thread as the caller submitting the task
+ // (e.g. {@link com.google.common.util.concurrent.MoreExecutors#sameThreadExecutor}) and
+ // the listener is executed from the #set methods, then the DelegatingRunnable will detect
+ // this via the ThreadLocal and submit the listener Runnable to the listenerExecutor.
+ //
+ // On the other hand, if this task is already complete, the call to ExecutionList#add in
+ // superclass will execute the listener Runnable immediately and, since the ThreadLocal
+ // won't be set, the DelegatingRunnable will run the listener Runnable inline.
+ super.addListener(new DelegatingRunnable(listener, listenerExecutor), executor);
+ }
+
+ boolean set() {
+ ON_TASK_COMPLETION_THREAD_TL.set(Boolean.TRUE);
+ try {
+ return super.set(null);
+ } finally {
+ ON_TASK_COMPLETION_THREAD_TL.set(null);
+ }
+ }
+
+ @Override
+ protected boolean setException(Throwable throwable) {
+ ON_TASK_COMPLETION_THREAD_TL.set(Boolean.TRUE);
+ try {
+ return super.setException(throwable);
+ } finally {
+ ON_TASK_COMPLETION_THREAD_TL.set(null);
+ }
+ }
+
+ private static final class DelegatingRunnable implements Runnable {
+ private final Runnable delegate;
+ private final Executor executor;
+
+ DelegatingRunnable(final Runnable delegate, final Executor executor) {
+ this.delegate = Preconditions.checkNotNull(delegate);
+ this.executor = Preconditions.checkNotNull(executor);
+ }
+
+ @Override
+ public void run() {
+ if (ON_TASK_COMPLETION_THREAD_TL.get() != null) {
+ // We're running on the task completion thread so off-load to the executor.
+ LOG.trace("Submitting ListenenableFuture Runnable from thread {} to executor {}",
+ Thread.currentThread().getName(), executor);
+ executor.execute(delegate);
+ } else {
+ // We're not running on the task completion thread so run the delegate inline.
+ LOG.trace("Executing ListenenableFuture Runnable on this thread: {}",
+ Thread.currentThread().getName());
+ delegate.run();
+ }
+ }
+ }
+ }
+
+ /**
+ * A simple same-thread executor without the internal locking overhead that
+ * MoreExecutors#sameThreadExecutor has. The #execute method is the only one of concern - we
+ * don't shutdown the executor so the other methods irrelevant.
+ */
+ private static class SimpleSameThreadExecutor extends AbstractListeningExecutorService {
+
+ @Override
+ public void execute(Runnable command) {
+ command.run();
+ }
+
+ @Override
+ public boolean awaitTermination(long arg0, TimeUnit arg1) throws InterruptedException {
+ return true;
+ }
+
+ @Override
+ public boolean isShutdown() {
+ return false;
+ }
+
+ @Override
+ public boolean isTerminated() {
+ return false;
+ }
+
+ @Override
+ public void shutdown() {
+ }
+
+ @Override
+ public List<Runnable> shutdownNow() {
+ return null;
+ }
+ }
+}
package org.opendaylight.controller.md.sal.dom.broker.impl;
import static com.google.common.base.Preconditions.checkState;
-import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.EnumMap;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.DurationStatsTracker;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerImpl.class);
- private final DOMDataCommitCoordinatorImpl coordinator;
+ private final DOMDataCommitExecutor coordinator;
private final AtomicLong txNum = new AtomicLong();
private final AtomicLong chainNum = new AtomicLong();
private volatile AutoCloseable closeable;
public DOMDataBrokerImpl(final Map<LogicalDatastoreType, DOMStore> datastores,
final ListeningExecutorService executor) {
+ this(datastores, new DOMDataCommitCoordinatorImpl(executor));
+ }
+
+ public DOMDataBrokerImpl(final Map<LogicalDatastoreType, DOMStore> datastores,
+ final DOMDataCommitExecutor coordinator) {
super(datastores);
- this.coordinator = new DOMDataCommitCoordinatorImpl(executor);
+ this.coordinator = Preconditions.checkNotNull(coordinator);
}
public void setCloseable(final AutoCloseable closeable) {
this.closeable = closeable;
}
- public DurationStatsTracker getCommitStatsTracker() {
- return coordinator.getCommitStatsTracker();
- }
-
@Override
public void close() {
super.close();
public CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
LOG.debug("Transaction: {} submitted with cohorts {}.", transaction.getIdentifier(), cohorts);
- return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> absent());
+ return coordinator.submit(transaction, cohorts);
}
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
* {@link org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType} type.
*
*/
-public class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTransactionFactory<DOMStoreTransactionChain>
- implements DOMTransactionChain, DOMDataCommitErrorListener {
+final class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTransactionFactory<DOMStoreTransactionChain>
+ implements DOMTransactionChain {
+ private static enum State {
+ RUNNING,
+ CLOSING,
+ CLOSED,
+ FAILED,
+ }
+ private static final AtomicIntegerFieldUpdater<DOMDataBrokerTransactionChainImpl> COUNTER_UPDATER =
+ AtomicIntegerFieldUpdater.newUpdater(DOMDataBrokerTransactionChainImpl.class, "counter");
+ private static final AtomicReferenceFieldUpdater<DOMDataBrokerTransactionChainImpl, State> STATE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMDataBrokerTransactionChainImpl.class, State.class, "state");
private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class);
private final AtomicLong txNum = new AtomicLong();
private final DOMDataCommitExecutor coordinator;
private final TransactionChainListener listener;
private final long chainId;
- private volatile boolean failed = false;
+ private volatile State state = State.RUNNING;
+ private volatile int counter = 0;
/**
*
this.listener = Preconditions.checkNotNull(listener);
}
+ private void checkNotFailed() {
+ Preconditions.checkState(state != State.FAILED, "Transaction chain has failed");
+ }
+
@Override
protected Object newTransactionIdentifier() {
return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
}
@Override
- public CheckedFuture<Void,TransactionCommitFailedException> submit(
+ public CheckedFuture<Void, TransactionCommitFailedException> submit(
final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+ checkNotFailed();
checkNotClosed();
- return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> of(this));
+ final CheckedFuture<Void, TransactionCommitFailedException> ret = coordinator.submit(transaction, cohorts);
+
+ COUNTER_UPDATER.incrementAndGet(this);
+ Futures.addCallback(ret, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(final Void result) {
+ transactionCompleted();
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ transactionFailed(transaction, t);
+ }
+ });
+
+ return ret;
}
@Override
public void close() {
- super.close();
+ final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING);
+ if (!success) {
+ LOG.debug("Chain {} is no longer running", this);
+ return;
+ }
+ super.close();
for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
subChain.close();
}
- if (!failed) {
- LOG.debug("Transaction chain {}Â successfully finished.", this);
- // FIXME: this event should be emitted once all operations complete
- listener.onTransactionChainSuccessful(this);
+ if (counter == 0) {
+ finishClose();
}
}
- @Override
- public void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
- failed = true;
+ private void finishClose() {
+ state = State.CLOSED;
+ listener.onTransactionChainSuccessful(this);
+ }
+
+ private void transactionCompleted() {
+ if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) {
+ finishClose();
+ }
+ }
+
+ private void transactionFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+ state = State.FAILED;
LOG.debug("Transaction chain {}Â failed.", this, cause);
listener.onTransactionChainFailed(this, tx, cause);
}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
-import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Iterables;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.util.DurationStatsTracker;
+import org.opendaylight.yangtools.util.DurationStatisticsTracker;
import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
private static final Logger LOG = LoggerFactory.getLogger(DOMDataCommitCoordinatorImpl.class);
- private final DurationStatsTracker commitStatsTracker = new DurationStatsTracker();
+ private final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
private final ListeningExecutorService executor;
/**
this.executor = Preconditions.checkNotNull(executor, "executor must not be null.");
}
- public DurationStatsTracker getCommitStatsTracker() {
+ public DurationStatisticsTracker getCommitStatsTracker() {
return commitStatsTracker;
}
@Override
public CheckedFuture<Void,TransactionCommitFailedException> submit(final DOMDataWriteTransaction transaction,
- final Iterable<DOMStoreThreePhaseCommitCohort> cohorts, final Optional<DOMDataCommitErrorListener> listener) {
+ final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
- Preconditions.checkArgument(listener != null, "Listener must not be null");
LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
ListenableFuture<Void> commitFuture = null;
try {
commitFuture = executor.submit(new CommitCoordinationTask(transaction, cohorts,
- listener, commitStatsTracker));
+ commitStatsTracker));
} catch(RejectedExecutionException e) {
LOG.error("The commit executor's queue is full - submit task was rejected. \n" +
executor, e);
"Could not submit the commit task - the commit queue capacity has been exceeded.", e));
}
- if (listener.isPresent()) {
- Futures.addCallback(commitFuture, new DOMDataCommitErrorInvoker(transaction, listener.get()));
- }
-
return MappingCheckedFuture.create(commitFuture,
TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
}
AtomicReferenceFieldUpdater.newUpdater(CommitCoordinationTask.class, CommitPhase.class, "currentPhase");
private final DOMDataWriteTransaction tx;
private final Iterable<DOMStoreThreePhaseCommitCohort> cohorts;
- private final DurationStatsTracker commitStatTracker;
+ private final DurationStatisticsTracker commitStatTracker;
private final int cohortSize;
private volatile CommitPhase currentPhase = CommitPhase.SUBMITTED;
public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
- final Optional<DOMDataCommitErrorListener> listener,
- final DurationStatsTracker commitStatTracker) {
+ final DurationStatisticsTracker commitStatsTracker) {
this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
- this.commitStatTracker = commitStatTracker;
+ this.commitStatTracker = commitStatsTracker;
this.cohortSize = Iterables.size(cohorts);
}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-
-/**
- *
- * Utility implemetation of {@link FutureCallback} which is responsible
- * for invoking {@link DOMDataCommitErrorListener} on TransactionCommit failed.
- *
- * When {@link #onFailure(Throwable)} is invoked, supplied {@link DOMDataCommitErrorListener}
- * callback is invoked with associated transaction and throwable is invoked on listener.
- *
- */
-class DOMDataCommitErrorInvoker implements FutureCallback<Void> {
-
- private final DOMDataWriteTransaction tx;
- private final DOMDataCommitErrorListener listener;
-
-
- /**
- *
- * Construct new DOMDataCommitErrorInvoker.
- *
- * @param transaction Transaction which should be passed as argument to {@link DOMDataCommitErrorListener#onCommitFailed(DOMDataWriteTransaction, Throwable)}
- * @param listener Listener which should be invoked on error.
- */
- public DOMDataCommitErrorInvoker(DOMDataWriteTransaction transaction, DOMDataCommitErrorListener listener) {
- this.tx = Preconditions.checkNotNull(transaction, "Transaction must not be null");
- this.listener = Preconditions.checkNotNull(listener, "Listener must not be null");
- }
-
- @Override
- public void onFailure(Throwable t) {
- listener.onCommitFailed(tx, t);
- }
-
- @Override
- public void onSuccess(Void result) {
- // NOOP
- }
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import java.util.EventListener;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-
-/**
- *
- * Listener on transaction failure which may be passed to
- * {@link DOMDataCommitExecutor}. This listener is notified during transaction
- * processing, before result is delivered to other client code outside MD-SAL.
- * This allows implementors to update their internal state before transaction
- * failure is visible to client code.
- *
- * This is internal API for MD-SAL implementations, for consumer facing error
- * listeners see {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener}.
- *
- */
-interface DOMDataCommitErrorListener extends EventListener {
-
- /**
- *
- * Callback which is invoked on transaction failure during three phase
- * commit in {@link DOMDataCommitExecutor}.
- *
- *
- * Implementation of this callback MUST NOT do any blocking calls or any
- * calls to MD-SAL, since this callback is invoked synchronously on MD-SAL
- * Broker coordination thread.
- *
- * @param tx
- * Transaction which failed
- * @param cause
- * Failure reason
- */
- void onCommitFailed(DOMDataWriteTransaction tx, Throwable cause);
-
-}
*/
package org.opendaylight.controller.md.sal.dom.broker.impl;
+import com.google.common.util.concurrent.CheckedFuture;
import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
/**
* Executor of Three Phase Commit coordination for
*
*
*/
-interface DOMDataCommitExecutor {
+public interface DOMDataCommitExecutor {
/**
* Submits supplied transaction to be executed in context of provided
* Transaction to be used as context for reporting
* @param cohort
* DOM Store cohorts representing provided transaction, its
- * subtransactoins.
- * @param listener
- * Error listener which should be notified if transaction failed.
+ * subtransactions.
* @return a CheckedFuture. if commit coordination on cohorts finished successfully,
* nothing is returned from the Future, On failure,
* the Future fails with a {@link TransactionCommitFailedException}.
*
*/
CheckedFuture<Void,TransactionCommitFailedException> submit(DOMDataWriteTransaction tx,
- Iterable<DOMStoreThreePhaseCommitCohort> cohort, Optional<DOMDataCommitErrorListener> listener);
+ Iterable<DOMStoreThreePhaseCommitCohort> cohort);
}
package org.opendaylight.controller.md.sal.dom.broker.impl.jmx;
import javax.annotation.Nonnull;
-
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
-import org.opendaylight.yangtools.util.DurationStatsTracker;
+import org.opendaylight.yangtools.util.DurationStatisticsTracker;
/**
* Implementation of the CommitStatsMXBean interface.
*/
public class CommitStatsMXBeanImpl extends AbstractMXBean implements CommitStatsMXBean {
- private final DurationStatsTracker commitStatsTracker;
+ private final DurationStatisticsTracker commitStatsTracker;
/**
* Constructor.
* @param commitStatsTracker the DurationStatsTracker used to obtain the stats.
* @param mBeanType mBeanType Used as the <code>type</code> property in the bean's ObjectName.
*/
- public CommitStatsMXBeanImpl(@Nonnull DurationStatsTracker commitStatsTracker,
+ public CommitStatsMXBeanImpl(@Nonnull DurationStatisticsTracker commitStatsTracker,
@Nonnull String mBeanType) {
super("CommitStats", mBeanType, null);
this.commitStatsTracker = commitStatsTracker;
type uint16;
description "The maximum queue size for the data broker's commit executor.";
}
+
+ leaf allow-concurrent-commits {
+ default false;
+ type boolean;
+ description "Specifies whether or not to allow 3-phrase commits to run concurrently.
+ Use with caution. If set to true, the data store implementations must be prepared
+ to handle concurrent commits. The default is false";
+ }
}
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.broker.impl;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.inOrder;
+import java.util.Arrays;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+
+/**
+ * Unit tests for DOMConcurrentDataCommitCoordinator.
+ *
+ * @author Thomas Pantelis
+ */
+public class DOMConcurrentDataCommitCoordinatorTest {
+
+ private final DOMDataWriteTransaction transaction = mock(DOMDataWriteTransaction.class);
+ private final DOMStoreThreePhaseCommitCohort mockCohort1 = mock(DOMStoreThreePhaseCommitCohort.class);
+ private final DOMStoreThreePhaseCommitCohort mockCohort2 = mock(DOMStoreThreePhaseCommitCohort.class);
+ private final ThreadPoolExecutor futureExecutor =
+ new ThreadPoolExecutor(0, 1, 5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
+ private final DOMConcurrentDataCommitCoordinator coordinator =
+ new DOMConcurrentDataCommitCoordinator(futureExecutor);
+
+ @Before
+ public void setup() {
+ doReturn("tx").when(transaction).getIdentifier();
+ }
+
+ @After
+ public void tearDown() {
+ futureExecutor.shutdownNow();
+ }
+
+ @Test
+ public void testSuccessfulSubmitAsync() throws Throwable {
+ testSuccessfulSubmit(true);
+ }
+
+ @Test
+ public void testSuccessfulSubmitSync() throws Throwable {
+ testSuccessfulSubmit(false);
+ }
+
+ private void testSuccessfulSubmit(final boolean doAsync) throws Throwable {
+ final CountDownLatch asyncCanCommitContinue = new CountDownLatch(1);
+ Answer<ListenableFuture<Boolean>> asyncCanCommit = new Answer<ListenableFuture<Boolean>>() {
+ @Override
+ public ListenableFuture<Boolean> answer(InvocationOnMock invocation) {
+ final SettableFuture<Boolean> future = SettableFuture.create();
+ if(doAsync) {
+ new Thread() {
+ @Override
+ public void run() {
+ Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue,
+ 10, TimeUnit.SECONDS);
+ future.set(true);
+ }
+ }.start();
+ } else {
+ future.set(true);
+ }
+
+ return future;
+ }
+ };
+
+ doAnswer(asyncCanCommit).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).commit();
+
+ doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).commit();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2));
+
+ final CountDownLatch doneLatch = new CountDownLatch(1);
+ final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
+ Futures.addCallback(future, new FutureCallback<Void>() {
+ @Override
+ public void onSuccess(Void result) {
+ doneLatch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ caughtEx.set(t);
+ doneLatch.countDown();
+ }
+ });
+
+ asyncCanCommitContinue.countDown();
+
+ assertEquals("Submit complete", true, doneLatch.await(5, TimeUnit.SECONDS));
+
+ if(caughtEx.get() != null) {
+ throw caughtEx.get();
+ }
+
+ assertEquals("Task count", doAsync ? 1 : 0, futureExecutor.getTaskCount());
+
+ InOrder inOrder = inOrder(mockCohort1, mockCohort2);
+ inOrder.verify(mockCohort1).canCommit();
+ inOrder.verify(mockCohort2).canCommit();
+ inOrder.verify(mockCohort1).preCommit();
+ inOrder.verify(mockCohort2).preCommit();
+ inOrder.verify(mockCohort1).commit();
+ inOrder.verify(mockCohort2).commit();
+ }
+
+ @Test
+ public void testSubmitWithNegativeCanCommitResponse() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
+
+ doReturn(Futures.immediateFuture(false)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
+ doReturn(Futures.immediateFuture(false)).when(mockCohort3).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+
+ assertFailure(future, null, mockCohort1, mockCohort2, mockCohort3);
+ }
+
+ private void assertFailure(CheckedFuture<Void, TransactionCommitFailedException> future,
+ Exception expCause, DOMStoreThreePhaseCommitCohort... mockCohorts)
+ throws Exception {
+ try {
+ future.checkedGet(5, TimeUnit.SECONDS);
+ fail("Expected TransactionCommitFailedException");
+ } catch (TransactionCommitFailedException e) {
+ if(expCause != null) {
+ assertSame("Expected cause", expCause, e.getCause());
+ }
+
+ InOrder inOrder = inOrder((Object[])mockCohorts);
+ for(DOMStoreThreePhaseCommitCohort c: mockCohorts) {
+ inOrder.verify(c).abort();
+ }
+ } catch (TimeoutException e) {
+ throw e;
+ }
+ }
+
+ @Test
+ public void testSubmitWithCanCommitException() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
+
+ IllegalStateException cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2));
+
+ assertFailure(future, cause, mockCohort1, mockCohort2);
+ }
+
+ @Test
+ public void testSubmitWithPreCommitException() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
+
+ doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
+ IllegalStateException cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
+ doReturn(Futures.immediateFuture(true)).when(mockCohort3).canCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2"))).
+ when(mockCohort3).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+
+ assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+ }
+
+ @Test
+ public void testSubmitWithCommitException() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).preCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).commit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort1).abort();
+
+ doReturn(Futures.immediateFuture(true)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).preCommit();
+ IllegalStateException cause = new IllegalStateException("mock");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).commit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
+ doReturn(Futures.immediateFuture(true)).when(mockCohort3).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort3).preCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2"))).
+ when(mockCohort3).commit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort3).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+
+ assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+ }
+
+ @Test
+ public void testSubmitWithAbortException() throws Exception {
+ doReturn(Futures.immediateFuture(true)).when(mockCohort1).canCommit();
+ doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock abort error"))).
+ when(mockCohort1).abort();
+
+ IllegalStateException cause = new IllegalStateException("mock canCommit error");
+ doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
+ doReturn(Futures.immediateFuture(null)).when(mockCohort2).abort();
+
+ CheckedFuture<Void, TransactionCommitFailedException> future = coordinator.submit(
+ transaction, Arrays.asList(mockCohort1, mockCohort2));
+
+ assertFailure(future, cause, mockCohort1, mockCohort2);
+ }
+}
private BackwardsCompatibleMountPointManager compatibleMountPointManager;
static final QName qName = QName.create("namespace", "12-12-1212", "mount");
- static final YangInstanceIdentifier id = YangInstanceIdentifier.builder(qName).build();
+ static final YangInstanceIdentifier id = YangInstanceIdentifier.of(qName);
@Before
public void setUp() throws Exception {
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-core-spi</artifactId>
<packaging>bundle</packaging>
*/
package org.opendaylight.controller.md.sal.dom.broker.spi.rpc;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
import org.opendaylight.yangtools.yang.model.api.UnknownSchemaNode;
-import com.google.common.base.Optional;
-
public abstract class RpcRoutingStrategy implements Identifiable<QName> {
+ private static final QName CONTEXT_REFERENCE = QName.cachedReference(QName.create("urn:opendaylight:yang:extension:yang-ext",
+ "2013-07-09", "context-reference"));
private final QName identifier;
- private static final QName CONTEXT_REFERENCE = QName.create("urn:opendaylight:yang:extension:yang-ext",
- "2013-07-09", "context-reference");
private RpcRoutingStrategy(final QName identifier) {
- super();
- this.identifier = identifier;
+ this.identifier = Preconditions.checkNotNull(identifier);
}
/**
public abstract QName getContext();
@Override
- public QName getIdentifier() {
+ public final QName getIdentifier() {
return identifier;
}
for (DataSchemaNode schemaNode : input.getChildNodes()) {
Optional<QName> context = getRoutingContext(schemaNode);
if (context.isPresent()) {
- return createRoutedStrategy(rpc, context.get(), schemaNode.getQName());
+ return new RoutedRpcStrategy(rpc.getQName(), context.get(), schemaNode.getQName());
}
}
}
- return createGlobalStrategy(rpc);
+ return new GlobalRpcStrategy(rpc.getQName());
}
- public static Optional<QName> getRoutingContext(final DataSchemaNode schemaNode) {
+ public static Optional<QName> getRoutingContext(final DataSchemaNode schemaNode) {
for (UnknownSchemaNode extension : schemaNode.getUnknownSchemaNodes()) {
if (CONTEXT_REFERENCE.equals(extension.getNodeType())) {
return Optional.fromNullable(extension.getQName());
return Optional.absent();
}
- private static RpcRoutingStrategy createRoutedStrategy(final RpcDefinition rpc, final QName context, final QName leafNode) {
- return new RoutedRpcStrategy(rpc.getQName(), context, leafNode);
- }
-
-
-
- private static RpcRoutingStrategy createGlobalStrategy(final RpcDefinition rpc) {
- GlobalRpcStrategy ret = new GlobalRpcStrategy(rpc.getQName());
- return ret;
- }
-
- private static class RoutedRpcStrategy extends RpcRoutingStrategy {
-
- final QName context;
+ private static final class RoutedRpcStrategy extends RpcRoutingStrategy {
+ private final QName context;
private final QName leaf;
private RoutedRpcStrategy(final QName identifier, final QName ctx, final QName leaf) {
super(identifier);
- this.context = ctx;
- this.leaf = leaf;
+ this.context = Preconditions.checkNotNull(ctx);
+ this.leaf = Preconditions.checkNotNull(leaf);
}
@Override
}
}
- private static class GlobalRpcStrategy extends RpcRoutingStrategy {
+ private static final class GlobalRpcStrategy extends RpcRoutingStrategy {
public GlobalRpcStrategy(final QName identifier) {
super(identifier);
@Override
public QName getContext() {
- throw new UnsupportedOperationException("Not routed strategy does not have context.");
+ throw new UnsupportedOperationException("Non-routed strategy does not have a context");
}
@Override
public QName getLeaf() {
- throw new UnsupportedOperationException("Not routed strategy does not have context.");
+ throw new UnsupportedOperationException("Non-routed strategy does not have a context");
}
}
}
\ No newline at end of file
<parent>\r
<artifactId>sal-parent</artifactId>\r
<groupId>org.opendaylight.controller</groupId>\r
- <version>1.1-SNAPSHOT</version>\r
+ <version>1.2.0-SNAPSHOT</version>\r
</parent>\r
<artifactId>sal-dom-xsql-config</artifactId>\r
<groupId>org.opendaylight.controller</groupId>\r
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-dom-xsql</artifactId>
<packaging>bundle</packaging>
-package org.opendaylight.controller.md.sal.dom.xsql.jdbc;
+package org.odl.xsql;
import java.sql.Connection;
import java.sql.Driver;
import java.util.Properties;
import java.util.logging.Logger;
+import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCConnection;
+
public class JDBCDriver implements Driver {
public static JDBCDriver drv = new JDBCDriver();
if (url.equals("svr")) {
return new JDBCConnection(true);
} else {
- return new JDBCConnection(url);
+ return new JDBCConnection(url).getProxy();
}
} catch (Exception err) {
err.printStackTrace();
}
+ System.err.println("Error JDBC Connection");
return null;
}
@Override
public DriverPropertyInfo[] getPropertyInfo(String arg0, Properties arg1)
throws SQLException {
- DriverPropertyInfo i = new DriverPropertyInfo("NQL", "NQL");
+ DriverPropertyInfo i = new DriverPropertyInfo("OpenDayLight", "OpenDayLight");
return new DriverPropertyInfo[] {i};
}
private static final int SLEEP = 10000;
private static XSQLAdapter a = new XSQLAdapter();
private static PrintStream l = null;
+ private static String tmpDir = null;
+ private static File xqlLog = null;
public boolean stopped = false;
private List<String> elementHosts = new ArrayList<String>();
private String username;
}
+ public void loadBluePrint(){
+ try{
+ InputStream in = this.getClass().getClassLoader().getResourceAsStream("BluePrintCache.dat");
+ if(in!=null){
+ this.bluePrint = XSQLBluePrint.load(in);
+ }
+ in.close();
+ }catch(Exception err){
+ err.printStackTrace();
+ }
+ }
+
public static XSQLAdapter getInstance() {
return a;
}
+ public static File getXQLLogfile() {
+ tmpDir = System.getProperty("java.io.tmpdir");
+ xqlLog = new File(tmpDir + "/xql.log");
+ return xqlLog;
+ }
+
public static void main(String args[]) {
XSQLAdapter adapter = new XSQLAdapter();
adapter.start();
synchronized (XSQLAdapter.class) {
if (l == null) {
l = new PrintStream(
- new FileOutputStream("/tmp/xql.log"));
+ new FileOutputStream(getXQLLogfile()));
}
}
}
synchronized (XSQLAdapter.class) {
if (l == null) {
l = new PrintStream(
- new FileOutputStream("/tmp/xql.log"));
+ new FileOutputStream(getXQLLogfile()));
}
}
}
}
public void execute(JDBCResultSet rs) {
+ if(this.domDataBroker==null){
+ rs.setFinished(true);
+ return;
+ }
List<XSQLBluePrintNode> tables = rs.getTables();
List<Object> roots = collectModuleRoots(tables.get(0),LogicalDatastoreType.OPERATIONAL);
roots.addAll(collectModuleRoots(tables.get(0),LogicalDatastoreType.CONFIGURATION));
sout.close();
} catch (Exception err) {
}
+ } else if (input.equals("save")) {
+ XSQLBluePrint.save(this.bluePrint);
} else if (input.equals("tocsv")) {
toCsv = !toCsv;
sout.println("to csv file is " + toCsv);
package org.opendaylight.controller.md.sal.dom.xsql;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FileOutputStream;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.util.Map;
import java.util.Set;
-public class XSQLBluePrint implements DatabaseMetaData {
+public class XSQLBluePrint implements DatabaseMetaData, Serializable {
- public static final String CACHE_FILE_NAME = "BluePrintCache.dat";
+ private static final long serialVersionUID = 1L;
+
+ public static final String CACHE_FILE_NAME = "./BluePrintCache.dat";
private Map<String, XSQLBluePrintNode> tableNameToBluePrint = new HashMap<String, XSQLBluePrintNode>();
- private Map<String, Map<String,XSQLBluePrintNode>> odlNameToBluePrint = new HashMap<String, Map<String,XSQLBluePrintNode>>();
+ private Map<String, Map<String, XSQLBluePrintNode>> odlNameToBluePrint = new HashMap<String, Map<String, XSQLBluePrintNode>>();
private boolean cacheLoadedSuccessfuly = false;
private DatabaseMetaData myProxy = null;
public static final String replaceAll(String source, String toReplace,
- String withThis) {
+ String withThis) {
int index = source.indexOf(toReplace);
int index2 = 0;
StringBuffer result = new StringBuffer();
public XSQLBluePrint() {
}
+ public static void save(XSQLBluePrint bp) {
+ ObjectOutputStream out = null;
+ try {
+ out = new ObjectOutputStream(new DataOutputStream(
+ new FileOutputStream(CACHE_FILE_NAME)));
+ out.writeObject(bp);
+ } catch (Exception err) {
+ err.printStackTrace();
+ } finally {
+ try {
+ out.close();
+ } catch (Exception err) {
+ }
+ }
+ }
+
+ public static XSQLBluePrint load(InputStream ins) {
+ ObjectInputStream in = null;
+ try {
+ in = new ObjectInputStream(new DataInputStream(ins));
+ return (XSQLBluePrint) in.readObject();
+ } catch (Exception err) {
+ err.printStackTrace();
+ } finally {
+ try {
+ in.close();
+ } catch (Exception err) {
+ }
+ }
+ return null;
+ }
+
private class NQLBluePrintProxy implements InvocationHandler {
public Object invoke(Object proxy, Method method, Object[] args)
- throws Throwable {
+ throws Throwable {
System.out.println("Method " + method);
return method.invoke(XSQLBluePrint.this, args);
}
public DatabaseMetaData getProxy() {
if (myProxy == null) {
try {
- myProxy = (DatabaseMetaData) Proxy
- .newProxyInstance(getClass().getClassLoader(),
- new Class[] {DatabaseMetaData.class},
+ myProxy = (DatabaseMetaData) Proxy.newProxyInstance(getClass()
+ .getClassLoader(),
+ new Class[] { DatabaseMetaData.class },
new NQLBluePrintProxy());
} catch (Exception err) {
err.printStackTrace();
return myProxy;
}
- /*
- public void loadBluePrintCache(String hostName) {
- try {
- ObjectInputStream in = new ObjectInputStream(
- new FileInputStream(hostName + "-" + CACHE_FILE_NAME));
- cache = (Map) in.readObject();
- in.close();
- cacheLoadedSuccessfuly = true;
- } catch (Exception err) {
- //err.printStackTrace();
- }
- }*/
-
- public XSQLBluePrintNode[] getBluePrintNodeByODLTableName(String odlTableName) {
- Map<String,XSQLBluePrintNode> map = this.odlNameToBluePrint.get(odlTableName);
- if(map==null) return null;
+ public XSQLBluePrintNode[] getBluePrintNodeByODLTableName(
+ String odlTableName) {
+ Map<String, XSQLBluePrintNode> map = this.odlNameToBluePrint
+ .get(odlTableName);
+ if (map == null)
+ return null;
return map.values().toArray(new XSQLBluePrintNode[map.size()]);
}
}
for (XSQLBluePrintNode n : tableNameToBluePrint.values()) {
- if (n.getBluePrintNodeName().toLowerCase().endsWith(tableName.toLowerCase())) {
+ if (n.getBluePrintNodeName().toLowerCase()
+ .endsWith(tableName.toLowerCase())) {
return n;
}
}
for (XSQLBluePrintNode n : tableNameToBluePrint.values()) {
- if (n.getBluePrintNodeName().toLowerCase().equals(tableName.toLowerCase())) {
+ if (n.getBluePrintNodeName().toLowerCase()
+ .equals(tableName.toLowerCase())) {
return n;
}
}
for (XSQLBluePrintNode n : tableNameToBluePrint.values()) {
- if (n.getBluePrintNodeName().toLowerCase().indexOf(tableName.toLowerCase())!= -1) {
+ if (n.getBluePrintNodeName().toLowerCase()
+ .indexOf(tableName.toLowerCase()) != -1) {
return n;
}
}
return null;
}
-
public boolean isCacheLoaded() {
return cacheLoadedSuccessfuly;
}
private static Map<Class, Set<Class>> superClassMap = new HashMap<Class, Set<Class>>();
public static Set<Class> getInheritance(Class myObjectClass,
- Class returnType) {
+ Class returnType) {
if (returnType != null && myObjectClass.equals(returnType)) {
return new HashSet<Class>();
public void addToBluePrintCache(XSQLBluePrintNode blNode) {
this.tableNameToBluePrint.put(blNode.getBluePrintNodeName(), blNode);
- Map<String,XSQLBluePrintNode> map = this.odlNameToBluePrint.get(blNode.getODLTableName());
- if(map==null){
- map = new HashMap<String,XSQLBluePrintNode>();
- this.odlNameToBluePrint.put(blNode.getODLTableName(),map);
+ Map<String, XSQLBluePrintNode> map = this.odlNameToBluePrint.get(blNode
+ .getODLTableName());
+ if (map == null) {
+ map = new HashMap<String, XSQLBluePrintNode>();
+ this.odlNameToBluePrint.put(blNode.getODLTableName(), map);
}
map.put(blNode.getBluePrintNodeName(), blNode);
}
@Override
public ResultSet getAttributes(String catalog, String schemaPattern,
- String typeNamePattern, String attributeNamePattern)
- throws SQLException {
+ String typeNamePattern, String attributeNamePattern)
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getBestRowIdentifier(String catalog, String schema,
- String table, int scope, boolean nullable) throws SQLException {
+ String table, int scope, boolean nullable) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getColumnPrivileges(String catalog, String schema,
- String table, String columnNamePattern) throws SQLException {
+ String table, String columnNamePattern) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getColumns(String catalog, String schemaPattern,
- String tableNamePattern, String columnNamePattern)
- throws SQLException {
+ String tableNamePattern, String columnNamePattern)
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getCrossReference(String parentCatalog,
- String parentSchema, String parentTable, String foreignCatalog,
- String foreignSchema, String foreignTable) throws SQLException {
+ String parentSchema, String parentTable, String foreignCatalog,
+ String foreignSchema, String foreignTable) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public String getDatabaseProductName() throws SQLException {
- return "VNE Query Language";
+ return "OpenDayLight";
}
@Override
}
@Override
- public ResultSet getExportedKeys(String catalog, String schema,
- String table)
- throws SQLException {
+ public ResultSet getExportedKeys(String catalog, String schema, String table)
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getFunctionColumns(String catalog, String schemaPattern,
- String functionNamePattern, String columnNamePattern)
- throws SQLException {
+ String functionNamePattern, String columnNamePattern)
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getFunctions(String catalog, String schemaPattern,
- String functionNamePattern) throws SQLException {
+ String functionNamePattern) throws SQLException {
// TODO Auto-generated method stub
return null;
}
}
@Override
- public ResultSet getImportedKeys(String catalog, String schema,
- String table)
- throws SQLException {
+ public ResultSet getImportedKeys(String catalog, String schema, String table)
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getIndexInfo(String catalog, String schema, String table,
- boolean unique, boolean approximate) throws SQLException {
+ boolean unique, boolean approximate) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getPrimaryKeys(String catalog, String schema, String table)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getProcedureColumns(String catalog, String schemaPattern,
- String procedureNamePattern, String columnNamePattern)
- throws SQLException {
+ String procedureNamePattern, String columnNamePattern)
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getProcedures(String catalog, String schemaPattern,
- String procedureNamePattern) throws SQLException {
+ String procedureNamePattern) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getSchemas(String catalog, String schemaPattern)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getSuperTables(String catalog, String schemaPattern,
- String tableNamePattern) throws SQLException {
+ String tableNamePattern) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getSuperTypes(String catalog, String schemaPattern,
- String typeNamePattern) throws SQLException {
+ String typeNamePattern) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getTablePrivileges(String catalog, String schemaPattern,
- String tableNamePattern) throws SQLException {
+ String tableNamePattern) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getTables(String catalog, String schemaPattern,
- String tableNamePattern, String[] types) throws SQLException {
+ String tableNamePattern, String[] types) throws SQLException {
return new TablesResultSet(this);
}
@Override
public ResultSet getUDTs(String catalog, String schemaPattern,
- String typeNamePattern, int[] types) throws SQLException {
+ String typeNamePattern, int[] types) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public ResultSet getVersionColumns(String catalog, String schema,
- String table) throws SQLException {
+ String table) throws SQLException {
// TODO Auto-generated method stub
return null;
}
}
@Override
- public boolean supportsCatalogsInPrivilegeDefinitions()
- throws SQLException {
+ public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean supportsConvert(int fromType, int toType)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean supportsDataDefinitionAndDataManipulationTransactions()
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean supportsDataManipulationTransactionsOnly()
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
- public boolean supportsDifferentTableCorrelationNames()
- throws SQLException {
+ public boolean supportsDifferentTableCorrelationNames() throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean supportsResultSetConcurrency(int type, int concurrency)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean supportsResultSetHoldability(int holdability)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return false;
}
}
@Override
- public boolean supportsStoredFunctionsUsingCallSyntax()
- throws SQLException {
+ public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean supportsTransactionIsolationLevel(int level)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public ResultSet getPseudoColumns(String catalog, String schemaPattern,
- String tableNamePattern, String columnNamePattern)
- throws SQLException {
+ String tableNamePattern, String columnNamePattern)
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
import java.io.Serializable;
import java.sql.SQLException;
import java.util.Collection;
+import java.util.HashMap;
import java.util.HashSet;
+import java.util.Map;
import java.util.Set;
public class XSQLBluePrintNode implements Serializable {
private static final long serialVersionUID = 1L;
private Class<?> myInterface = null;
private String myInterfaceString = null;
- private Set<XSQLBluePrintRelation> relations =
- new HashSet<XSQLBluePrintRelation>();
- private Set<XSQLBluePrintNode> inheritingNodes =
- new HashSet<XSQLBluePrintNode>();
+ private Set<XSQLBluePrintRelation> relations = new HashSet<XSQLBluePrintRelation>();
+ private Set<XSQLBluePrintNode> inheritingNodes = new HashSet<XSQLBluePrintNode>();
private Set<XSQLBluePrintNode> children = new HashSet<XSQLBluePrintNode>();
private XSQLBluePrintNode parent = null;
private transient Set<String> parentHierarchySet = null;
private String myInterfaceName = null;
private Set<XSQLColumn> columns = new HashSet<XSQLColumn>();
+ private Map<String, XSQLColumn> origNameToColumn = new HashMap<String, XSQLColumn>();
private transient Object odlNode = null;
private boolean module = false;
private String bluePrintTableName = null;
private String odlTableName = null;
+ private String origName = null;
+
+ public XSQLBluePrintNode(String name, String _origName, int _level) {
+ this.level = _level;
+ this.odlTableName = name;
+ this.bluePrintTableName = name;
+ this.origName = _origName;
+ }
public XSQLBluePrintNode(Class<?> _myInterface, int _level) {
this.myInterface = _myInterface;
this.level = _level;
}
- public XSQLBluePrintNode(Object _odlNode, int _level,XSQLBluePrintNode _parent) {
+ public XSQLBluePrintNode(Object _odlNode, int _level,
+ XSQLBluePrintNode _parent) {
this.odlNode = _odlNode;
this.level = _level;
this.module = XSQLODLUtils.isModule(_odlNode);
this.parent = _parent;
this.bluePrintTableName = XSQLODLUtils.getBluePrintName(_odlNode);
+ this.odlTableName = XSQLODLUtils.getODLNodeName(this.odlNode);
+ }
+ public String getOrigName() {
+ return this.origName;
}
- public String getBluePrintNodeName(){
+ public String getBluePrintNodeName() {
return this.bluePrintTableName;
}
}
for (XSQLBluePrintRelation dtr : this.relations) {
XSQLBluePrintNode parent = dtr.getParent();
- if (!parent.getInterface().equals(this.getInterface()) && !parent
- .getInterface().isAssignableFrom(this.getInterface()) &&
- this.getInterface().isAssignableFrom(parent.getInterface())
- && parent.isModelChild(p)) {
+ if (!parent.getInterface().equals(this.getInterface())
+ && !parent.getInterface().isAssignableFrom(
+ this.getInterface())
+ && this.getInterface().isAssignableFrom(
+ parent.getInterface()) && parent.isModelChild(p)) {
return true;
}
}
}
public void addColumn(Object node, String tableName) {
- XSQLColumn c = new XSQLColumn(node,getBluePrintNodeName(), this);
+ XSQLColumn c = new XSQLColumn(node, getBluePrintNodeName(), this);
+ this.columns.add(c);
+ }
+
+ public XSQLColumn addColumn(String name, String tableName, String origName,
+ String origTableName) {
+ XSQLColumn c = new XSQLColumn(name, tableName, origName, origTableName);
this.columns.add(c);
+ this.origNameToColumn.put(origName, c);
+ return c;
}
public void addColumn(String methodName) {
throw new SQLException("Unknown field name '" + name + "'");
}
-
public void addParent(XSQLBluePrintNode parent, String property) {
try {
if (property.equals("ContainingTPs")) {
return;
}
- //Method m = parent.getInterface().getMethod("get"+property, null);
- //if(!m.getDeclaringClass().equals(parent.getInterface()))
- //return;
- XSQLBluePrintRelation rel =
- new XSQLBluePrintRelation(parent, property, myInterface);
+ // Method m = parent.getInterface().getMethod("get"+property, null);
+ // if(!m.getDeclaringClass().equals(parent.getInterface()))
+ // return;
+ XSQLBluePrintRelation rel = new XSQLBluePrintRelation(parent,
+ property, myInterface);
relations.add(rel);
} catch (Exception err) {
err.printStackTrace();
}
public Set<XSQLBluePrintRelation> getClonedParents() {
- Set<XSQLBluePrintRelation> result =
- new HashSet<XSQLBluePrintRelation>();
+ Set<XSQLBluePrintRelation> result = new HashSet<XSQLBluePrintRelation>();
result.addAll(this.relations);
return result;
}
if (odlNode != null) {
return getBluePrintNodeName();
}
+ if (odlTableName != null) {
+ return odlTableName;
+ }
return "Unknown";
}
XSQLBluePrintNode other = (XSQLBluePrintNode) obj;
if (odlNode != null) {
return getBluePrintNodeName().equals(other.getBluePrintNodeName());
- } else if (this.odlTableName != null) {
+ } else if (this.odlTableName == null && other.odlTableName != null)
+ return false;
+ if (this.odlTableName != null && other.odlTableName == null)
+ return false;
+ else
return this.odlTableName.equals(other.odlTableName);
- } else {
- return other.myInterface.equals(myInterface);
- }
}
@Override
private int charWidth = -1;
private Class type = null;
private transient Object bluePrintNode = null;
+ private String origName = null;
+ private String origTableName = null;
public XSQLColumn(Object odlNode, String _tableName, Object _bluePrintNode) {
this.name = XSQLODLUtils.getNodeNameFromDSN(odlNode);
this.type = XSQLODLUtils.getTypeForODLColumn(odlNode);
}
+ public XSQLColumn(String _name, String _tableName,String _origName, String _origTableName){
+ this.name = _name;
+ this.tableName = _tableName;
+ this.origName = _origName;
+ this.origTableName = _origTableName;
+ }
+
public String getName() {
return name;
}
import java.io.Serializable;
import java.util.Map;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
+
public class JDBCCommand implements Serializable {
public int type = 0;
public static final int TYPE_EXECUTE_QUERY = 1;
public static final int TYPE_QUERY_RECORD = 3;
public static final int TYPE_QUERY_FINISH = 4;
public static final int TYPE_QUERY_ERROR = 5;
+ public static final int TYPE_METADATA = 6;
+ public static final int TYPE_METADATA_REPLY = 7;
private JDBCResultSet rs = null;
private Map record = null;
private int rsID = -1;
private Exception err = null;
+ private XSQLBluePrint bluePrint = null;
+
+ public JDBCCommand() {
+
+ }
+
+ public void setType(int t) {
+ this.type = t;
+ }
public JDBCCommand(Exception _err, int _RSID) {
this.type = TYPE_QUERY_ERROR;
this.rsID = _RSID;
}
+ public JDBCCommand(XSQLBluePrint bl) {
+ this.type = TYPE_METADATA_REPLY;
+ this.bluePrint = bl;
+ }
+
public JDBCCommand(JDBCResultSet _rs, int _type) {
this.type = TYPE_EXECUTE_QUERY;
this.rs = _rs;
public Exception getERROR() {
return this.err;
}
+
+ public XSQLBluePrint getBluePrint() {
+ return this.bluePrint;
+ }
}
import java.util.concurrent.Executor;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
-public class JDBCConnection extends Thread implements Connection {
+public class JDBCConnection implements Connection, Runnable {
private Socket socket = null;
private DataInputStream in = null;
private DataOutputStream out = null;
private LinkedList<byte[]> queue = new LinkedList<byte[]>();
private XSQLAdapter adapter = null;
+ private XSQLBluePrint metaData = null;
+ private String addr = null;
+ private boolean wasClosed = false;
public JDBCConnection(Socket s, XSQLAdapter _a) {
this.socket = s;
this.adapter = _a;
try {
in = new DataInputStream(
- new BufferedInputStream(s.getInputStream()));
- out = new DataOutputStream(
- new BufferedOutputStream(s.getOutputStream()));
+ new BufferedInputStream(s.getInputStream()));
+ out = new DataOutputStream(new BufferedOutputStream(
+ s.getOutputStream()));
new JDBCObjectReader();
- this.start();
+ new Thread(this).start();
} catch (Exception err) {
err.printStackTrace();
}
}
- public JDBCConnection(String addr) throws Exception {
+ public Connection getProxy() {
+ return this;
+ /*
+ return (Connection) Proxy.newProxyInstance(this.getClass()
+ .getClassLoader(), new Class[] { Connection.class },
+ new JDBCProxy(this));
+ */
+ }
+
+ public JDBCConnection(String _addr) throws Exception {
+ this.addr = _addr;
+ init();
+ }
+
+ private void init() throws Exception {
+ if (addr.startsWith("http://"))
+ addr = addr.substring(7);
+ System.err.print("Address is:" + addr);
socket = new Socket(addr, 40004);
try {
- in = new DataInputStream(
- new BufferedInputStream(socket.getInputStream()));
- out = new DataOutputStream(
- new BufferedOutputStream(socket.getOutputStream()));
+ in = new DataInputStream(new BufferedInputStream(
+ socket.getInputStream()));
+ out = new DataOutputStream(new BufferedOutputStream(
+ socket.getOutputStream()));
new JDBCObjectReader();
- this.start();
+ new Thread(this).start();
} catch (Exception err) {
err.printStackTrace();
}
ServerSocket s = new ServerSocket(50003);
socket = s.accept();
try {
- in = new DataInputStream(
- new BufferedInputStream(socket.getInputStream()));
- out = new DataOutputStream(
- new BufferedOutputStream(socket.getOutputStream()));
+ in = new DataInputStream(new BufferedInputStream(
+ socket.getInputStream()));
+ out = new DataOutputStream(new BufferedOutputStream(
+ socket.getOutputStream()));
new JDBCObjectReader();
- this.start();
+ new Thread(this).start();
} catch (Exception err) {
err.printStackTrace();
}
}
}
-
private boolean isStopped() {
if (adapter != null && adapter.stopped) {
return true;
} catch (Exception err) {
System.out.println("Connection Lost or Closed.");
+ try {
+ out.close();
+ } catch (Exception er) {
+ }
+ out = null;
+ try {
+ in.close();
+ } catch (Exception er) {
+ }
+ in = null;
try {
socket.close();
} catch (Exception err2) {
}
- //err.printStackTrace();
+ socket = null;
}
}
}
public void processCommand(JDBCCommand cmd) {
switch (cmd.getType()) {
- case JDBCCommand.TYPE_EXECUTE_QUERY:
- try {
- JDBCServer.execute(cmd.getRS(), adapter);
- send(new JDBCCommand(cmd.getRS(),
- JDBCCommand.TYPE_QUERY_REPLY));
- QueryUpdater u = new QueryUpdater(cmd.getRS());
- new Thread(u).start();
- } catch (Exception err) {
- send(new JDBCCommand(err, cmd.getRSID()));
- }
- break;
- case JDBCCommand.TYPE_QUERY_REPLY:
- JDBCResultSet rs1 = JDBCStatement.getQuery(cmd.getRS().getID());
- rs1.updateData(cmd.getRS());
- break;
- case JDBCCommand.TYPE_QUERY_RECORD:
- JDBCResultSet rs2 = JDBCStatement.getQuery(cmd.getRSID());
- rs2.addRecord(cmd.getRecord());
- break;
- case JDBCCommand.TYPE_QUERY_FINISH:
- JDBCResultSet rs3 = JDBCStatement.removeQuery(cmd.getRSID());
- rs3.setFinished(true);
- break;
- case JDBCCommand.TYPE_QUERY_ERROR:
- System.err.println("ERROR Executing Query\n");
- cmd.getERROR().printStackTrace();
- JDBCResultSet rs4 = JDBCStatement.removeQuery(cmd.getRSID());
- rs4.setError(cmd.getERROR());
- rs4.setFinished(true);
- synchronized (rs4) {
- rs4.notifyAll();
- }
+ case JDBCCommand.TYPE_METADATA_REPLY:
+ this.metaData = cmd.getBluePrint();
+ synchronized (this) {
+ this.notifyAll();
+ }
+ break;
+ case JDBCCommand.TYPE_METADATA:
+ send(new JDBCCommand(this.adapter.getBluePrint()));
+ break;
+ case JDBCCommand.TYPE_EXECUTE_QUERY:
+ try {
+ JDBCServer.execute(cmd.getRS(), adapter);
+ send(new JDBCCommand(cmd.getRS(), JDBCCommand.TYPE_QUERY_REPLY));
+ QueryUpdater u = new QueryUpdater(cmd.getRS());
+ new Thread(u).start();
+ } catch (Exception err) {
+ send(new JDBCCommand(err, cmd.getRSID()));
+ }
+ break;
+ case JDBCCommand.TYPE_QUERY_REPLY:
+ JDBCResultSet rs1 = JDBCStatement.getQuery(cmd.getRS().getID());
+ rs1.updateData(cmd.getRS());
+ break;
+ case JDBCCommand.TYPE_QUERY_RECORD:
+ JDBCResultSet rs2 = JDBCStatement.getQuery(cmd.getRSID());
+ rs2.addRecord(cmd.getRecord());
+ break;
+ case JDBCCommand.TYPE_QUERY_FINISH:
+ JDBCResultSet rs3 = JDBCStatement.removeQuery(cmd.getRSID());
+ rs3.setFinished(true);
+ break;
+ case JDBCCommand.TYPE_QUERY_ERROR:
+ System.err.println("ERROR Executing Query\n");
+ cmd.getERROR().printStackTrace();
+ JDBCResultSet rs4 = JDBCStatement.removeQuery(cmd.getRSID());
+ rs4.setError(cmd.getERROR());
+ rs4.setFinished(true);
+ synchronized (rs4) {
+ rs4.notifyAll();
+ }
}
}
}
public void send(Object o) {
+
+ if (this.socket == null) {
+ try {
+ init();
+ } catch (Exception err) {
+ err.printStackTrace();
+ }
+ }
+
try {
ByteArrayOutputStream bout = new ByteArrayOutputStream();
ObjectOutputStream oout = new ObjectOutputStream(bout);
@Override
public void close() throws SQLException {
+ wasClosed = true;
try {
socket.close();
} catch (Exception err) {
@Override
public Array createArrayOf(String typeName, Object[] elements)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public Statement createStatement() throws SQLException {
- return new JDBCStatement(this);
+ return new JDBCStatement(this).getProxy();
}
@Override
public Statement createStatement(int resultSetType,
- int resultSetConcurrency, int resultSetHoldability)
- throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ int resultSetConcurrency, int resultSetHoldability)
+ throws SQLException {
+ return new JDBCStatement(this).getProxy();
}
@Override
- public Statement createStatement(int resultSetType,
- int resultSetConcurrency)
- throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ public Statement createStatement(int resultSetType, int resultSetConcurrency)
+ throws SQLException {
+ return new JDBCStatement(this).getProxy();
}
@Override
public Struct createStruct(String typeName, Object[] attributes)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public DatabaseMetaData getMetaData() throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ if (this.metaData == null) {
+ JDBCCommand cmd = new JDBCCommand();
+ cmd.setType(JDBCCommand.TYPE_METADATA);
+ synchronized (this) {
+ send(cmd);
+ try {
+ this.wait();
+ } catch (Exception err) {
+ err.printStackTrace();
+ }
+ }
+ }
+ return metaData;
}
@Override
@Override
public boolean isClosed() throws SQLException {
- // TODO Auto-generated method stub
return false;
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType,
- int resultSetConcurrency, int resultSetHoldability)
- throws SQLException {
+ int resultSetConcurrency, int resultSetHoldability)
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType,
- int resultSetConcurrency) throws SQLException {
+ int resultSetConcurrency) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType,
- int resultSetConcurrency, int resultSetHoldability)
- throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ int resultSetConcurrency, int resultSetHoldability)
+ throws SQLException {
+ System.err.println("SQL 1=" + sql);
+ return new JDBCStatement(this, sql).getProxy();
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType,
- int resultSetConcurrency) throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ int resultSetConcurrency) throws SQLException {
+ System.err.println("SQL 2=" + sql);
+ return new JDBCStatement(this, sql).getProxy();
}
@Override
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)
- throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ throws SQLException {
+ System.err.println("SQL 3=" + sql);
+ return new JDBCStatement(this, sql).getProxy();
}
@Override
public PreparedStatement prepareStatement(String sql, int[] columnIndexes)
- throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ throws SQLException {
+ System.err.println("SQL 4=" + sql);
+ return new JDBCStatement(this, sql).getProxy();
}
@Override
public PreparedStatement prepareStatement(String sql, String[] columnNames)
- throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ throws SQLException {
+ System.err.println("SQL 5=" + sql);
+ return new JDBCStatement(this, sql).getProxy();
}
@Override
public PreparedStatement prepareStatement(String sql) throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ System.err.println("SQL 6=" + sql);
+ return new JDBCStatement(this, sql).getProxy();
}
@Override
@Override
public void setClientInfo(Properties properties)
- throws SQLClientInfoException {
+ throws SQLClientInfoException {
// TODO Auto-generated method stub
}
@Override
public void setClientInfo(String name, String value)
- throws SQLClientInfoException {
+ throws SQLClientInfoException {
// TODO Auto-generated method stub
}
@Override
public void setNetworkTimeout(Executor executor, int milliseconds)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
// TODO Auto-generated method stub
return 0;
}
-}
+}
--- /dev/null
+package org.opendaylight.controller.md.sal.dom.xsql.jdbc;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+
+public class JDBCProxy implements InvocationHandler {
+
+ private Object myObject = null;
+ private Class<?> myObjectClass = null;
+
+ public JDBCProxy(Object obj) {
+ this.myObject = obj;
+ this.myObjectClass = this.myObject.getClass();
+ }
+
+ @Override
+ public Object invoke(Object proxy, Method method, Object[] args)
+ throws Throwable {
+ System.err.println("Class " + this.myObjectClass.getSimpleName()
+ + " Method " + method.getName());
+ return method.invoke(this.myObject, args);
+ }
+
+}
import java.io.Reader;
import java.io.Serializable;
import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLCriteria;
import org.opendaylight.controller.md.sal.dom.xsql.XSQLODLUtils;
-public class JDBCResultSet
- implements Serializable, ResultSet, ResultSetMetaData {
+public class JDBCResultSet implements Serializable, ResultSet,
+ ResultSetMetaData {
private static final long serialVersionUID = -7450200738431047057L;
private String sql = null;
- private List<XSQLBluePrintNode> tablesInQuery =
- new ArrayList<XSQLBluePrintNode>();
- private Map<String, XSQLBluePrintNode> tablesInQueryMap =
- new ConcurrentHashMap<String, XSQLBluePrintNode>();
+ private List<XSQLBluePrintNode> tablesInQuery = new ArrayList<XSQLBluePrintNode>();
+ private Map<String, XSQLBluePrintNode> tablesInQueryMap = new ConcurrentHashMap<String, XSQLBluePrintNode>();
private List<XSQLColumn> fieldsInQuery = new ArrayList<XSQLColumn>();
private transient LinkedList<Map> records = new LinkedList<Map>();
private transient Map currentRecord = null;
private int id = 0;
private static Integer nextID = new Integer(0);
public int numberOfTasks = 0;
- private Map<String, Map<XSQLColumn, List<XSQLCriteria>>> criteria =
- new ConcurrentHashMap<String, Map<XSQLColumn, List<XSQLCriteria>>>();
+ private Map<String, Map<XSQLColumn, List<XSQLCriteria>>> criteria = new ConcurrentHashMap<String, Map<XSQLColumn, List<XSQLCriteria>>>();
private Exception err = null;
private List<Record> EMPTY_RESULT = new LinkedList<Record>();
+ private transient Map<String,JDBCResultSet> subQueries = new HashMap<String,JDBCResultSet>();
+
+ public ResultSet getProxy() {
+ return (ResultSet) Proxy.newProxyInstance(this.getClass().getClassLoader(), new Class[] {ResultSet.class }, new JDBCProxy(this));
+ }
+
+ public void setSQL(String _sql) {
+ this.sql = _sql;
+ }
+
+ public JDBCResultSet addSubQuery(String _sql,String logicalName) {
+ if(subQueries == null)
+ subQueries = new HashMap<String,JDBCResultSet>();
+ JDBCResultSet rs = new JDBCResultSet(_sql);
+ this.subQueries.put(logicalName,rs);
+ return rs;
+ }
+
+ public Map<String,JDBCResultSet> getSubQueries() {
+ if(this.subQueries==null)
+ this.subQueries = new HashMap<>();
+ return this.subQueries;
+ }
public JDBCResultSet(String _sql) {
synchronized (JDBCResultSet.class) {
}
public int isObjectFitCriteria(Map objValues, String tableName) {
- Map<XSQLColumn, List<XSQLCriteria>> tblCriteria = criteria.get(tableName);
+ Map<XSQLColumn, List<XSQLCriteria>> tblCriteria = criteria
+ .get(tableName);
if (tblCriteria == null) {
return 1;
}
for (Map.Entry<XSQLColumn, List<XSQLCriteria>> cc : tblCriteria
- .entrySet()) {
+ .entrySet()) {
for (XSQLCriteria c : cc.getValue()) {
Object value = objValues.get(cc.getKey().toString());
int result = c.checkValue(value);
}
public int isObjectFitCriteria(Object element, Class cls) {
- Map<XSQLColumn, List<XSQLCriteria>> tblCriteria =
- criteria.get(cls.getName());
+ Map<XSQLColumn, List<XSQLCriteria>> tblCriteria = criteria.get(cls
+ .getName());
if (tblCriteria == null) {
return 1;
}
for (Map.Entry<XSQLColumn, List<XSQLCriteria>> cc : tblCriteria
- .entrySet()) {
+ .entrySet()) {
for (XSQLCriteria c : cc.getValue()) {
- int result =
- c.isObjectFitCriteria(element, cc.getKey().getName());
+ int result = c.isObjectFitCriteria(element, cc.getKey()
+ .getName());
if (result == 0) {
return 0;
}
}
}
-
public void addRecord(ArrayList hierarchy) {
Map rec = new HashMap();
for (int i = hierarchy.size() - 1; i >= 0; i--) {
Object element = hierarchy.get(i);
for (XSQLColumn c : fieldsInQuery) {
- if (c.getTableName()
- .equals(element.getClass().getSimpleName())) {
+ if (c.getTableName().equals(element.getClass().getSimpleName())) {
try {
- Method m = element.getClass().getMethod(c.getName(), null);
+ Method m = element.getClass().getMethod(c.getName(),
+ null);
Object value = m.invoke(element, null);
rec.put(c.getName(), value);
} catch (Exception err) {
Map subChildren = XSQLODLUtils.getChildren(node);
Map result = new HashMap();
for (Object stc : subChildren.values()) {
- if (stc.getClass().getName()
- .endsWith("ImmutableAugmentationNode")) {
+ if (stc.getClass().getName().endsWith("ImmutableAugmentationNode")) {
Map values = XSQLODLUtils.getChildren(stc);
for (Object key : values.keySet()) {
Object val = values.get(key);
- if (val.getClass().getName()
- .endsWith("ImmutableLeafNode")) {
+ if (val.getClass().getName().endsWith("ImmutableLeafNode")) {
Object value = XSQLODLUtils.getValue(val);
String k = XSQLODLUtils.getNodeName(val);
if (value != null) {
result.put(bpn.getBluePrintNodeName() + "." + k,
- value.toString());
+ value.toString());
}
}
}
String k = XSQLODLUtils.getNodeName(stc);
Object value = XSQLODLUtils.getValue(stc);
if (value != null) {
- result.put(bpn.getBluePrintNodeName() + "." + k, value.toString());
+ result.put(bpn.getBluePrintNodeName() + "." + k,
+ value.toString());
}
}
}
return result;
}
- private void addToData(Record rec, XSQLBluePrintNode bpn, XSQLBluePrint bluePrint, Map fullRecord) {
- XSQLBluePrintNode eNodes[] = bluePrint.getBluePrintNodeByODLTableName(XSQLODLUtils.getNodeIdentiofier(rec.element));
+ private void addToData(Record rec, XSQLBluePrintNode bpn,
+ XSQLBluePrint bluePrint, Map fullRecord) {
+ XSQLBluePrintNode eNodes[] = bluePrint
+ .getBluePrintNodeByODLTableName(XSQLODLUtils
+ .getNodeIdentiofier(rec.element));
if (bpn != null) {
for (XSQLColumn c : fieldsInQuery) {
- for(XSQLBluePrintNode eNode:eNodes){
- if (((XSQLBluePrintNode) c.getBluePrintNode()).getBluePrintNodeName().equals(eNode.getBluePrintNodeName())) {
- //Object value = Criteria.getValue(rec.element, c.getName());
+ for (XSQLBluePrintNode eNode : eNodes) {
+ if (((XSQLBluePrintNode) c.getBluePrintNode())
+ .getBluePrintNodeName().equals(
+ eNode.getBluePrintNodeName())) {
+ // Object value = Criteria.getValue(rec.element,
+ // c.getName());
String columnName = c.toString();
Object value = fullRecord.get(columnName);
if (value != null) {
return false;
}
- public List<Object> getChildren(Object node, String tableName,XSQLBluePrint bluePrint) {
+ public List<Object> getChildren(Object node, String tableName,
+ XSQLBluePrint bluePrint) {
List<Object> children = XSQLODLUtils.getMChildren(node);
List<Object> result = new LinkedList<Object>();
for (Object child : children) {
String odlNodeName = XSQLODLUtils.getNodeIdentiofier(child);
- if(odlNodeName==null) continue;
+ if (odlNodeName == null)
+ continue;
- XSQLBluePrintNode eNodes[] = bluePrint.getBluePrintNodeByODLTableName(odlNodeName);
- if(eNodes==null) continue;
+ XSQLBluePrintNode eNodes[] = bluePrint
+ .getBluePrintNodeByODLTableName(odlNodeName);
+ if (eNodes == null)
+ continue;
boolean match = false;
- for(XSQLBluePrintNode enode:eNodes){
- if(tableName.startsWith(enode.toString())){
+ for (XSQLBluePrintNode enode : eNodes) {
+ if (tableName.startsWith(enode.toString())) {
match = true;
break;
}
}
- if(!match) continue;
+ if (!match)
+ continue;
if (child.getClass().getName().endsWith("ImmutableContainerNode")) {
result.add(child);
- }else
- if (child.getClass().getName().endsWith("ImmutableAugmentationNode")) {
+ } else if (child.getClass().getName()
+ .endsWith("ImmutableAugmentationNode")) {
List<Object> _children = XSQLODLUtils.getMChildren(child);
for (Object c : _children) {
- if (c.getClass().getName().endsWith("ImmutableContainerNode")) {
+ if (c.getClass().getName()
+ .endsWith("ImmutableContainerNode")) {
result.add(c);
}
}
return result;
}
- public List<Record> addRecords(Object element, XSQLBluePrintNode node,boolean root, String tableName,XSQLBluePrint bluePrint) {
+ public List<Record> addRecords(Object element, XSQLBluePrintNode node,
+ boolean root, String tableName, XSQLBluePrint bluePrint) {
List<Record> result = new LinkedList<Record>();
String nodeID = XSQLODLUtils.getNodeIdentiofier(element);
if (node.getODLTableName().equals(nodeID)) {
- XSQLBluePrintNode bluePrintNode = bluePrint.getBluePrintNodeByODLTableName(nodeID)[0];
+ XSQLBluePrintNode bluePrintNode = bluePrint
+ .getBluePrintNodeByODLTableName(nodeID)[0];
Record rec = new Record();
rec.element = element;
- XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode.getBluePrintNodeName());
- if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName()) || bpn != null) {
+ XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode
+ .getBluePrintNodeName());
+ if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName())
+ || bpn != null) {
Map<?, ?> allKeyValues = collectColumnValues(element, bpn);
- if (!(isObjectFitCriteria(allKeyValues, bpn.getBluePrintNodeName()) == 1)) {
+ if (!(isObjectFitCriteria(allKeyValues,
+ bpn.getBluePrintNodeName()) == 1)) {
return EMPTY_RESULT;
}
- addToData(rec, bpn, bluePrint,allKeyValues);
+ addToData(rec, bpn, bluePrint, allKeyValues);
}
if (root) {
addRecord(rec.data);
}
XSQLBluePrintNode parent = node.getParent();
- List<Record> subRecords = addRecords(element, parent, false, tableName,bluePrint);
+ List<Record> subRecords = addRecords(element, parent, false, tableName,
+ bluePrint);
for (Record subRec : subRecords) {
- List<Object> subO = getChildren(subRec.element, tableName,bluePrint);
+ List<Object> subO = getChildren(subRec.element, tableName,
+ bluePrint);
if (subO != null) {
for (Object subData : subO) {
Record rec = new Record();
rec.data.putAll(subRec.data);
String recID = XSQLODLUtils.getNodeIdentiofier(rec.element);
- XSQLBluePrintNode eNodes[] = bluePrint.getBluePrintNodeByODLTableName(recID);
+ XSQLBluePrintNode eNodes[] = bluePrint
+ .getBluePrintNodeByODLTableName(recID);
XSQLBluePrintNode bpn = null;
- for(XSQLBluePrintNode eNode:eNodes){
- bpn = this.tablesInQueryMap.get(eNode.getBluePrintNodeName());
- if(bpn!=null)
+ for (XSQLBluePrintNode eNode : eNodes) {
+ bpn = this.tablesInQueryMap.get(eNode
+ .getBluePrintNodeName());
+ if (bpn != null)
break;
}
boolean isObjectInCriteria = true;
if (bpn != null) {
Map allKeyValues = collectColumnValues(rec.element, bpn);
- if ((isObjectFitCriteria(allKeyValues, bpn.getBluePrintNodeName()) == 1)) {
- addToData(rec, bpn,bluePrint,allKeyValues);
+ if ((isObjectFitCriteria(allKeyValues,
+ bpn.getBluePrintNodeName()) == 1)) {
+ addToData(rec, bpn, bluePrint, allKeyValues);
} else {
isObjectInCriteria = false;
}
if (isObjectInCriteria) {
if (root) {
- if(!rec.data.isEmpty())
+ if (!rec.data.isEmpty())
addRecord(rec.data);
} else {
result.add(rec);
@Override
public BigDecimal getBigDecimal(int columnIndex, int scale)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public BigDecimal getBigDecimal(String columnLabel, int scale)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public Object getObject(int columnIndex, Map<String, Class<?>> map)
- throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ throws SQLException {
+ return getObject(columnIndex);
}
@Override
public Object getObject(int columnIndex) throws SQLException {
- return currentRecord
- .get(this.fieldsInQuery.get(columnIndex - 1).toString());
+ return currentRecord.get(this.fieldsInQuery.get(columnIndex - 1)
+ .toString());
}
@Override
public Object getObject(String columnLabel, Map<String, Class<?>> map)
- throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ throws SQLException {
+ return getObject(columnLabel);
}
@Override
@Override
public String getString(int columnIndex) throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ return "Kuku";
}
@Override
public String getString(String columnLabel) throws SQLException {
- // TODO Auto-generated method stub
- return null;
+ return "Kuku";
}
@Override
@Override
public Timestamp getTimestamp(int columnIndex, Calendar cal)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public Timestamp getTimestamp(String columnLabel, Calendar cal)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public int getType() throws SQLException {
- // TODO Auto-generated method stub
- return 0;
+ return ResultSet.TYPE_FORWARD_ONLY;
}
@Override
}
@Override
- public InputStream getUnicodeStream(String columnLabel)
- throws SQLException {
+ public InputStream getUnicodeStream(String columnLabel) throws SQLException {
// TODO Auto-generated method stub
return null;
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x, int length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x, long length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateAsciiStream(String columnLabel, InputStream x, int length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
- public void updateAsciiStream(String columnLabel, InputStream x,
- long length)
- throws SQLException {
+ public void updateAsciiStream(String columnLabel, InputStream x, long length)
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateAsciiStream(String columnLabel, InputStream x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBigDecimal(int columnIndex, BigDecimal x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBigDecimal(String columnLabel, BigDecimal x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x, int length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x, long length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
- public void updateBinaryStream(String columnLabel, InputStream x,
- int length)
- throws SQLException {
+ public void updateBinaryStream(String columnLabel, InputStream x, int length)
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBinaryStream(String columnLabel, InputStream x,
- long length) throws SQLException {
+ long length) throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBinaryStream(String columnLabel, InputStream x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
}
@Override
- public void updateBlob(int columnIndex, InputStream inputStream,
- long length)
- throws SQLException {
+ public void updateBlob(int columnIndex, InputStream inputStream, long length)
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBlob(int columnIndex, InputStream inputStream)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBlob(String columnLabel, InputStream inputStream,
- long length) throws SQLException {
+ long length) throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBlob(String columnLabel, InputStream inputStream)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateBoolean(String columnLabel, boolean x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateCharacterStream(int columnIndex, Reader x, int length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateCharacterStream(int columnIndex, Reader x, long length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateCharacterStream(int columnIndex, Reader x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateCharacterStream(String columnLabel, Reader reader,
- int length) throws SQLException {
+ int length) throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateCharacterStream(String columnLabel, Reader reader,
- long length) throws SQLException {
+ long length) throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateCharacterStream(String columnLabel, Reader reader)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateClob(int columnIndex, Reader reader, long length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateClob(String columnLabel, Reader reader, long length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateClob(String columnLabel, Reader reader)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNCharacterStream(int columnIndex, Reader x, long length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNCharacterStream(int columnIndex, Reader x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNCharacterStream(String columnLabel, Reader reader,
- long length) throws SQLException {
+ long length) throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNCharacterStream(String columnLabel, Reader reader)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNClob(int columnIndex, Reader reader, long length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
- public void updateNClob(int columnIndex, Reader reader)
- throws SQLException {
+ public void updateNClob(int columnIndex, Reader reader) throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNClob(String columnLabel, NClob nClob)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNClob(String columnLabel, Reader reader, long length)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNClob(String columnLabel, Reader reader)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNString(int columnIndex, String nString)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateNString(String columnLabel, String nString)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateObject(int columnIndex, Object x, int scaleOrLength)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateObject(String columnLabel, Object x, int scaleOrLength)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateSQLXML(int columnIndex, SQLXML xmlObject)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateSQLXML(String columnLabel, SQLXML xmlObject)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateTimestamp(int columnIndex, Timestamp x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public void updateTimestamp(String columnLabel, Timestamp x)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
}
@Override
public int getColumnType(int column) throws SQLException {
- // TODO Auto-generated method stub
- return 0;
+ return 12;
}
@Override
@Override
public <T> T getObject(String columnLabel, Class<T> type)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return null;
}
-
-
- ////Metadata
-
-
+ // //Metadata
}
package org.opendaylight.controller.md.sal.dom.xsql.jdbc;
-import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
-import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
-import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrintNode;
-import org.opendaylight.controller.md.sal.dom.xsql.XSQLColumn;
-import org.opendaylight.controller.md.sal.dom.xsql.XSQLCriteria;
-
import java.net.ServerSocket;
import java.net.Socket;
import java.sql.SQLException;
import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrintNode;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLColumn;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLCriteria;
+
public class JDBCServer extends Thread {
private ServerSocket socket = null;
private XSQLAdapter adapter = null;
}
public static void execute(JDBCResultSet rs, XSQLAdapter adapter)
- throws SQLException {
- parseTables(rs, adapter.getBluePrint());
- parseFields(rs, adapter.getBluePrint());
- parseCriteria(rs, adapter.getBluePrint());
+ throws SQLException {
+ if(rs.getSQL().toLowerCase().trim().equals("select 1")){
+ rs.setFinished(true);
+ return;
+ }
+ checkAndBreakSubQueries(rs, adapter);
+ if (rs.getSubQueries().size() == 0) {
+ parseTables(rs, adapter.getBluePrint());
+ parseFields(rs, adapter.getBluePrint());
+ parseCriteria(rs, adapter.getBluePrint());
+ try {
+ adapter.execute(rs);
+ } catch (Exception err) {
+ throw new SQLException("Error", err);
+ }
+ } else {
+ parseExternalQuery(rs);
+ }
+ }
+
+ public static void parseExternalQuery(JDBCResultSet rs) throws SQLException {
+ String sql = rs.getSQL();
+ for (Map.Entry<String, JDBCResultSet> entry : rs.getSubQueries()
+ .entrySet()) {
+ int index = sql.toLowerCase().indexOf(entry.getValue().getSQL());
+ String extSql = sql.substring(0, index);
+ index = extSql.lastIndexOf("(");
+ extSql = extSql.substring(0, index);
+ System.out.println("External SQL=" + extSql);
+ parseLogicalFields(extSql, rs);
+ }
+ }
+
+ public static void parseLogicalFields(String sql, JDBCResultSet rs)
+ throws SQLException {
+ if(sql.trim().toLowerCase().equals("select * from")){
+ for (Map.Entry<String, JDBCResultSet> entry : rs.getSubQueries().entrySet()) {
+ for(XSQLBluePrintNode node:entry.getValue().getTables()){
+ rs.addTableToQuery(node);
+ }
+ rs.getFields().addAll(entry.getValue().getFields());
+ while (entry.getValue().next()) {
+ Map rec = entry.getValue().getCurrent();
+ Map newRec = new HashMap();
+ newRec.putAll(rec);
+ rs.addRecord(newRec);
+ }
+ }
+ rs.setFinished(true);
+ return;
+ }
+
+ Map<String, XSQLBluePrintNode> logicalNameToNode = new HashMap<String, XSQLBluePrintNode>();
+ Map<String, String> origNameToName = new HashMap<String, String>();
+ List<XSQLColumn> columnOrder = new ArrayList<>();
+ int nextLogField = addNextLogicalField(sql, 0,
+ logicalNameToNode, origNameToName,columnOrder);
+ int next = sql.toLowerCase().indexOf(" as ", nextLogField);
+ while (next != -1) {
+ nextLogField = addNextLogicalField(sql, nextLogField + 1,
+ logicalNameToNode, origNameToName,columnOrder);
+ next = sql.toLowerCase().indexOf(" as ", nextLogField + 1);
+ }
+
+ for (XSQLBluePrintNode node : logicalNameToNode.values()) {
+ rs.addTableToQuery(node);
+ }
+ rs.getFields().addAll(columnOrder);
+ for (Map.Entry<String, JDBCResultSet> entry : rs.getSubQueries().entrySet()) {
+ while (entry.getValue().next()) {
+ Map rec = entry.getValue().getCurrent();
+ Map newRec = new HashMap();
+ for (Iterator iter = rec.entrySet().iterator(); iter.hasNext();) {
+ Map.Entry e = (Map.Entry) iter.next();
+ String key = (String) e.getKey();
+ Object value = e.getValue();
+ String logicalKey = origNameToName.get(key);
+ if (value != null && logicalKey != null) {
+ newRec.put(logicalKey, value);
+ }
+ }
+ rs.addRecord(newRec);
+ }
+ }
+ rs.setFinished(true);
+ }
+
+ public static void main(String args[]) {
+ String sql = "SELECT DISTINCT"
+ + "\"LOGICAL_TABLE_1\".\"nodes/node.id\" AS \"COL0\"\n"
+ + ",\"LOGICAL_TABLE_1\".\"nodes/node.id\" AS \"COL1\"\n"
+ + ",\"LOGICAL_TABLE_1\".\"nodes/node.id\" AS \"COL2\"\n"
+ + "FROM\n"
+ + "(select * from nodes/node;) \"LOGICAL_TABLE_1\"\n";
+ JDBCResultSet rs = new JDBCResultSet(sql);
try {
- adapter.execute(rs);
+ parseLogicalFields(sql, rs);
} catch (Exception err) {
- throw new SQLException("Error", err);
+ err.printStackTrace();
+ }
+ }
+
+ public static int addNextLogicalField(String sql, int startIndex,
+ Map<String, XSQLBluePrintNode> logicalNameToNode,
+ Map<String, String> origNameToName, List<XSQLColumn> columnOrder) {
+ int index1 = sql.indexOf("\"", startIndex);
+ int index2 = sql.indexOf("\".\"", index1);
+ int index3 = sql.indexOf("\"", index2 + 3);
+ int index4 = sql.toLowerCase().indexOf(" as ", startIndex);
+ int index5 = sql.indexOf("\"", index4);
+ int index6 = sql.indexOf("\"", index5 + 1);
+
+ String tblName = sql.substring(index1 + 1, index2);
+ String origFieldNameFull = sql.substring(index2 + 3, index3);
+ String origTableName = "";
+ String origFieldName = "";
+ if (origFieldNameFull.indexOf(".") != -1) {
+ origTableName = origFieldNameFull.substring(0,origFieldNameFull.indexOf("."));
+ origFieldName = origFieldNameFull.substring(origFieldNameFull.indexOf(".") + 1);
+ }
+ String logicalFieldName = sql.substring(index5 + 1, index6);
+ XSQLBluePrintNode node = logicalNameToNode.get(tblName);
+ if (node == null) {
+ node = new XSQLBluePrintNode(tblName, origTableName, 0);
+ logicalNameToNode.put(tblName, node);
+ }
+ columnOrder.add(node.addColumn(logicalFieldName, tblName, origFieldName, origTableName));
+ origNameToName.put(origFieldNameFull, tblName + "." + logicalFieldName);
+ return index6;
+ }
+
+ public static void checkAndBreakSubQueries(JDBCResultSet rs,XSQLAdapter adapter) throws SQLException {
+ String sql = rs.getSQL().toLowerCase();
+ int index = sql.indexOf("select");
+ if (index == -1)
+ throw new SQLException("Select statement is missing...");
+ int index2 = sql.indexOf("select", index + 6);
+ if (index2 != -1) {
+ int startSubQuery = index2;
+ for (int i = startSubQuery; i >= 0; i--) {
+ if (sql.charAt(i) == '(') {
+ startSubQuery = i;
+ break;
+ }
+ }
+ int braketCount = 0;
+ int endSubQuery = startSubQuery;
+ do {
+ if (sql.charAt(endSubQuery) == '(')
+ braketCount++;
+ else if (sql.charAt(endSubQuery) == ')')
+ braketCount--;
+ endSubQuery++;
+ } while (braketCount > 0 || endSubQuery == sql.length());
+ String subQuerySQL = sql.substring(startSubQuery + 1,endSubQuery - 1);
+ if(rs.getSQL().toLowerCase().substring(0,startSubQuery).trim().equals("select * from")){
+ rs.setSQL(subQuerySQL);
+ return;
+ }
+ index = sql.indexOf("\"", endSubQuery);
+ index2 = sql.indexOf("\"", index + 1);
+ if(index==-1){
+ index = endSubQuery;
+ index2 = sql.length();
+ }
+ String logicalName = rs.getSQL().substring(index + 1, index2).trim();
+ JDBCResultSet subRS = rs.addSubQuery(subQuerySQL, logicalName);
+ JDBCServer.execute(subRS, adapter);
}
}
public static void parseTables(JDBCResultSet rs, XSQLBluePrint bp)
- throws SQLException {
+ throws SQLException {
String lowSQL = rs.getSQL().toLowerCase();
int from = lowSQL.indexOf("from");
int where = lowSQL.indexOf("where");
String tableName = tokens.nextToken().trim();
XSQLBluePrintNode table = bp.getBluePrintNodeByTableName(tableName);
if (table == null) {
- throw new SQLException(
- "Unknown table name \"" + tableName + "\"");
+ throw new SQLException("Unknown table name \"" + tableName
+ + "\"");
}
rs.addTableToQuery(table);
}
}
public static void addCriteria(XSQLColumn col, XSQLCriteria c,
- JDBCResultSet rs) {
- Map<XSQLColumn, List<XSQLCriteria>> tblCriteria =
- rs.getCriteria().get(col.getTableName());
+ JDBCResultSet rs) {
+ Map<XSQLColumn, List<XSQLCriteria>> tblCriteria = rs.getCriteria().get(
+ col.getTableName());
if (tblCriteria == null) {
- tblCriteria =
- new ConcurrentHashMap<XSQLColumn, List<XSQLCriteria>>();
+ tblCriteria = new ConcurrentHashMap<XSQLColumn, List<XSQLCriteria>>();
rs.getCriteria().put(col.getTableName(), tblCriteria);
}
List<XSQLCriteria> lstCriteria = tblCriteria.get(col);
}
public static void parseFields(JDBCResultSet rs, XSQLBluePrint bp)
- throws SQLException {
+ throws SQLException {
String lowSQL = rs.getSQL().toLowerCase();
if (!lowSQL.startsWith("select")) {
throw new SQLException("Missing 'select' statement.");
return;
}
if (token.indexOf(".") != -1) {
- XSQLBluePrintNode tbl = bp.getBluePrintNodeByTableName(
- token.substring(0, token.indexOf(".")).trim());
+ XSQLBluePrintNode tbl = bp.getBluePrintNodeByTableName(token
+ .substring(0, token.indexOf(".")).trim());
String p = token.substring(token.indexOf(".") + 1);
if (p.equals("*")) {
for (XSQLColumn c : tbl.getColumns()) {
}
}
if (col == null) {
- throw new SQLException(
- "Unknown field name '" + token + "'.");
+ throw new SQLException("Unknown field name '" + token
+ + "'.");
}
rs.getFields().add(col);
String lowSQL = rs.getSQL().toLowerCase();
int where = lowSQL.indexOf("where");
int order = lowSQL.indexOf("order");
- int subQuery = lowSQL.indexOf("select", 2);
int whereTo = lowSQL.indexOf(";");
if (where == -1) {
return;
}
- if (where != -1 && subQuery != -1 && subQuery < where) {
- return;
- }
-
- if (order != -1 && subQuery != -1 && order < subQuery) {
- whereTo = order;
- } else if (order != -1 && subQuery != -1 && order > subQuery) {
- whereTo = subQuery;
- } else if (order != -1) {
+ if (order != -1) {
whereTo = order;
- } else if (subQuery != -1) {
- whereTo = subQuery;
}
- String whereStatement =
- rs.getSQL().substring(where + 5, whereTo).trim();
+
+ if(whereTo==-1)
+ whereTo=lowSQL.length();
+
+ String whereStatement = rs.getSQL().substring(where + 5, whereTo)
+ .trim();
XSQLCriteria cr = new XSQLCriteria(whereStatement, -1);
for (XSQLBluePrintNode tbl : rs.getTables()) {
for (XSQLColumn col : tbl.getColumns()) {
package org.opendaylight.controller.md.sal.dom.xsql.jdbc;
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.Clob;
import java.sql.Connection;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.Ref;
import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLWarning;
-import java.sql.Statement;
+import java.sql.SQLXML;
+import java.sql.Time;
+import java.sql.Timestamp;
import java.util.ArrayList;
+import java.util.Calendar;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
-public class JDBCStatement implements Statement {
+public class JDBCStatement implements PreparedStatement {
private JDBCResultSet rs = null;
private transient JDBCConnection connection = null;
- private static Map<Integer, JDBCResultSet> queries =
- new ConcurrentHashMap<Integer, JDBCResultSet>();
+ private static Map<Integer, JDBCResultSet> queries = new ConcurrentHashMap<Integer, JDBCResultSet>();
+ private String sql = null;
+
+ public JDBCStatement(JDBCConnection con,String _sql) {
+ this.connection = con;
+ this.sql = _sql;
+ }
public JDBCStatement(JDBCConnection con) {
this.connection = con;
}
+ public void setSQL(String _sql){
+ this.sql = _sql;
+ }
+
public JDBCStatement() {
}
+ public PreparedStatement getProxy() {
+ return this;
+ /*
+ return (PreparedStatement) Proxy.newProxyInstance(this.getClass()
+ .getClassLoader(), new Class[] { PreparedStatement.class },
+ new JDBCProxy(this));
+ */
+ }
+
public static JDBCResultSet getQuery(int id) {
return queries.get(id);
}
rs = new JDBCResultSet(_sql);
queries.put(rs.getID(), rs);
synchronized (rs) {
- this.connection
- .send(new JDBCCommand(rs, JDBCCommand.TYPE_EXECUTE_QUERY));
+ this.connection.send(new JDBCCommand(rs,
+ JDBCCommand.TYPE_EXECUTE_QUERY));
try {
rs.wait();
} catch (Exception err) {
throw ((SQLException) rs.getError());
}
}
- return rs;
+ return rs.getProxy();
}
@Override
@Override
public boolean execute(String sql, int autoGeneratedKeys)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
- public boolean execute(String sql, int[] columnIndexes)
- throws SQLException {
+ public boolean execute(String sql, int[] columnIndexes) throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean execute(String sql, String[] columnNames)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return false;
}
@Override
public int executeUpdate(String sql, int autoGeneratedKeys)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return 0;
}
@Override
public int executeUpdate(String sql, int[] columnIndexes)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return 0;
}
@Override
public int executeUpdate(String sql, String[] columnNames)
- throws SQLException {
+ throws SQLException {
// TODO Auto-generated method stub
return 0;
}
@Override
public int getMaxRows() throws SQLException {
- // TODO Auto-generated method stub
- return 0;
+ return 200;
}
@Override
return false;
}
+ @Override
+ public ResultSet executeQuery() throws SQLException {
+ return this.executeQuery(this.sql);
+ }
+
+ @Override
+ public int executeUpdate() throws SQLException {
+ // TODO Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setByte(int parameterIndex, byte x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setShort(int parameterIndex, short x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setInt(int parameterIndex, int x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setLong(int parameterIndex, long x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setFloat(int parameterIndex, float x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setDouble(int parameterIndex, double x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBigDecimal(int parameterIndex, BigDecimal x)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setString(int parameterIndex, String x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setDate(int parameterIndex, Date x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setTime(int parameterIndex, Time x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setUnicodeStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void clearParameters() throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x) throws SQLException {
+ // TODO Auto-generated method stub
+ }
+
+ @Override
+ public boolean execute() throws SQLException {
+ // TODO Auto-generated method stub
+ return false;
+ }
+
+ @Override
+ public void addBatch() throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader, int length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setRef(int parameterIndex, Ref x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, Blob x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Clob x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setArray(int parameterIndex, Array x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public ResultSetMetaData getMetaData() throws SQLException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public void setDate(int parameterIndex, Date x, Calendar cal)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setTime(int parameterIndex, Time x, Calendar cal)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType, String typeName)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setURL(int parameterIndex, URL x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public ParameterMetaData getParameterMetaData() throws SQLException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
+ public void setRowId(int parameterIndex, RowId x) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setNString(int parameterIndex, String value)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value,
+ long length) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, NClob value) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader, long length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream, long length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader, long length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setSQLXML(int parameterIndex, SQLXML xmlObject)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType,
+ int scaleOrLength) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, long length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, long length)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader,
+ long length) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream)
+ throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+ // TODO Auto-generated method stub
+
+ }
}
--- /dev/null
+package org.opendaylight.xsql.test;
+
+import java.io.InputStream;
+import java.sql.SQLException;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
+import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCResultSet;
+import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCServer;
+
+public class XSQLTest {
+
+ XSQLBluePrint bluePrint = null;
+
+ @Before
+ public void before() {
+ try{
+ InputStream in = this.getClass().getClassLoader().getResourceAsStream("BluePrintCache.dat");
+ if(in!=null){
+ bluePrint = XSQLBluePrint.load(in);
+ log("Loaded Blue Print!");
+ }else{
+ log("Can't find Blue Print!");
+ }
+ in.close();
+ }catch(Exception err){
+ err.printStackTrace();
+ }
+ }
+
+ @Test
+ public void testQueryParsingSimpleNoCriteria() {
+ String sql = "select * from nodes/node;";
+ JDBCResultSet rs = new JDBCResultSet(sql);
+ parseTables(sql,bluePrint, rs);
+ parseFields(sql, bluePrint, rs);
+ JDBCServer.parseCriteria(rs, bluePrint);
+ if(rs.getCriteria().isEmpty()){
+ log("Test Criteria parsing of \""+sql+"\" Passed!");
+ Assert.assertEquals(true, true);
+ }else{
+ log("Test Criteria parsing of \""+sql+"\" Failed!");
+ Assert.assertEquals(false, true);
+ }
+ }
+
+ @Test
+ public void testQueryParsingComplexNoCriteria() {
+ String sql = "select nodes/node.id,nodes/node/node-connector.id,nodes/node/node-connector.hardware-address from nodes/node,nodes/node/node-connector;";
+ JDBCResultSet rs = new JDBCResultSet(sql);
+ parseTables(sql,bluePrint, rs);
+ parseFields(sql, bluePrint, rs);
+ JDBCServer.parseCriteria(rs, bluePrint);
+ if(rs.getCriteria().isEmpty()){
+ log("Test Criteria parsing of \""+sql+"\" Passed!");
+ Assert.assertEquals(true, true);
+ }else{
+ log("Test Criteria parsing of \""+sql+"\" Failed!");
+ Assert.assertEquals(false, true);
+ }
+ }
+
+ @Test
+ public void testQueryParsingComplexWithCriteria() {
+ String sql = "select nodes/node.id,nodes/node/node-connector.id,nodes/node/node-connector.hardware-address from nodes/node,nodes/node/node-connector where hardware-address like 'AB';";
+ JDBCResultSet rs = new JDBCResultSet(sql);
+ parseTables(sql,bluePrint, rs);
+ parseFields(sql, bluePrint, rs);
+ JDBCServer.parseCriteria(rs, bluePrint);
+ if(!rs.getCriteria().isEmpty()){
+ log("Test Criteria parsing of \""+sql+"\" Passed!");
+ Assert.assertEquals(true, true);
+ }else{
+ log("Test Criteria parsing of \""+sql+"\" Failed!");
+ Assert.assertEquals(false, true);
+ }
+ }
+
+ @Test
+ public void testQueryParsingSimpleWithCriteria() {
+ String sql = "select * from nodes/node where nodes/node.id like 'something...';";
+ JDBCResultSet rs = new JDBCResultSet(sql);
+ parseTables(sql,bluePrint, rs);
+ parseFields(sql, bluePrint, rs);
+ JDBCServer.parseCriteria(rs, bluePrint);
+ if(!rs.getCriteria().isEmpty()){
+ log("Test Criteria parsing of \""+sql+"\" Passed!");
+ Assert.assertEquals(true, true);
+ }else{
+ log("Test Criteria parsing of \""+sql+"\" Failed!");
+ Assert.assertEquals(false, true);
+ }
+ }
+
+ private static void parseTables(String sql,XSQLBluePrint bp,JDBCResultSet rs){
+ try{
+ JDBCServer.parseTables(rs, bp);
+ log("Test Table parsing of \""+sql+"\" Passed!");
+ Assert.assertEquals(true,true);
+ }catch(SQLException err){
+ log("Test Table parsing of \""+sql+"\" Failed!");
+ err.printStackTrace();
+ Assert.assertEquals(false,true);
+ }
+ }
+
+ @Test
+ public void testQueryParsingComplexWithCriteriaAndGrouping() {
+
+ String sub_sql = "select nodes/node.id,nodes/node/node-connector.id,nodes/node/node-connector.hardware-address from nodes/node,nodes/node/node-connector where hardware-address like 'AB';";
+
+ String sql = "SELECT DISTINCT"
+ + "\"LOGICAL_TABLE_1\".\"nodes/node.id\" AS \"COL0\"\n"
+ + ",\"LOGICAL_TABLE_1\".\"nodes/node.address\" AS \"COL1\"\n"
+ + ",\"LOGICAL_TABLE_1\".\"nodes/node/node-connector.hardware-address\" AS \"COL2\"\n"
+ + "FROM\n"
+ + "("+sub_sql+") \"LOGICAL_TABLE_1\"\n";
+
+
+
+ JDBCResultSet rs = new JDBCResultSet(sql);
+ XSQLAdapter.getInstance().loadBluePrint();
+ try{
+ JDBCServer.checkAndBreakSubQueries(rs, XSQLAdapter.getInstance());
+ if(rs.getSubQueries().isEmpty()){
+ log("Logical table parsing for "+sql+" Failed!");
+ }else{
+ JDBCServer.parseExternalQuery(rs);
+ log("Fields="+rs.getFields().size());
+ Assert.assertEquals(rs.getFields().size(), 3);
+ Assert.assertEquals(rs.getTables().size(), 1);
+ Assert.assertEquals(rs.getTables().get(0).getODLTableName(), "LOGICAL_TABLE_1");
+
+ JDBCResultSet subRS = rs.getSubQueries().values().iterator().next();
+ parseTables(sql,bluePrint, subRS);
+ parseFields(sql, bluePrint, subRS);
+ JDBCServer.parseCriteria(subRS, bluePrint);
+ if(!subRS.getCriteria().isEmpty()){
+ log("Test Criteria parsing of \""+sql+"\" Passed!");
+ Assert.assertEquals(true, true);
+ }else{
+ log("Test Criteria parsing of \""+sql+"\" Failed!");
+ Assert.assertEquals(false, true);
+ }
+ }
+ }catch(SQLException err){
+ err.printStackTrace();
+ }
+ }
+
+ private static void parseFields(String sql,XSQLBluePrint bp,JDBCResultSet rs){
+ try{
+ JDBCServer.parseFields(rs, bp);
+ log("Test Fields parsing of \""+sql+"\" Passed!");
+ Assert.assertEquals(true,true);
+ }catch(SQLException err){
+ log("Test Fields parsing of \""+sql+"\" Failed!");
+ err.printStackTrace();
+ Assert.assertEquals(false,true);
+ }
+ }
+
+ private static void log(String str) {
+ System.out.print("*** XSQL Tests -");
+ System.out.println(str);
+ }
+}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-inmemory-datastore</artifactId>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+
+final class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
+ private final SnapshotBackedWriteTransaction transaction;
+ private final DOMStoreThreePhaseCommitCohort delegate;
+ private final DOMStoreTransactionChainImpl txChain;
+
+ protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
+ final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
+ this.transaction = Preconditions.checkNotNull(transaction);
+ this.delegate = Preconditions.checkNotNull(delegate);
+ this.txChain = Preconditions.checkNotNull(txChain);
+ }
+
+ @Override
+ public ListenableFuture<Boolean> canCommit() {
+ return delegate.canCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> preCommit() {
+ return delegate.preCommit();
+ }
+
+ @Override
+ public ListenableFuture<Void> abort() {
+ return delegate.abort();
+ }
+
+ @Override
+ public ListenableFuture<Void> commit() {
+ ListenableFuture<Void> commitFuture = delegate.commit();
+ Futures.addCallback(commitFuture, new FutureCallback<Void>() {
+ @Override
+ public void onFailure(final Throwable t) {
+ txChain.onTransactionFailed(transaction, t);
+ }
+
+ @Override
+ public void onSuccess(final Void result) {
+ txChain.onTransactionCommited(transaction);
+ }
+ });
+ return commitFuture;
+ }
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.AbstractMap.SimpleEntry;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
+ private static abstract class State {
+ /**
+ * Allocate a new snapshot.
+ *
+ * @return A new snapshot
+ */
+ protected abstract DataTreeSnapshot getSnapshot();
+ }
+
+ private static final class Idle extends State {
+ private final InMemoryDOMDataStore store;
+
+ Idle(final InMemoryDOMDataStore store) {
+ this.store = Preconditions.checkNotNull(store);
+ }
+
+ @Override
+ protected DataTreeSnapshot getSnapshot() {
+ return store.takeSnapshot();
+ }
+ }
+
+ /**
+ * We have a transaction out there.
+ */
+ private static final class Allocated extends State {
+ private static final AtomicReferenceFieldUpdater<Allocated, DataTreeSnapshot> SNAPSHOT_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(Allocated.class, DataTreeSnapshot.class, "snapshot");
+ private final DOMStoreWriteTransaction transaction;
+ private volatile DataTreeSnapshot snapshot;
+
+ Allocated(final DOMStoreWriteTransaction transaction) {
+ this.transaction = Preconditions.checkNotNull(transaction);
+ }
+
+ public DOMStoreWriteTransaction getTransaction() {
+ return transaction;
+ }
+
+ @Override
+ protected DataTreeSnapshot getSnapshot() {
+ final DataTreeSnapshot ret = snapshot;
+ Preconditions.checkState(ret != null, "Previous transaction %s is not ready yet", transaction.getIdentifier());
+ return ret;
+ }
+
+ void setSnapshot(final DataTreeSnapshot snapshot) {
+ final boolean success = SNAPSHOT_UPDATER.compareAndSet(this, null, snapshot);
+ Preconditions.checkState(success, "Transaction %s has already been marked as ready", transaction.getIdentifier());
+ }
+ }
+
+ /**
+ * Chain is logically shut down, no further allocation allowed.
+ */
+ private static final class Shutdown extends State {
+ private final String message;
+
+ Shutdown(final String message) {
+ this.message = Preconditions.checkNotNull(message);
+ }
+
+ @Override
+ protected DataTreeSnapshot getSnapshot() {
+ throw new IllegalStateException(message);
+ }
+ }
+
+ private static final AtomicReferenceFieldUpdater<DOMStoreTransactionChainImpl, State> STATE_UPDATER =
+ AtomicReferenceFieldUpdater.newUpdater(DOMStoreTransactionChainImpl.class, State.class, "state");
+ private static final Logger LOG = LoggerFactory.getLogger(DOMStoreTransactionChainImpl.class);
+ private static final Shutdown CLOSED = new Shutdown("Transaction chain is closed");
+ private static final Shutdown FAILED = new Shutdown("Transaction chain has failed");
+ private final InMemoryDOMDataStore store;
+ private final Idle idleState;
+ private volatile State state;
+
+ DOMStoreTransactionChainImpl(final InMemoryDOMDataStore store) {
+ this.store = Preconditions.checkNotNull(store);
+ idleState = new Idle(store);
+ state = idleState;
+ }
+
+ private Entry<State, DataTreeSnapshot> getSnapshot() {
+ final State localState = state;
+ return new SimpleEntry<>(localState, localState.getSnapshot());
+ }
+
+ private boolean recordTransaction(final State expected, final DOMStoreWriteTransaction transaction) {
+ final State state = new Allocated(transaction);
+ return STATE_UPDATER.compareAndSet(this, expected, state);
+ }
+
+ @Override
+ public DOMStoreReadTransaction newReadOnlyTransaction() {
+ final Entry<State, DataTreeSnapshot> entry = getSnapshot();
+ return new SnapshotBackedReadTransaction(store.nextIdentifier(), store.getDebugTransactions(), entry.getValue());
+ }
+
+ @Override
+ public DOMStoreReadWriteTransaction newReadWriteTransaction() {
+ Entry<State, DataTreeSnapshot> entry;
+ DOMStoreReadWriteTransaction ret;
+
+ do {
+ entry = getSnapshot();
+ ret = new SnapshotBackedReadWriteTransaction(store.nextIdentifier(),
+ store.getDebugTransactions(), entry.getValue(), this);
+ } while (!recordTransaction(entry.getKey(), ret));
+
+ return ret;
+ }
+
+ @Override
+ public DOMStoreWriteTransaction newWriteOnlyTransaction() {
+ Entry<State, DataTreeSnapshot> entry;
+ DOMStoreWriteTransaction ret;
+
+ do {
+ entry = getSnapshot();
+ ret = new SnapshotBackedWriteTransaction(store.nextIdentifier(),
+ store.getDebugTransactions(), entry.getValue(), this);
+ } while (!recordTransaction(entry.getKey(), ret));
+
+ return ret;
+ }
+
+ @Override
+ protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+ final State localState = state;
+ if (localState instanceof Allocated) {
+ final Allocated allocated = (Allocated)localState;
+ if (allocated.getTransaction().equals(tx)) {
+ final boolean success = STATE_UPDATER.compareAndSet(this, localState, idleState);
+ if (!success) {
+ LOG.info("State already transitioned from {} to {}", localState, state);
+ }
+ }
+ }
+ }
+
+ @Override
+ protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+ final State localState = state;
+
+ if (localState instanceof Allocated) {
+ final Allocated allocated = (Allocated)localState;
+ final DOMStoreWriteTransaction transaction = allocated.getTransaction();
+ Preconditions.checkState(tx.equals(transaction), "Mis-ordered ready transaction %s last allocated was %s", tx, transaction);
+ allocated.setSnapshot(tree);
+ } else {
+ LOG.debug("Ignoring transaction {} readiness due to state {}", tx, localState);
+ }
+
+ return new ChainedTransactionCommitImpl(tx, store.transactionReady(tx, tree), this);
+ }
+
+ @Override
+ public void close() {
+ final State localState = state;
+
+ do {
+ Preconditions.checkState(!CLOSED.equals(localState), "Transaction chain {} has been closed", this);
+
+ if (FAILED.equals(localState)) {
+ LOG.debug("Ignoring user close in failed state");
+ return;
+ }
+ } while (!STATE_UPDATER.compareAndSet(this, localState, CLOSED));
+ }
+
+ void onTransactionFailed(final SnapshotBackedWriteTransaction transaction, final Throwable t) {
+ LOG.debug("Transaction chain {} failed on transaction {}", this, transaction, t);
+ state = FAILED;
+ }
+
+ void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
+ // If the committed transaction was the one we allocated last,
+ // we clear it and the ready snapshot, so the next transaction
+ // allocated refers to the data tree directly.
+ final State localState = state;
+
+ if (!(localState instanceof Allocated)) {
+ LOG.debug("Ignoring successful transaction {} in state {}", transaction, localState);
+ return;
+ }
+
+ final Allocated allocated = (Allocated)localState;
+ final DOMStoreWriteTransaction tx = allocated.getTransaction();
+ if (!tx.equals(transaction)) {
+ LOG.debug("Ignoring non-latest successful transaction {} in state {}", transaction, allocated);
+ return;
+ }
+
+ if (!STATE_UPDATER.compareAndSet(this, localState, idleState)) {
+ LOG.debug("Transaction chain {} has already transitioned from {} to {}, not making it idle", this, localState, state);
+ }
+ }
+}
\ No newline at end of file
import static com.google.common.base.Preconditions.checkState;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.concurrent.GuardedBy;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
@Override
public DOMStoreTransactionChain createTransactionChain() {
- return new DOMStoreTransactionChainImpl();
+ return new DOMStoreTransactionChainImpl(this);
}
@Override
}
}
- boolean getDebugTransactions() {
+ public final boolean getDebugTransactions() {
return debugTransactions;
}
+ final DataTreeSnapshot takeSnapshot() {
+ return dataTree.takeSnapshot();
+ }
+
@Override
public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> ListenerRegistration<L> registerChangeListener(
final YangInstanceIdentifier path, final L listener, final DataChangeScope scope) {
return new ThreePhaseCommitImpl(tx, tree);
}
- private Object nextIdentifier() {
+ Object nextIdentifier() {
return name + "-" + txCounter.getAndIncrement();
}
- private class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
- @GuardedBy("this")
- private SnapshotBackedWriteTransaction allocatedTransaction;
- @GuardedBy("this")
- private DataTreeSnapshot readySnapshot;
- @GuardedBy("this")
- private boolean chainFailed = false;
-
- @GuardedBy("this")
- private void checkFailed() {
- Preconditions.checkState(!chainFailed, "Transaction chain is failed.");
- }
-
- @GuardedBy("this")
- private DataTreeSnapshot getSnapshot() {
- checkFailed();
-
- if (allocatedTransaction != null) {
- Preconditions.checkState(readySnapshot != null, "Previous transaction %s is not ready yet", allocatedTransaction.getIdentifier());
- return readySnapshot;
- } else {
- return dataTree.takeSnapshot();
- }
- }
-
- @GuardedBy("this")
- private <T extends SnapshotBackedWriteTransaction> T recordTransaction(final T transaction) {
- allocatedTransaction = transaction;
- readySnapshot = null;
- return transaction;
- }
-
- @Override
- public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
- final DataTreeSnapshot snapshot = getSnapshot();
- return new SnapshotBackedReadTransaction(nextIdentifier(), getDebugTransactions(), snapshot);
- }
-
- @Override
- public synchronized DOMStoreReadWriteTransaction newReadWriteTransaction() {
- final DataTreeSnapshot snapshot = getSnapshot();
- return recordTransaction(new SnapshotBackedReadWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this));
- }
-
- @Override
- public synchronized DOMStoreWriteTransaction newWriteOnlyTransaction() {
- final DataTreeSnapshot snapshot = getSnapshot();
- return recordTransaction(new SnapshotBackedWriteTransaction(nextIdentifier(),
- getDebugTransactions(), snapshot, this));
- }
-
- @Override
- protected synchronized void transactionAborted(final SnapshotBackedWriteTransaction tx) {
- if (tx.equals(allocatedTransaction)) {
- Preconditions.checkState(readySnapshot == null, "Unexpected abort of transaction %s with ready snapshot %s", tx, readySnapshot);
- allocatedTransaction = null;
- }
- }
-
- @Override
- protected synchronized DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
- Preconditions.checkState(tx.equals(allocatedTransaction), "Mis-ordered ready transaction %s last allocated was %s", tx, allocatedTransaction);
- if (readySnapshot != null) {
- // The snapshot should have been cleared
- LOG.warn("Uncleared snapshot {} encountered, overwritten with transaction {} snapshot {}", readySnapshot, tx, tree);
- }
-
- final DOMStoreThreePhaseCommitCohort cohort = InMemoryDOMDataStore.this.transactionReady(tx, tree);
- readySnapshot = tree;
- return new ChainedTransactionCommitImpl(tx, cohort, this);
- }
-
- @Override
- public void close() {
- // FIXME: this call doesn't look right here - listeningExecutor is shared and owned
- // by the outer class.
- //listeningExecutor.shutdownNow();
- }
-
- protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction,
- final Throwable t) {
- chainFailed = true;
- }
-
- public synchronized void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
- // If the committed transaction was the one we allocated last,
- // we clear it and the ready snapshot, so the next transaction
- // allocated refers to the data tree directly.
- if (transaction.equals(allocatedTransaction)) {
- if (readySnapshot == null) {
- LOG.warn("Transaction {} committed while no ready snapshot present", transaction);
- }
-
- allocatedTransaction = null;
- readySnapshot = null;
- }
- }
- }
-
- private static class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
- private final SnapshotBackedWriteTransaction transaction;
- private final DOMStoreThreePhaseCommitCohort delegate;
- private final DOMStoreTransactionChainImpl txChain;
-
- protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
- final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
- this.transaction = transaction;
- this.delegate = delegate;
- this.txChain = txChain;
- }
-
- @Override
- public ListenableFuture<Boolean> canCommit() {
- return delegate.canCommit();
- }
-
- @Override
- public ListenableFuture<Void> preCommit() {
- return delegate.preCommit();
- }
-
- @Override
- public ListenableFuture<Void> abort() {
- return delegate.abort();
- }
-
- @Override
- public ListenableFuture<Void> commit() {
- ListenableFuture<Void> commitFuture = delegate.commit();
- Futures.addCallback(commitFuture, new FutureCallback<Void>() {
- @Override
- public void onFailure(final Throwable t) {
- txChain.onTransactionFailed(transaction, t);
- }
-
- @Override
- public void onSuccess(final Void result) {
- txChain.onTransactionCommited(transaction);
- }
- });
- return commitFuture;
- }
- }
-
- private class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
+ private final class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
private final SnapshotBackedWriteTransaction transaction;
private final DataTreeModification modification;
<parent>\r
<artifactId>sal-parent</artifactId>\r
<groupId>org.opendaylight.controller</groupId>\r
- <version>1.1-SNAPSHOT</version>\r
+ <version>1.2.0-SNAPSHOT</version>\r
</parent>\r
\r
<groupId>org.opendaylight.controller</groupId>\r
<dependency>\r
<groupId>org.opendaylight.controller</groupId>\r
<artifactId>sal-dom-xsql</artifactId>\r
- <type>bundle</type>\r
- <version>1.1-SNAPSHOT</version>\r
+ <version>1.2.0-SNAPSHOT</version>\r
</dependency>\r
</dependencies>\r
\r
private String argument;
protected Object doExecute() throws Exception {
+ if(argument==null){
+ System.out.println("Nothing to do..., please specify a command.");
+ return null;
+ }
XSQLAdapter.getInstance().processCommand(new StringBuffer(argument),
System.out);
return null;
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+
+<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
+ xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.0.0">
+
+ <command-bundle xmlns="http://karaf.apache.org/xmlns/shell/v1.1.0">
+ <command>
+ <action class="org.opendaylight.controller.xsql.xsql">
+ </action>
+ </command>
+ </command-bundle>
+</blueprint>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-netconf-connector</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-remote</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-remoterpc-connector</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-rest-connector-config</artifactId>
<description>Configuration files for sal-rest-connector</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-rest-connector</artifactId>
<packaging>bundle</packaging>
org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.rest.connector.rev140724.*,
</Private-Package>
<Import-Package>*,
- com.sun.jersey.spi.container.servlet</Import-Package>
+ com.sun.jersey.spi.container.servlet, org.eclipse.jetty.servlets</Import-Package>
<Web-ContextPath>/restconf</Web-ContextPath>
</instructions>
</configuration>
import org.slf4j.LoggerFactory;
class JsonToCompositeNodeReader {
- private static final Logger LOG = LoggerFactory.getLogger(JsonReader.class);
+ private static final Logger LOG = LoggerFactory.getLogger(JsonToCompositeNodeReader.class);
private static final Splitter COLON_SPLITTER = Splitter.on(':');
private JsonToCompositeNodeReader() {
}
}
+ /**
+ * Transform input value to URI instance.
+ *
+ * Input string has to be in format moduleName:localName. moduleName part is then transformed to URI instance.
+ * If moduleName part contains character like "<" or ">" then null value is returned because they
+ * aren't valid URI characters.
+ *
+ * @param jsonElementName
+ * value in format moduleName:localName
+ * @return
+ */
private static URI getNamespaceFor(final String jsonElementName) {
final Iterator<String> it = COLON_SPLITTER.split(jsonElementName).iterator();
- // The string needs to me in form "moduleName:localName"
+ // The string needs to be in form "moduleName:localName"
if (it.hasNext()) {
final String maybeURI = it.next();
if (Iterators.size(it) == 1) {
- return URI.create(maybeURI);
+ try {
+ return URI.create(maybeURI);
+ } catch (IllegalArgumentException e) {
+ LOG.debug("Value {} couldn't be interpreted as URI.", maybeURI);
+ }
}
}
}
}
- // it could be identityref Built-In Type
+ // it could be identityref Built-In Type therefore it is necessary to look at value as module_name:local_name
URI namespace = getNamespaceFor(value);
if (namespace != null) {
return new IdentityValuesDTO(namespace.toString(), getLocalNameFor(value), null, value);
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.Response;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;
import javax.xml.stream.FactoryConfigurationError;
import org.opendaylight.controller.sal.restconf.impl.InstanceIdentifierContext;
import org.opendaylight.controller.sal.restconf.impl.NormalizedNodeContext;
import org.opendaylight.controller.sal.restconf.impl.RestconfDocumentedException;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorTag;
+import org.opendaylight.controller.sal.restconf.impl.RestconfError.ErrorType;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
WebApplicationException {
InstanceIdentifierContext pathContext = t.getInstanceIdentifierContext();
if (t.getData() == null) {
- throw new RestconfDocumentedException(Response.Status.NOT_FOUND);
+ throw new RestconfDocumentedException(
+ "Request could not be completed because the relevant data model content does not exist.",
+ ErrorType.APPLICATION, ErrorTag.DATA_MISSING);
}
XMLStreamWriter xmlWriter;
*/
package org.opendaylight.controller.sal.restconf.impl;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
+
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import javax.ws.rs.core.Response.Status;
import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.ws.rs.core.Response.Status;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
public class BrokerFacade {
private final static Logger LOG = LoggerFactory.getLogger(BrokerFacade.class);
private CheckedFuture<Void, TransactionCommitFailedException> deleteDataViaTransaction(
final DOMDataWriteTransaction writeTransaction, final LogicalDatastoreType datastore,
YangInstanceIdentifier path) {
- LOG.info("Delete " + datastore.name() + " via Restconf: {}", path);
+ LOG.trace("Delete " + datastore.name() + " via Restconf: {}", path);
writeTransaction.delete(datastore, path);
return writeTransaction.submit();
}
try {
- CheckedFuture<Boolean, ReadFailedException> future =
- rwTx.exists(store, currentPath);
+ CheckedFuture<Boolean, ReadFailedException> future = rwTx.exists(store, currentPath);
exists = future.checkedGet();
} catch (ReadFailedException e) {
LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
throw new IllegalStateException("Failed to read pre-existing data", e);
}
-
if (!exists && iterator.hasNext()) {
rwTx.merge(store, currentPath, currentOp.createDefault(currentArg));
}
package org.opendaylight.controller.sal.restconf.impl;
import com.google.common.base.Objects;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;
+import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.ModifiedNodeDoesNotExistException;
import org.opendaylight.yangtools.yang.data.composite.node.schema.cnsn.parser.CnSnToNormalizedNodeParserFactory;
import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
import org.opendaylight.yangtools.yang.data.impl.NodeFactory;
broker.commitConfigurationDataDelete(normalizedII).get();
}
} catch (Exception e) {
- throw new RestconfDocumentedException("Error creating data", e);
+ final Optional<Throwable> searchedException = Iterables.tryFind(Throwables.getCausalChain(e),
+ Predicates.instanceOf(ModifiedNodeDoesNotExistException.class));
+ if (searchedException.isPresent()) {
+ throw new RestconfDocumentedException("Data specified for deleting doesn't exist.", ErrorType.APPLICATION, ErrorTag.DATA_MISSING);
+ }
+ throw new RestconfDocumentedException("Error while deleting data", e);
}
-
return Response.status(Status.OK).build();
}
<url-pattern>/*</url-pattern>
</servlet-mapping>
+ <filter>
+ <filter-name>cross-origin-restconf</filter-name>
+ <filter-class>org.eclipse.jetty.servlets.CrossOriginFilter</filter-class>
+ <init-param>
+ <param-name>allowedOrigins</param-name>
+ <param-value>*</param-value>
+ </init-param>
+ <init-param>
+ <param-name>allowedMethods</param-name>
+ <param-value>GET,POST,OPTIONS,DELETE,PUT,HEAD</param-value>
+ </init-param>
+ <init-param>
+ <param-name>allowedHeaders</param-name>
+ <param-value>origin, content-type, accept, authorization</param-value>
+ </init-param>
+ </filter>
+ <filter-mapping>
+ <filter-name>cross-origin-restconf</filter-name>
+ <url-pattern>/*</url-pattern>
+ </filter-mapping>
+
<security-constraint>
<web-resource-collection>
<web-resource-name>NB api</web-resource-name>
description
"Added input parameters to rpc create-data-change-event-subscription";
- revision "2014-7-8" {
+ revision "2014-07-08" {
}
augment "/salrmt:create-data-change-event-subscription/salrmt:input" {
}
}
-}
\ No newline at end of file
+}
assertTrue(exceptionMessage.contains("Root element of Json has to be Object"));
}
+ /**
+ * Tests case when JSON input data value is in format string1:string2 and first string contain characters "<" or ">" (invalid URI characters).
+ *
+ * During loading data it is also interpreting as data value in moduleName:localName (potential leafref value).
+ * ModuleName part is transformed to URI which causes exception which is caught and URI value is null which cause that potential value in simple node is
+ * simple string (value from JSON input) and not IdentityValueDTO instance which is used for leaf-ref candidates.
+ */
+ @Test
+ public void invalidUriCharacterInValue() {
+ final Node<?> rootNode = TestUtils.readInputToCnSn("/json-to-cnsn/invalid-uri-character-in-value.json", true,
+ JsonToCompositeNodeProvider.INSTANCE);
+
+ assertTrue(rootNode instanceof CompositeNode);
+ Node<?> lf1 = null;
+ Node<?> lf2 = null;
+ for(Node<?> child : ((CompositeNode)rootNode).getChildren()) {
+ if (child.getNodeType().getLocalName().equals("lf1")) {
+ lf1 = child;
+ } else if (child.getNodeType().getLocalName().equals("lf2")) {
+ lf2 = child;
+ }
+ }
+
+ assertNotNull(lf1);
+ assertNotNull(lf2);
+ assertTrue(lf1 instanceof SimpleNode<?>);
+ assertTrue(lf2 instanceof SimpleNode<?>);
+
+ assertEquals("module<Name:value lf1", ((SimpleNode<?>) lf1).getValue());
+ assertEquals("module>Name:value lf2", ((SimpleNode<?>) lf2).getValue());
+ }
+
}
package org.opendaylight.controller.sal.restconf.impl.test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
import com.google.common.base.Optional;
import com.google.common.util.concurrent.CheckedFuture;
import com.google.common.util.concurrent.Futures;
+import java.util.concurrent.Future;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import java.util.concurrent.Future;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
-
/**
* Unit tests for BrokerFacade.
*
when(wTransaction.submit()).thenReturn(expFuture);
+ NormalizedNode<?, ?> dummyNode2 = createDummyNode("dummy:namespace2", "2014-07-01", "dummy local name2");
+
+
CheckedFuture<Void, TransactionCommitFailedException> actualFuture = brokerFacade
.commitConfigurationDataDelete(instanceID);
--- /dev/null
+{
+ "moduleName:cont":{
+ "lf1":"module<Name:value lf1",
+ "lf2":"module>Name:value lf2"
+ }
+}
\ No newline at end of file
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-rest-docgen</artifactId>
<instructions>
<Bundle-Name>MD SAL Rest Api Doc Generator</Bundle-Name>
<Import-Package>*,
- com.sun.jersey.spi.container.servlet</Import-Package>
+ com.sun.jersey.spi.container.servlet, org.eclipse.jetty.servlets</Import-Package>
<Bundle-Activator>org.opendaylight.controller.sal.rest.doc.DocProvider</Bundle-Activator>
<Web-ContextPath>/apidoc</Web-ContextPath>
</instructions>
resourcePath = getDataStorePath("/operational/", context);
addApis(node, apis, resourcePath, pathParams, schemaContext, false);
}
+ }
- Set<RpcDefinition> rpcs = m.getRpcs();
- for (RpcDefinition rpcDefinition : rpcs) {
- String resourcePath = getDataStorePath("/operations/", context);
- addRpcs(rpcDefinition, apis, resourcePath, schemaContext);
- }
+ Set<RpcDefinition> rpcs = m.getRpcs();
+ for (RpcDefinition rpcDefinition : rpcs) {
+ String resourcePath = getDataStorePath("/operations/", context);
+ addRpcs(rpcDefinition, apis, resourcePath, schemaContext);
}
_logger.debug("Number of APIs found [{}]", apis.size());
for (DataSchemaNode childNode : module.getChildNodes()) {
// For every container and list in the module
- processDataNodeContainer((DataNodeContainer) childNode, moduleName, models, true, schemaContext);
- processDataNodeContainer((DataNodeContainer) childNode, moduleName, models, false, schemaContext);
+ if (childNode instanceof ContainerSchemaNode || childNode instanceof ListSchemaNode) {
+ processDataNodeContainer((DataNodeContainer) childNode, moduleName, models, true, schemaContext);
+ processDataNodeContainer((DataNodeContainer) childNode, moduleName, models, false, schemaContext);
+ }
}
}
property.put(TYPE_KEY, childNode instanceof ListSchemaNode ? ARRAY_TYPE : OBJECT_TYPE);
property.put(ITEMS_KEY, items);
properties.put(childNode.getQName().getLocalName(), property);
+ } else if (childNode instanceof LeafSchemaNode){
+ JSONObject property = processLeafNode((LeafSchemaNode)childNode);
+ properties.put(childNode.getQName().getLocalName(), property);
}
}
return properties;
<url-pattern>/apis/*</url-pattern>
</servlet-mapping>
- <!--filter>
- <filter-name>CorsFilter</filter-name>
- <filter-class>org.apache.catalina.filters.CorsFilter</filter-class>
+ <filter>
+ <filter-name>cross-origin-api-doc</filter-name>
+ <filter-class>org.eclipse.jetty.servlets.CrossOriginFilter</filter-class>
<init-param>
- <param-name>cors.allowed.origins</param-name>
+ <param-name>allowedOrigins</param-name>
<param-value>*</param-value>
</init-param>
<init-param>
- <param-name>cors.allowed.methods</param-name>
- <param-value>GET,POST,HEAD,OPTIONS,PUT,DELETE</param-value>
+ <param-name>allowedMethods</param-name>
+ <param-value>GET,POST,OPTIONS,DELETE,PUT,HEAD</param-value>
</init-param>
<init-param>
- <param-name>cors.allowed.headers</param-name>
- <param-value>Content-Type,X-Requested-With,accept,authorization,
- origin,Origin,Access-Control-Request-Method,Access-Control-Request-Headers</param-value>
- </init-param>
- <init-param>
- <param-name>cors.exposed.headers</param-name>
- <param-value>Access-Control-Allow-Origin,Access-Control-Allow-Credentials</param-value>
- </init-param>
- <init-param>
- <param-name>cors.support.credentials</param-name>
- <param-value>true</param-value>
- </init-param>
- <init-param>
- <param-name>cors.preflight.maxage</param-name>
- <param-value>10</param-value>
+ <param-name>allowedHeaders</param-name>
+ <param-value>origin, content-type, accept, authorization</param-value>
</init-param>
</filter>
<filter-mapping>
- <filter-name>CorsFilter</filter-name>
- <url-pattern>/*</url-pattern>
- </filter-mapping-->
+ <filter-name>cross-origin-api-doc</filter-name>
+ <url-pattern>/apis/*</url-pattern>
+ </filter-mapping>
+
+
<security-constraint>
<web-resource-collection>
<web-resource-name>free access</web-resource-name>
--- /dev/null
+package org.opendaylight.controller.sal.rest.doc.impl;
+
+import com.google.common.base.Preconditions;
+import org.json.JSONObject;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+
+import java.io.File;
+import java.util.HashSet;
+import java.util.Map;
+
+
+public class ModelGeneratorTest {
+
+ private DocGenTestHelper helper;
+ private SchemaContext schemaContext;
+
+ @Before
+ public void setUp() throws Exception {
+ helper = new DocGenTestHelper();
+ helper.setUp();
+ schemaContext = new YangParserImpl().resolveSchemaContext(new HashSet<Module>(helper.getModules().values()));
+ }
+
+ @Test
+ public void testConvertToJsonSchema() throws Exception {
+
+ Preconditions.checkArgument(helper.getModules() != null, "No modules found");
+
+ ModelGenerator generator = new ModelGenerator();
+
+ for (Map.Entry<File, Module> m : helper.getModules().entrySet()) {
+ if (m.getKey().getAbsolutePath().endsWith("opflex.yang")) {
+
+ JSONObject jsonObject = generator.convertToJsonSchema(m.getValue(), schemaContext);
+ Assert.assertNotNull(jsonObject);
+ }
+ }
+
+ }
+}
\ No newline at end of file
--- /dev/null
+module opflex {
+ yang-version 1;
+
+ namespace "urn:opendaylight:groupbasedpolicy:opflex";
+ prefix "opflex";
+
+
+
+
+
+ description
+ "This module defines the group-based policy OpFlex renderer model.";
+
+ revision "2014-05-28" {
+ description
+ "Initial revision.";
+ }
+
+ typedef serialization {
+ description
+ "The serialization to use for OpFlex messages.";
+
+ type enumeration {
+ enum json {
+ description
+ "JSON 1.0 serialization.";
+ }
+ enum xml {
+ description
+ "XML serialization.";
+ }
+ enum binary {
+ description
+ "OpFlex binary serialization.";
+ }
+ }
+ }
+
+ // ******************
+ // Configuration Data
+ // ******************
+ leaf domain {
+ description
+ "The OpFlex administrative domain.";
+
+ config true;
+
+ type string;
+ }
+}
\ No newline at end of file
"Toaster module in progress.";
}
+ leaf domain {
+ description
+ "Toaster domain.";
+
+ config true;
+
+ type string;
+ }
identity toast-type {
description
"Base for all bread types supported by the toaster.
- New bread types not listed here nay be added in the
+ New bread types not listed here nay be added in the
future.";
}
"Indicates the toaster service is available";
description
"Top-level container for all toaster database objects.";
-
+
leaf testToasterBits {
type bits {
bit testbit1 {
}
default "testbit2";
}
-
+
leaf testUnion {
type union {
type int32;
type string;
}
-
- }
-
+
+ }
+
leaf-list allow-user {
type string;
description "A list of user name patterns to allow";
-
+
}
-
+
choice how {
default interval;
case interval {
type string;
}
}
- }
-
+ }
+
leaf toasterManufacturer {
type DisplayString;
config false;
mandatory true;
description
- "The name of the toaster's manufacturer. For instance,
+ "The name of the toaster's manufacturer. For instance,
Microsoft Toaster.";
}
config false;
mandatory true;
description
- "This variable indicates the current state of
+ "This variable indicates the current state of
the toaster.";
}
}
rpc make-toast {
description
"Make some toast.
- The toastDone notification will be sent when
+ The toastDone notification will be sent when
the toast is finished.
An 'in-use' error will be returned if toast
is already being made.
- A 'resource-denied' error will be returned
+ A 'resource-denied' error will be returned
if the toaster service is disabled.";
input {
leaf toasterDoneness {
}
default '5';
description
- "This variable controls how well-done is the
+ "This variable controls how well-done is the
ensuing toast. It should be on a scale of 1 to 10.
- Toast made at 10 generally is considered unfit
- for human consumption; toast made at 1 is warmed
+ Toast made at 10 generally is considered unfit
+ for human consumption; toast made at 1 is warmed
lightly.";
}
}
default 'wheat-bread';
description
- "This variable informs the toaster of the type of
- material that is being toasted. The toaster
- uses this information, combined with
- toasterDoneness, to compute for how
- long the material must be toasted to achieve
+ "This variable informs the toaster of the type of
+ material that is being toasted. The toaster
+ uses this information, combined with
+ toasterDoneness, to compute for how
+ long the material must be toasted to achieve
the required doneness.";
}
}
- }
+ }
rpc cancel-toast {
description
"Stop making toast, if any is being made.
- A 'resource-denied' error will be returned
+ A 'resource-denied' error will be returned
if the toaster service is disabled.";
- }
-
+ }
+
notification toastDone {
description
"Indicates that the toast in progress has completed.";
description
"Indicates the final toast status";
}
- }
- }
+ }
+ }
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sal-restconf-broker</artifactId>
<packaging>bundle</packaging>
<parent>
<artifactId>sal-parent</artifactId>
<groupId>org.opendaylight.controller</groupId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>clustering-it</artifactId>
<groupId>org.opendaylight.controller.samples</groupId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>clustering-it-config</artifactId>
<packaging>jar</packaging>
<parent>
<artifactId>clustering-it</artifactId>
<groupId>org.opendaylight.controller.samples</groupId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>clustering-it-model</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>clustering-it</artifactId>
<packaging>pom</packaging>
<parent>
<artifactId>clustering-it</artifactId>
<groupId>org.opendaylight.controller.samples</groupId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>clustering-it-provider</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../..</relativePath>
</parent>
<groupId>org.opendaylight.controller.samples.l2switch.md</groupId>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../..</relativePath>
</parent>
<groupId>org.opendaylight.controller.samples.l2switch.md</groupId>
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>org.opendaylight.controller.samples</groupId>
+ <artifactId>sal-samples</artifactId>
+ <version>1.2.0-SNAPSHOT</version>
+ </parent>
<artifactId>l2switch.aggregator</artifactId>
<groupId>org.opendaylight.controller.samples.l2switch</groupId>
- <version>1.0.0-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<modules>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller.samples</groupId>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>toaster-config</artifactId>
<description>Configuration files for toaster</description>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sample-toaster-consumer</artifactId>
<packaging>bundle</packaging>
<properties>
- <sal-binding-api.version>1.1-SNAPSHOT</sal-binding-api.version>
+ <sal-binding-api.version>1.2.0-SNAPSHOT</sal-binding-api.version>
</properties>
<dependencies>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sample-toaster-it</artifactId>
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sample-toaster-provider</artifactId>
<packaging>bundle</packaging>
<properties>
- <sal-binding-api.version>1.1-SNAPSHOT</sal-binding-api.version>
+ <sal-binding-api.version>1.2.0-SNAPSHOT</sal-binding-api.version>
</properties>
<dependencies>
{
darknessFactor.set( darkness );
}
+
+ LOG.info("onDataChanged - new Toaster config: {}", toaster);
}
}
<parent>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>sal-samples</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<artifactId>sample-toaster</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller.md</groupId>
<artifactId>statistics-manager</artifactId>
<artifactId>junit</artifactId>
</dependency>
<dependency>
- <groupId>org.eclipse.xtend</groupId>
- <artifactId>org.eclipse.xtend.lib</artifactId>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>sal-binding-broker-impl</artifactId>
+ <scope>test</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<configuration>
<instructions>
<Bundle-Activator>org.opendaylight.controller.md.statistics.manager.StatisticsManagerActivator</Bundle-Activator>
- <Private-Package>org.opendaylight.controller.md.statistics.manager</Private-Package>
</instructions>
</configuration>
</plugin>
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-abstract class AbstractListeningStatsTracker<I, K> extends AbstractStatsTracker<I, K> implements AutoCloseable, DataChangeListener {
- private static final Logger logger = LoggerFactory.getLogger(AbstractListeningStatsTracker.class);
- private ListenerRegistration<?> reg;
-
- protected AbstractListeningStatsTracker(FlowCapableContext context) {
- super(context);
- }
-
- protected abstract InstanceIdentifier<?> listenPath();
- protected abstract String statName();
-
- public void start(final DataBrokerService dbs) {
- Preconditions.checkState(reg == null);
-
- reg = dbs.registerDataChangeListener(listenPath(), this);
- logger.debug("{} Statistics tracker for node {} started", statName(), getNodeIdentifier());
- }
-
- @Override
- public final void close() {
- if (reg != null) {
- try {
- reg.close();
- } catch (Exception e) {
- logger.warn("Failed to stop {} Statistics tracker for node {}", statName(), getNodeIdentifier(), e);
- }
- reg = null;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.Future;
-
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.InstanceIdentifierBuilder;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.JdkFutureAdapters;
-
-abstract class AbstractStatsTracker<I, K> {
- private static final Logger logger = LoggerFactory.getLogger(AbstractStatsTracker.class);
-
- private static final int WAIT_FOR_REQUEST_CYCLE = 2;
-
- private final FutureCallback<RpcResult<? extends TransactionAware>> callback =
- new FutureCallback<RpcResult<? extends TransactionAware>>() {
- @Override
- public void onSuccess(RpcResult<? extends TransactionAware> result) {
- if (result.isSuccessful()) {
- final TransactionId id = result.getResult().getTransactionId();
- if (id == null) {
- final Throwable t = new UnsupportedOperationException("No protocol support");
- t.fillInStackTrace();
- onFailure(t);
- } else {
- context.registerTransaction(id);
- }
- } else {
- logger.debug("Statistics request failed: {}", result.getErrors());
-
- final Throwable t = new RPCFailedException("Failed to send statistics request", result.getErrors());
- t.fillInStackTrace();
- onFailure(t);
- }
- }
-
- @Override
- public void onFailure(Throwable t) {
- logger.debug("Failed to send statistics request", t);
- }
- };
-
- private final Map<K, Long> trackedItems = new HashMap<>();
- private final FlowCapableContext context;
- private long requestCounter;
-
- protected AbstractStatsTracker(final FlowCapableContext context) {
- this.context = Preconditions.checkNotNull(context);
- this.requestCounter = 0;
- }
-
- protected final InstanceIdentifierBuilder<Node> getNodeIdentifierBuilder() {
- return getNodeIdentifier().builder();
- }
-
- protected final NodeRef getNodeRef() {
- return context.getNodeRef();
- }
-
- protected final InstanceIdentifier<Node> getNodeIdentifier() {
- return context.getNodeIdentifier();
- }
-
- protected final <T extends TransactionAware> void requestHelper(Future<RpcResult<T>> future) {
- Futures.addCallback(JdkFutureAdapters.listenInPoolThread(future), callback);
- }
-
- protected final DataModificationTransaction startTransaction() {
- return context.startDataModification();
- }
-
- public final synchronized void increaseRequestCounter(){
- this.requestCounter++;
- }
- protected abstract void cleanupSingleStat(DataModificationTransaction trans, K item);
- protected abstract K updateSingleStat(DataModificationTransaction trans, I item);
- protected abstract K createInvariantKey(K item);
- public abstract void request();
-
- public final synchronized void updateStats(List<I> list) {
-
- final DataModificationTransaction trans = startTransaction();
- for (final I item : list) {
- K key = updateSingleStat(trans, item);
- trackedItems.put(createInvariantKey(key), requestCounter);
- }
-
- trans.commit();
- }
-
- /**
- * Statistics will be cleaned up if not update in last two request cycles.
- * @param trans
- */
- public final synchronized void cleanup(final DataModificationTransaction trans) {
- for (Iterator<Entry<K, Long>> it = trackedItems.entrySet().iterator();it.hasNext();){
- Entry<K, Long> e = it.next();
- if (requestCounter >= e.getValue()+WAIT_FOR_REQUEST_CYCLE) {
- cleanupSingleStat(trans, e.getKey());
- it.remove();
- }
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Interface exposed to AbstractStatsTracker by its parent NodeStatisticsHandler.
- * While we could simply exist without this interface, its purpose is to document
- * the contract between the two classes.
- */
-interface FlowCapableContext {
- InstanceIdentifier<Node> getNodeIdentifier();
- NodeRef getNodeRef();
- DataModificationTransaction startDataModification();
- void registerTransaction(TransactionId id);
- void registerTableTransaction(TransactionId id, Short tableId);
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.Collection;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Collections2;
-import com.google.common.collect.Sets;
-
-/**
- * There is a single instance of this class and that instance is responsible for
- * monitoring the operational data store for nodes being created/deleted and
- * notifying StatisticsProvider. These events then control the lifecycle of
- * NodeStatisticsHandler for a particular switch.
- */
-final class FlowCapableTracker implements DataChangeListener {
- private static final Logger logger = LoggerFactory.getLogger(FlowCapableTracker.class);
-
- private final InstanceIdentifier<FlowCapableNode> root;
- private final StatisticsProvider stats;
-
- private final Predicate<InstanceIdentifier<?>> filterIdentifiers = new Predicate<InstanceIdentifier<?>>() {
- @Override
- public boolean apply(final InstanceIdentifier<?> input) {
- /*
- * This notification has been triggered either by the ancestor,
- * descendant or directly for the FlowCapableNode itself. We
- * are not interested descendants, so let's prune them based
- * on the depth of their identifier.
- */
- if (root.getPath().size() < input.getPath().size()) {
- logger.debug("Ignoring notification for descendant {}", input);
- return false;
- }
-
- logger.debug("Including notification for {}", input);
- return true;
- }
- };
-
- public FlowCapableTracker(final StatisticsProvider stats, InstanceIdentifier<FlowCapableNode> root) {
- this.stats = Preconditions.checkNotNull(stats);
- this.root = Preconditions.checkNotNull(root);
- }
-
- /*
- * This method is synchronized because we want to make sure to serialize input
- * from the datastore. Competing add/remove could be problematic otherwise.
- */
- @Override
- public synchronized void onDataChanged(final DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- logger.debug("Tracker at root {} processing notification", root);
-
- /*
- * First process all the identifiers which were removed, trying to figure out
- * whether they constitute removal of FlowCapableNode.
- */
- final Collection<NodeKey> removedNodes =
- Collections2.filter(Collections2.transform(
- Sets.filter(change.getRemovedOperationalData(), filterIdentifiers),
- new Function<InstanceIdentifier<?>, NodeKey>() {
- @Override
- public NodeKey apply(final InstanceIdentifier<?> input) {
- final NodeKey key = input.firstKeyOf(Node.class, NodeKey.class);
- if (key == null) {
- // FIXME: do we have a backup plan?
- logger.info("Failed to extract node key from {}", input);
- }
- return key;
- }
- }), Predicates.notNull());
- stats.stopNodeHandlers(removedNodes);
-
- final Collection<NodeKey> addedNodes =
- Collections2.filter(Collections2.transform(
- Sets.filter(change.getCreatedOperationalData().keySet(), filterIdentifiers),
- new Function<InstanceIdentifier<?>, NodeKey>() {
- @Override
- public NodeKey apply(final InstanceIdentifier<?> input) {
- final NodeKey key = input.firstKeyOf(Node.class, NodeKey.class);
- if (key == null) {
- // FIXME: do we have a backup plan?
- logger.info("Failed to extract node key from {}", input);
- }
- return key;
- }
- }), Predicates.notNull());
- stats.startNodeHandlers(addedNodes);
-
- logger.debug("Tracker at root {} finished processing notification", root);
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-
-final class FlowStatsEntry {
- private final Short tableId;
- private final Flow flow;
-
- public FlowStatsEntry(Short tableId, Flow flow){
- this.tableId = tableId;
- this.flow = flow;
- }
-
- public Short getTableId() {
- return tableId;
- }
-
- public Flow getFlow() {
- return flow;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((flow == null) ? 0 : flow.hashCode());
- result = prime * result + ((tableId == null) ? 0 : tableId.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (getClass() != obj.getClass())
- return false;
- FlowStatsEntry other = (FlowStatsEntry) obj;
- if (flow == null) {
- if (other.flow != null)
- return false;
- } else if (!flow.equals(other.flow))
- return false;
- if (tableId == null) {
- if (other.tableId != null)
- return false;
- } else if (!tableId.equals(other.tableId))
- return false;
- return true;
- }
-
- @Override
- public String toString() {
- return "FlowStatsEntry [tableId=" + tableId + ", flow=" + flow + "]";
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.math.BigInteger;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map.Entry;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCookieMapping;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowCookieMap;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowCookieMapBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowCookieMapKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsDataBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapListBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.statistics.FlowStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowCookie;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.statistics.types.rev130925.GenericStatistics;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-
-final class FlowStatsTracker extends AbstractListeningStatsTracker<FlowAndStatisticsMapList, FlowStatsEntry> {
- private static final Logger LOG = LoggerFactory.getLogger(FlowStatsTracker.class);
- private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
- private final OpendaylightFlowStatisticsService flowStatsService;
- private FlowTableStatsTracker flowTableStats;
- private int unaccountedFlowsCounter = 1;
-
-
- FlowStatsTracker(final OpendaylightFlowStatisticsService flowStatsService, final FlowCapableContext context) {
- super(context);
- this.flowStatsService = flowStatsService;
- }
- FlowStatsTracker(final OpendaylightFlowStatisticsService flowStatsService, final FlowCapableContext context, final FlowTableStatsTracker flowTableStats) {
- this(flowStatsService, context);
- this.flowTableStats = flowTableStats;
- }
-
- @Override
- protected void cleanupSingleStat(final DataModificationTransaction trans, final FlowStatsEntry item) {
- KeyedInstanceIdentifier<Flow, FlowKey> flowRef = getNodeIdentifier()
- .augmentation(FlowCapableNode.class)
- .child(Table.class, new TableKey(item.getTableId()))
- .child(Flow.class, item.getFlow().getKey());
- trans.removeOperationalData(flowRef);
- }
-
- @Override
- protected FlowStatsEntry updateSingleStat(final DataModificationTransaction trans, final FlowAndStatisticsMapList map) {
- short tableId = map.getTableId();
-
- FlowStatisticsDataBuilder flowStatisticsData = new FlowStatisticsDataBuilder();
-
- FlowBuilder flowBuilder = new FlowBuilder(map);
- if (map.getFlowId() != null) {
- flowBuilder.setId(new FlowId(map.getFlowId().getValue()));
- }
- if (map.getFlowId() != null) {
- flowBuilder.setKey(new FlowKey(new FlowId(map.getKey().getFlowId().getValue())));
- }
-
- Flow flowRule = flowBuilder.build();
-
- FlowAndStatisticsMapListBuilder stats = new FlowAndStatisticsMapListBuilder();
- stats.setByteCount(map.getByteCount());
- stats.setPacketCount(map.getPacketCount());
- stats.setDuration(map.getDuration());
-
- GenericStatistics flowStats = stats.build();
-
- //Augment the data to the flow node
-
- FlowStatisticsBuilder flowStatistics = new FlowStatisticsBuilder();
- flowStatistics.setByteCount(flowStats.getByteCount());
- flowStatistics.setPacketCount(flowStats.getPacketCount());
- flowStatistics.setDuration(flowStats.getDuration());
-
- flowStatisticsData.setFlowStatistics(flowStatistics.build());
-
- LOG.debug("Flow : {}",flowRule.toString());
- LOG.debug("Statistics to augment : {}",flowStatistics.build().toString());
-
- InstanceIdentifier<Table> tableRef = getNodeIdentifierBuilder()
- .augmentation(FlowCapableNode.class)
- .child(Table.class, new TableKey(tableId)).toInstance();
-
- final FlowCookie flowCookie = flowRule.getCookie() != null
- ? flowRule.getCookie() : new FlowCookie(BigInteger.ZERO);
- final InstanceIdentifier<FlowCookieMap> flowCookieRef = tableRef
- .augmentation(FlowCookieMapping.class)
- .child(FlowCookieMap.class, new FlowCookieMapKey(flowCookie));
-
- FlowCookieMap cookieMap = (FlowCookieMap) trans.readOperationalData(flowCookieRef);
-
- /* find flowKey in FlowCookieMap from DataStore/OPERATIONAL */
- Optional<FlowKey> flowKey = this.getExistFlowKey(flowRule, tableRef, trans, cookieMap);
- if ( ! flowKey.isPresent()) {
- /* DataStore/CONFIG For every first statistic needs to be created */
- flowKey = this.getFlowKeyFromExistFlow(flowRule, tableRef, trans);
- if ( ! flowKey.isPresent()) {
- /* Alien flow */
- flowKey = this.makeAlienFlowKey(flowRule);
- }
- cookieMap = applyNewFlowKey(cookieMap, flowKey, flowCookie);
- trans.putOperationalData(flowCookieRef, cookieMap);
- }
-
- InstanceIdentifier<Flow> flowRef = getNodeIdentifierBuilder()
- .augmentation(FlowCapableNode.class)
- .child(Table.class, new TableKey(tableId))
- .child(Flow.class, flowKey.get()).toInstance();
- flowBuilder.setKey(flowKey.get());
- flowBuilder.addAugmentation(FlowStatisticsData.class, flowStatisticsData.build());
-
- // Update entry with timestamp of latest response
- flowBuilder.setKey(flowKey.get());
- FlowStatsEntry flowStatsEntry = new FlowStatsEntry(tableId, flowBuilder.build());
- trans.putOperationalData(flowRef, flowBuilder.build());
- return flowStatsEntry;
- }
-
- @Override
- protected InstanceIdentifier<?> listenPath() {
- return getNodeIdentifierBuilder().augmentation(FlowCapableNode.class).child(Table.class).child(Flow.class).build();
- }
-
- @Override
- protected String statName() {
- return "Flow";
- }
-
- @Override
- public void request() {
- // FIXME: it does not make sense to trigger this before sendAllFlowTablesStatisticsRequest()
- // comes back -- we do not have any tables anyway.
- final Collection<TableKey> tables = flowTableStats.getTables();
- LOG.debug("Node {} supports {} table(s)", this.getNodeRef(), tables.size());
- for (final TableKey key : tables) {
- LOG.debug("Send aggregate stats request for flow table {} to node {}", key.getId(), this.getNodeRef());
- this.requestAggregateFlows(key);
- }
-
- this.requestAllFlowsAllTables();
-
- }
- public void requestAllFlowsAllTables() {
- if (flowStatsService != null) {
- final GetAllFlowsStatisticsFromAllFlowTablesInputBuilder input = new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder();
- input.setNode(getNodeRef());
-
- requestHelper(flowStatsService.getAllFlowsStatisticsFromAllFlowTables(input.build()));
- }
- }
-
- public void requestAggregateFlows(final TableKey key) {
- if (flowStatsService != null) {
- GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder input =
- new GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder();
-
- input.setNode(getNodeRef());
- input.setTableId(new org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId(key.getId()));
- requestHelper(flowStatsService.getAggregateFlowStatisticsFromFlowTableForAllFlows(input.build()));
- }
- }
-
- public void requestFlow(final Flow flow) {
- if (flowStatsService != null) {
- final GetFlowStatisticsFromFlowTableInputBuilder input =
- new GetFlowStatisticsFromFlowTableInputBuilder(flow);
- input.setNode(getNodeRef());
-
- requestHelper(flowStatsService.getFlowStatisticsFromFlowTable(input.build()));
- }
- }
-
- @Override
- public void onDataChanged(final DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- for (Entry<InstanceIdentifier<?>, DataObject> e : change.getCreatedConfigurationData().entrySet()) {
- if (Flow.class.equals(e.getKey().getTargetType())) {
- final Flow flow = (Flow) e.getValue();
- LOG.debug("Key {} triggered request for flow {}", e.getKey(), flow);
- requestFlow(flow);
- } else {
- LOG.debug("Ignoring key {}", e.getKey());
- }
- }
-
- final DataModificationTransaction trans = startTransaction();
- for (InstanceIdentifier<?> key : change.getRemovedConfigurationData()) {
- if (Flow.class.equals(key.getTargetType())) {
- @SuppressWarnings("unchecked")
- final InstanceIdentifier<Flow> flow = (InstanceIdentifier<Flow>)key;
- LOG.debug("Key {} triggered remove of Flow from operational space.", key);
- trans.removeOperationalData(flow);
- }
- }
- trans.commit();
- }
-
- @Override
- public void start(final DataBrokerService dbs) {
- if (flowStatsService == null) {
- LOG.debug("No Flow Statistics service, not subscribing to flows on node {}", getNodeIdentifier());
- return;
- }
-
- super.start(dbs);
- }
-
- /* Returns Exist FlowKey from exist FlowCookieMap identified by cookie
- * and by switch flow identification (priority and match)*/
- private Optional<FlowKey> getExistFlowKey(final Flow flowRule, final InstanceIdentifier<Table> tableRef,
- final DataModificationTransaction trans, final FlowCookieMap cookieMap) {
-
- if (cookieMap != null) {
- for (FlowId flowId : cookieMap.getFlowIds()) {
- InstanceIdentifier<Flow> flowIdent = tableRef.child(Flow.class, new FlowKey(flowId));
- if (flowId.getValue().startsWith(ALIEN_SYSTEM_FLOW_ID)) {
- LOG.debug("Search for flow in the operational datastore by flowID: {} ", flowIdent);
- Flow readedFlow = (Flow) trans.readOperationalData(flowIdent);
- if (FlowComparator.flowEquals(flowRule, readedFlow)) {
- return Optional.<FlowKey> of(new FlowKey(flowId));
- }
- } else {
- LOG.debug("Search for flow in the configuration datastore by flowID: {} ", flowIdent);
- Flow readedFlow = (Flow) trans.readConfigurationData(flowIdent);
- if (FlowComparator.flowEquals(flowRule, readedFlow)) {
- return Optional.<FlowKey> of(new FlowKey(flowId));
- }
- }
- }
- LOG.debug("Flow was not found in the datastore. Flow {} ", flowRule);
- }
- return Optional.absent();
- }
-
- /* Returns FlowKey from existing Flow in DataStore/CONFIGURATIONAL which is identified by cookie
- * and by switch flow identification (priority and match) */
- private Optional<FlowKey> getFlowKeyFromExistFlow(final Flow flowRule, final InstanceIdentifier<Table> tableRef,
- final DataModificationTransaction trans) {
-
- /* Try to find it in DataSotre/CONFIG */
- Table table= (Table)trans.readConfigurationData(tableRef);
- if(table != null) {
- for(Flow existingFlow : table.getFlow()) {
- LOG.debug("Existing flow in data store : {}",existingFlow.toString());
- if(FlowComparator.flowEquals(flowRule,existingFlow)){
- return Optional.<FlowKey> of(new FlowKey(existingFlow.getId()));
- }
- }
- }
- return Optional.absent();
- }
-
- /* Returns FlowKey which doesn't exist in any DataStore for now */
- private Optional<FlowKey> makeAlienFlowKey(final Flow flowRule) {
-
- StringBuilder sBuilder = new StringBuilder(ALIEN_SYSTEM_FLOW_ID)
- .append(flowRule.getTableId()).append("-").append(this.unaccountedFlowsCounter);
- this.unaccountedFlowsCounter++;
- final FlowId flowId = new FlowId(sBuilder.toString());
- return Optional.<FlowKey> of(new FlowKey(flowId));
- }
-
- /* Build new whole FlowCookieMap or add new flowKey */
- private FlowCookieMap applyNewFlowKey(FlowCookieMap flowCookieMap, final Optional<FlowKey> flowKey,
- final FlowCookie flowCookie) {
- if (flowCookieMap != null) {
- flowCookieMap.getFlowIds().add(flowKey.get().getId());
- } else {
- final FlowCookieMapBuilder flowCookieMapBuilder = new FlowCookieMapBuilder();
- flowCookieMapBuilder.setCookie(flowCookie);
- flowCookieMapBuilder.setFlowIds(Collections.singletonList(flowKey.get().getId()));
- flowCookieMap = flowCookieMapBuilder.build();
- }
- return flowCookieMap;
- }
-
- @Override
- protected FlowStatsEntry createInvariantKey(final FlowStatsEntry item) {
- FlowBuilder newFlow = new FlowBuilder();
- newFlow.setId(item.getFlow().getId());
- newFlow.setKey(item.getFlow().getKey());
- newFlow.fieldsFrom(item.getFlow());
- return new FlowStatsEntry(item.getTableId(),newFlow.build());
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.Collections;
-import java.util.Set;
-import java.util.concurrent.ConcurrentSkipListSet;
-
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsData;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsDataBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMap;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMapBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.statistics.FlowTableStatistics;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.statistics.FlowTableStatisticsBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-final class FlowTableStatsTracker extends AbstractStatsTracker<FlowTableAndStatisticsMap, FlowTableAndStatisticsMap> {
- private final Set<TableKey> privateTables = new ConcurrentSkipListSet<>();
- private final Set<TableKey> tables = Collections.unmodifiableSet(privateTables);
- private final OpendaylightFlowTableStatisticsService flowTableStatsService;
-
- FlowTableStatsTracker(OpendaylightFlowTableStatisticsService flowTableStatsService, final FlowCapableContext context) {
- super(context);
- this.flowTableStatsService = flowTableStatsService;
- }
-
- Set<TableKey> getTables() {
- return tables;
- }
-
- @Override
- protected void cleanupSingleStat(DataModificationTransaction trans, FlowTableAndStatisticsMap item) {
- // TODO: do we want to do this?
- }
-
- @Override
- protected FlowTableAndStatisticsMap updateSingleStat(DataModificationTransaction trans, FlowTableAndStatisticsMap item) {
-
- InstanceIdentifier<Table> tableRef = getNodeIdentifierBuilder()
- .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(item.getTableId().getValue())).build();
-
- FlowTableStatisticsDataBuilder statisticsDataBuilder = new FlowTableStatisticsDataBuilder();
- final FlowTableStatistics stats = new FlowTableStatisticsBuilder(item).build();
- statisticsDataBuilder.setFlowTableStatistics(stats);
-
- TableBuilder tableBuilder = new TableBuilder();
- tableBuilder.setKey(new TableKey(item.getTableId().getValue()));
- tableBuilder.addAugmentation(FlowTableStatisticsData.class, statisticsDataBuilder.build());
- trans.putOperationalData(tableRef, tableBuilder.build());
- return item;
- }
-
- @Override
- public void request() {
- if (flowTableStatsService != null) {
- final GetFlowTablesStatisticsInputBuilder input = new GetFlowTablesStatisticsInputBuilder();
- input.setNode(getNodeRef());
-
- requestHelper(flowTableStatsService.getFlowTablesStatistics(input.build()));
- }
- }
-
- @Override
- protected FlowTableAndStatisticsMap createInvariantKey(FlowTableAndStatisticsMap item) {
- FlowTableAndStatisticsMapBuilder flowTableAndStatisticsMapBuilder = new FlowTableAndStatisticsMapBuilder();
- flowTableAndStatisticsMapBuilder.setTableId(item.getTableId());
- flowTableAndStatisticsMapBuilder.setKey(item.getKey());
- return flowTableAndStatisticsMapBuilder.build();
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStatsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.desc.GroupDescBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class GroupDescStatsTracker extends AbstractListeningStatsTracker<GroupDescStats, GroupDescStats> {
- private static final Logger logger = LoggerFactory.getLogger(GroupDescStatsTracker.class);
- private final OpendaylightGroupStatisticsService groupStatsService;
-
- public GroupDescStatsTracker(OpendaylightGroupStatisticsService groupStatsService, final FlowCapableContext context) {
- super(context);
- this.groupStatsService = groupStatsService;
- }
-
- @Override
- protected GroupDescStats updateSingleStat(DataModificationTransaction trans, GroupDescStats item) {
- GroupBuilder groupBuilder = new GroupBuilder();
- GroupKey groupKey = new GroupKey(item.getGroupId());
- groupBuilder.setKey(groupKey);
-
- InstanceIdentifier<Group> groupRef = getNodeIdentifierBuilder()
- .augmentation(FlowCapableNode.class).child(Group.class,groupKey).build();
-
- NodeGroupDescStatsBuilder groupDesc= new NodeGroupDescStatsBuilder();
- groupDesc.setGroupDesc(new GroupDescBuilder(item).build());
-
- //Update augmented data
- groupBuilder.addAugmentation(NodeGroupDescStats.class, groupDesc.build());
-
- trans.putOperationalData(groupRef, groupBuilder.build());
- return item;
- }
-
- @Override
- protected void cleanupSingleStat(DataModificationTransaction trans, GroupDescStats item) {
- InstanceIdentifier<NodeGroupDescStats> groupRef = getNodeIdentifierBuilder().augmentation(FlowCapableNode.class)
- .child(Group.class, new GroupKey(item.getGroupId())).augmentation(NodeGroupDescStats.class).build();
- trans.removeOperationalData(groupRef);
- }
-
- @Override
- protected InstanceIdentifier<?> listenPath() {
- return getNodeIdentifierBuilder().augmentation(FlowCapableNode.class).child(Group.class).build();
- }
-
- @Override
- protected String statName() {
- return "Group Descriptor";
- }
-
- @Override
- public void request() {
- if (groupStatsService != null) {
- final GetGroupDescriptionInputBuilder input = new GetGroupDescriptionInputBuilder();
- input.setNode(getNodeRef());
-
- requestHelper(groupStatsService.getGroupDescription(input.build()));
- }
- }
-
- @Override
- public void onDataChanged(DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- for (InstanceIdentifier<?> key : change.getCreatedConfigurationData().keySet()) {
- if (Group.class.equals(key.getTargetType())) {
- logger.debug("Key {} triggered request", key);
- request();
- } else {
- logger.debug("Ignoring key {}", key);
- }
- }
-
- final DataModificationTransaction trans = startTransaction();
- for (InstanceIdentifier<?> key : change.getRemovedConfigurationData()) {
- if (Group.class.equals(key.getTargetType())) {
- @SuppressWarnings("unchecked")
- InstanceIdentifier<Group> group = (InstanceIdentifier<Group>)key;
- InstanceIdentifier<?> del = group.augmentation(NodeGroupDescStats.class);
- logger.debug("Key {} triggered remove of augmentation {}", key, del);
-
- trans.removeOperationalData(del);
- }
- }
- trans.commit();
- }
-
- @Override
- public void start(final DataBrokerService dbs) {
- if (groupStatsService == null) {
- logger.debug("No Group Statistics service, not subscribing to groups on node {}", getNodeIdentifier());
- return;
- }
-
- super.start(dbs);
- }
-
- @Override
- protected GroupDescStats createInvariantKey(GroupDescStats item) {
- // No invariant data exist in the group description stats.
- return item;
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatistics;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.statistics.GroupStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStatsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-final class GroupStatsTracker extends AbstractListeningStatsTracker<GroupStats, GroupStats> {
- private static final Logger logger = LoggerFactory.getLogger(GroupStatsTracker.class);
- private final OpendaylightGroupStatisticsService groupStatsService;
-
- GroupStatsTracker(OpendaylightGroupStatisticsService groupStatsService, FlowCapableContext context) {
- super(context);
- this.groupStatsService = Preconditions.checkNotNull(groupStatsService);
- }
-
- @Override
- protected void cleanupSingleStat(DataModificationTransaction trans, GroupStats item) {
- InstanceIdentifier<NodeGroupStatistics> groupRef = getNodeIdentifierBuilder().augmentation(FlowCapableNode.class)
- .child(Group.class, new GroupKey(item.getGroupId())).augmentation(NodeGroupStatistics.class).build();
- trans.removeOperationalData(groupRef);
- }
-
- @Override
- protected GroupStats updateSingleStat(DataModificationTransaction trans,
- GroupStats item) {
- GroupBuilder groupBuilder = new GroupBuilder();
- GroupKey groupKey = new GroupKey(item.getGroupId());
- groupBuilder.setKey(groupKey);
-
- InstanceIdentifier<Group> groupRef = getNodeIdentifierBuilder().augmentation(FlowCapableNode.class)
- .child(Group.class,groupKey).build();
-
- NodeGroupStatisticsBuilder groupStatisticsBuilder= new NodeGroupStatisticsBuilder();
- groupStatisticsBuilder.setGroupStatistics(new GroupStatisticsBuilder(item).build());
-
- //Update augmented data
- groupBuilder.addAugmentation(NodeGroupStatistics.class, groupStatisticsBuilder.build());
- trans.putOperationalData(groupRef, groupBuilder.build());
- return item;
- }
-
- @Override
- protected InstanceIdentifier<?> listenPath() {
- return getNodeIdentifierBuilder().augmentation(FlowCapableNode.class).child(Group.class).build();
- }
-
- @Override
- protected String statName() {
- return "Group";
- }
-
- @Override
- public void request() {
- final GetAllGroupStatisticsInputBuilder input = new GetAllGroupStatisticsInputBuilder();
- input.setNode(getNodeRef());
-
- requestHelper(groupStatsService.getAllGroupStatistics(input.build()));
- }
-
- @Override
- public void onDataChanged(DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- final DataModificationTransaction trans = startTransaction();
- for (InstanceIdentifier<?> key : change.getRemovedConfigurationData()) {
- if (Group.class.equals(key.getTargetType())) {
- @SuppressWarnings("unchecked")
- InstanceIdentifier<Group> group = (InstanceIdentifier<Group>)key;
- InstanceIdentifier<?> del = group.augmentation(NodeGroupStatistics.class);
- logger.debug("Key {} triggered remove of augmentation {}", key, del);
-
- trans.removeOperationalData(del);
- }
- }
- trans.commit();
- }
-
- @Override
- public void start(final DataBrokerService dbs) {
- if (groupStatsService == null) {
- logger.debug("No Group Statistics service, not subscribing to groups on node {}", getNodeIdentifier());
- return;
- }
-
- super.start(dbs);
- }
-
- @Override
- protected GroupStats createInvariantKey(GroupStats item) {
- GroupStatsBuilder groupStatsBuilder = new GroupStatsBuilder();
- groupStatsBuilder.setKey(item.getKey());
- groupStatsBuilder.setGroupId(item.getGroupId());
- return groupStatsBuilder.build();
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStatsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterConfigStatsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStats;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class MeterConfigStatsTracker extends AbstractListeningStatsTracker<MeterConfigStats, MeterConfigStats> {
- private static final Logger logger = LoggerFactory.getLogger(MeterConfigStatsTracker.class);
- private final OpendaylightMeterStatisticsService meterStatsService;
-
- protected MeterConfigStatsTracker(OpendaylightMeterStatisticsService meterStatsService, final FlowCapableContext context) {
- super(context);
- this.meterStatsService = meterStatsService;
- }
-
- @Override
- protected void cleanupSingleStat(DataModificationTransaction trans, MeterConfigStats item) {
- InstanceIdentifier<NodeMeterConfigStats> meterRef = getNodeIdentifierBuilder()
- .augmentation(FlowCapableNode.class)
- .child(Meter.class, new MeterKey(item.getMeterId()))
- .augmentation(NodeMeterConfigStats.class).build();
- trans.removeOperationalData(meterRef);
- }
-
- @Override
- protected MeterConfigStats updateSingleStat(DataModificationTransaction trans, MeterConfigStats item) {
- MeterBuilder meterBuilder = new MeterBuilder();
- MeterKey meterKey = new MeterKey(item.getMeterId());
- meterBuilder.setKey(meterKey);
-
- InstanceIdentifier<Meter> meterRef = getNodeIdentifierBuilder().augmentation(FlowCapableNode.class)
- .child(Meter.class,meterKey).toInstance();
-
- NodeMeterConfigStatsBuilder meterConfig = new NodeMeterConfigStatsBuilder();
- meterConfig.setMeterConfigStats(new MeterConfigStatsBuilder(item).build());
-
- //Update augmented data
- meterBuilder.addAugmentation(NodeMeterConfigStats.class, meterConfig.build());
-
- trans.putOperationalData(meterRef, meterBuilder.build());
- return item;
- }
-
- @Override
- public void request() {
- if (meterStatsService != null) {
- GetAllMeterConfigStatisticsInputBuilder input = new GetAllMeterConfigStatisticsInputBuilder();
- input.setNode(getNodeRef());
-
- requestHelper(meterStatsService.getAllMeterConfigStatistics(input.build()));
- }
- }
-
- @Override
- public void onDataChanged(DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- final DataModificationTransaction trans = startTransaction();
-
- for (InstanceIdentifier<?> key : change.getRemovedConfigurationData()) {
- if (Meter.class.equals(key.getTargetType())) {
- @SuppressWarnings("unchecked")
- InstanceIdentifier<Meter> meter = (InstanceIdentifier<Meter>)key;
-
- InstanceIdentifier<?> nodeMeterStatisticsAugmentation =
- meter.augmentation(NodeMeterConfigStats.class);
- trans.removeOperationalData(nodeMeterStatisticsAugmentation);
- }
- }
-
- trans.commit();
- }
-
- @Override
- protected InstanceIdentifier<?> listenPath() {
- return getNodeIdentifierBuilder().augmentation(FlowCapableNode.class).child(Meter.class).build();
- }
-
- @Override
- protected String statName() {
- return "Meter Config";
- }
-
- @Override
- public void start(final DataBrokerService dbs) {
- if (meterStatsService == null) {
- logger.debug("No Meter Statistics service, not subscribing to meter on node {}", getNodeIdentifier());
- return;
- }
-
- super.start(dbs);
- }
-
- @Override
- protected MeterConfigStats createInvariantKey(MeterConfigStats item) {
- // No invariant data exist in the meter config stats.
- return item;
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatistics;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStatsBuilder;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class MeterStatsTracker extends AbstractListeningStatsTracker<MeterStats, MeterStats> {
- private static final Logger logger = LoggerFactory.getLogger(MeterStatsTracker.class);
- private final OpendaylightMeterStatisticsService meterStatsService;
-
- MeterStatsTracker(OpendaylightMeterStatisticsService meterStatsService, final FlowCapableContext context) {
- super(context);
- this.meterStatsService = meterStatsService;
- }
-
- @Override
- protected void cleanupSingleStat(DataModificationTransaction trans, MeterStats item) {
- InstanceIdentifier<NodeMeterStatistics> meterRef = getNodeIdentifierBuilder()
- .augmentation(FlowCapableNode.class)
- .child(Meter.class,new MeterKey(item.getMeterId()))
- .augmentation(NodeMeterStatistics.class).build();
- trans.removeOperationalData(meterRef);
- }
-
- @Override
- protected MeterStats updateSingleStat(DataModificationTransaction trans, MeterStats item) {
- MeterBuilder meterBuilder = new MeterBuilder();
- MeterKey meterKey = new MeterKey(item.getMeterId());
- meterBuilder.setKey(meterKey);
-
- InstanceIdentifier<Meter> meterRef = getNodeIdentifierBuilder()
- .augmentation(FlowCapableNode.class).child(Meter.class,meterKey).build();
-
- NodeMeterStatisticsBuilder meterStatsBuilder= new NodeMeterStatisticsBuilder();
- meterStatsBuilder.setMeterStatistics(new MeterStatisticsBuilder(item).build());
-
- //Update augmented data
- meterBuilder.addAugmentation(NodeMeterStatistics.class, meterStatsBuilder.build());
- trans.putOperationalData(meterRef, meterBuilder.build());
- return item;
- }
-
- @Override
- public void request() {
- if (meterStatsService != null) {
- GetAllMeterStatisticsInputBuilder input = new GetAllMeterStatisticsInputBuilder();
- input.setNode(getNodeRef());
-
- requestHelper(meterStatsService.getAllMeterStatistics(input.build()));
- }
- }
-
- @Override
- public void onDataChanged(DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- for (InstanceIdentifier<?> key : change.getCreatedConfigurationData().keySet()) {
- if (Meter.class.equals(key.getTargetType())) {
- request();
- }
- }
-
- final DataModificationTransaction trans = startTransaction();
- for (InstanceIdentifier<?> key : change.getRemovedConfigurationData()) {
- if (Meter.class.equals(key.getTargetType())) {
- @SuppressWarnings("unchecked")
- InstanceIdentifier<Meter> meter = (InstanceIdentifier<Meter>)key;
-
- InstanceIdentifier<?> nodeMeterStatisticsAugmentation =
- meter.augmentation(NodeMeterStatistics.class);
- trans.removeOperationalData(nodeMeterStatisticsAugmentation);
- }
- }
- trans.commit();
- }
-
- @Override
- protected InstanceIdentifier<?> listenPath() {
- return getNodeIdentifierBuilder().augmentation(FlowCapableNode.class).child(Meter.class).build();
- }
-
- @Override
- protected String statName() {
- return "Meter";
- }
-
- @Override
- public void start(final DataBrokerService dbs) {
- if (meterStatsService == null) {
- logger.debug("No Meter Statistics service, not subscribing to meters on node {}", getNodeIdentifier());
- return;
- }
-
- super.start(dbs);
- }
-
- @Override
- protected MeterStats createInvariantKey(MeterStats item) {
- MeterStatsBuilder meterStatsBuilder = new MeterStatsBuilder();
- meterStatsBuilder.setKey(item.getKey());
- meterStatsBuilder.setMeterId(item.getMeterId());
- return meterStatsBuilder.build();
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.MultipartTransactionAware;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Main responsibility of the class is to manage multipart response
- * for multipart request. It also handles the flow aggregate request
- * and response mapping.
- * @author avishnoi@in.ibm.com
- *
- */
-class MultipartMessageManager {
- /*
- * Map for tx id and type of request, to keep track of all the request sent
- * by Statistics Manager. Statistics Manager won't entertain any multipart
- * response for which it didn't send the request.
- */
- private final Map<TxIdEntry,Long> txIdToRequestTypeMap = new ConcurrentHashMap<>();
- /*
- * Map to keep track of the request tx id for flow table statistics request.
- * Because flow table statistics multi part response do not contains the table id.
- */
- private final Map<TxIdEntry,Short> txIdTotableIdMap = new ConcurrentHashMap<>();
- private final long lifetimeNanos;
-
- public MultipartMessageManager(long lifetimeNanos) {
- this.lifetimeNanos = lifetimeNanos;
- }
-
- private static final class TxIdEntry {
- private final TransactionId txId;
-
- public TxIdEntry(TransactionId txId) {
- this.txId = txId;
- }
- public TransactionId getTxId() {
- return txId;
- }
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((txId == null) ? 0 : txId.hashCode());
- return result;
- }
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof TxIdEntry)) {
- return false;
- }
- TxIdEntry other = (TxIdEntry) obj;
-
- if (txId == null) {
- if (other.txId != null) {
- return false;
- }
- } else if (!txId.equals(other.txId)) {
- return false;
- }
- return true;
- }
-
- @Override
- public String toString() {
- return "TxIdEntry [txId=" + txId + ']';
- }
- }
-
- public void recordExpectedTableTransaction(TransactionId id, Short tableId) {
- recordExpectedTransaction(id);
- txIdTotableIdMap.put(new TxIdEntry(id), Preconditions.checkNotNull(tableId));
- }
-
- public Short isExpectedTableTransaction(TransactionAware transaction) {
- Boolean more = null;
- if (transaction instanceof MultipartTransactionAware) {
- more = ((MultipartTransactionAware)transaction).isMoreReplies();
- }
-
- if (!isExpectedTransaction(transaction, more)) {
- return null;
- }
-
- final TxIdEntry key = new TxIdEntry(transaction.getTransactionId());
- if (more != null && more.booleanValue()) {
- return txIdTotableIdMap.get(key);
- } else {
- return txIdTotableIdMap.remove(key);
- }
- }
-
- public void recordExpectedTransaction(TransactionId id) {
- TxIdEntry entry = new TxIdEntry(Preconditions.checkNotNull(id));
- txIdToRequestTypeMap.put(entry, getExpiryTime());
- }
-
- private boolean isExpectedTransaction(TransactionAware transaction, Boolean more) {
- final TxIdEntry entry = new TxIdEntry(transaction.getTransactionId());
- if (more != null && more.booleanValue()) {
- return txIdToRequestTypeMap.containsKey(entry);
- } else {
- return txIdToRequestTypeMap.remove(entry) != null;
- }
- }
-
- public boolean isExpectedTransaction(TransactionAware transaction) {
- Boolean more = null;
- if (transaction instanceof MultipartTransactionAware) {
- more = ((MultipartTransactionAware)transaction).isMoreReplies();
- }
-
- return isExpectedTransaction(transaction, more);
- }
-
- private Long getExpiryTime() {
- return System.nanoTime() + lifetimeNanos;
- }
-
- public void cleanStaleTransactionIds() {
- final long now = System.nanoTime();
-
- for (Iterator<TxIdEntry> it = txIdToRequestTypeMap.keySet().iterator();it.hasNext();){
- TxIdEntry txIdEntry = it.next();
-
- Long expiryTime = txIdToRequestTypeMap.get(txIdEntry);
- if(now > expiryTime){
- it.remove();
- txIdTotableIdMap.remove(txIdEntry);
- }
- }
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsData;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsDataBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.flow.capable.node.connector.statistics.FlowCapableNodeConnectorStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMap;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMapBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class NodeConnectorStatsTracker extends AbstractStatsTracker<NodeConnectorStatisticsAndPortNumberMap, NodeConnectorStatisticsAndPortNumberMap> {
- private static final Logger logger = LoggerFactory.getLogger(NodeConnectorStatsTracker.class);
- private final OpendaylightPortStatisticsService portStatsService;
-
- NodeConnectorStatsTracker(final OpendaylightPortStatisticsService portStatsService, final FlowCapableContext context) {
- super(context);
- this.portStatsService = portStatsService;
- }
-
- @Override
- protected void cleanupSingleStat(final DataModificationTransaction trans, final NodeConnectorStatisticsAndPortNumberMap item) {
- // TODO Auto-generated method stub
- }
-
- @Override
- protected NodeConnectorStatisticsAndPortNumberMap updateSingleStat(final DataModificationTransaction trans, final NodeConnectorStatisticsAndPortNumberMap item) {
- FlowCapableNodeConnectorStatisticsBuilder statisticsBuilder
- = new FlowCapableNodeConnectorStatisticsBuilder();
- statisticsBuilder.setBytes(item.getBytes());
- statisticsBuilder.setCollisionCount(item.getCollisionCount());
- statisticsBuilder.setDuration(item.getDuration());
- statisticsBuilder.setPackets(item.getPackets());
- statisticsBuilder.setReceiveCrcError(item.getReceiveCrcError());
- statisticsBuilder.setReceiveDrops(item.getReceiveDrops());
- statisticsBuilder.setReceiveErrors(item.getReceiveErrors());
- statisticsBuilder.setReceiveFrameError(item.getReceiveFrameError());
- statisticsBuilder.setReceiveOverRunError(item.getReceiveOverRunError());
- statisticsBuilder.setTransmitDrops(item.getTransmitDrops());
- statisticsBuilder.setTransmitErrors(item.getTransmitErrors());
-
- //Augment data to the node-connector
- FlowCapableNodeConnectorStatisticsDataBuilder statisticsDataBuilder =
- new FlowCapableNodeConnectorStatisticsDataBuilder();
-
- statisticsDataBuilder.setFlowCapableNodeConnectorStatistics(statisticsBuilder.build());
-
- final NodeConnectorKey key = new NodeConnectorKey(item.getNodeConnectorId());
- final InstanceIdentifier<NodeConnector> nodeConnectorRef = getNodeIdentifier().child(NodeConnector.class, key);
-
- // FIXME: can we bypass this read?
- NodeConnector nodeConnector = (NodeConnector)trans.readOperationalData(nodeConnectorRef);
- if(nodeConnector != null){
- final FlowCapableNodeConnectorStatisticsData stats = statisticsDataBuilder.build();
- logger.debug("Augmenting port statistics {} to port {}",stats,nodeConnectorRef.toString());
- NodeConnectorBuilder nodeConnectorBuilder = new NodeConnectorBuilder()
- .setKey(key).setId(item.getNodeConnectorId())
- .addAugmentation(FlowCapableNodeConnectorStatisticsData.class, stats);
- trans.putOperationalData(nodeConnectorRef, nodeConnectorBuilder.build());
- }
-
- return item;
- }
-
- @Override
- public void request() {
- if (portStatsService != null) {
- final GetAllNodeConnectorsStatisticsInputBuilder input = new GetAllNodeConnectorsStatisticsInputBuilder();
- input.setNode(getNodeRef());
-
- requestHelper(portStatsService.getAllNodeConnectorsStatistics(input.build()));
- }
- }
-
- @Override
- protected NodeConnectorStatisticsAndPortNumberMap createInvariantKey(NodeConnectorStatisticsAndPortNumberMap item) {
- NodeConnectorStatisticsAndPortNumberMapBuilder ncStatsBuilder = new NodeConnectorStatisticsAndPortNumberMapBuilder();
- ncStatsBuilder.setNodeConnectorId(item.getNodeConnectorId());
- ncStatsBuilder.setKey(item.getKey());
- return ncStatsBuilder.build();
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.TimeUnit;
-
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsData;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsDataBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.aggregate.flow.statistics.AggregateFlowStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMap;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeatures;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeaturesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeaturesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupFeatures;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeaturesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeaturesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterFeatures;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStats;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.model.statistics.types.rev130925.AggregateFlowStatistics;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMap;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMap;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-/**
- * This class handles the lifecycle of per-node statistics. It receives data
- * from StatisticsListener, stores it in the data store and keeps track of
- * when the data should be removed.
- *
- * @author avishnoi@in.ibm.com
- */
-public final class NodeStatisticsHandler implements AutoCloseable, FlowCapableContext {
- private static final Logger logger = LoggerFactory.getLogger(NodeStatisticsHandler.class);
-
- private static final long STATS_COLLECTION_MILLIS = TimeUnit.SECONDS.toMillis(15);
- private static final long FIRST_COLLECTION_MILLIS = TimeUnit.SECONDS.toMillis(5);
- private static final int NUMBER_OF_WAIT_CYCLES = 2;
-
- private final MultipartMessageManager msgManager;
- private final StatisticsRequestScheduler srScheduler;
- private final InstanceIdentifier<Node> targetNodeIdentifier;
- private final FlowStatsTracker flowStats;
- private final FlowTableStatsTracker flowTableStats;
- private final GroupDescStatsTracker groupDescStats;
- private final GroupStatsTracker groupStats;
- private final MeterConfigStatsTracker meterConfigStats;
- private final MeterStatsTracker meterStats;
- private final NodeConnectorStatsTracker nodeConnectorStats;
- private final QueueStatsTracker queueStats;
- private final DataProviderService dps;
- private final NodeRef targetNodeRef;
- private final NodeKey targetNodeKey;
- private final TimerTask task = new TimerTask() {
- @Override
- public void run() {
- try{
- requestPeriodicStatistics();
- cleanStaleStatistics();
- }catch(Exception e){
- logger.warn("Exception occured while sending statistics request : {}",e);
- }
- }
- };
-
- public NodeStatisticsHandler(final DataProviderService dps, final NodeKey nodeKey,
- final OpendaylightFlowStatisticsService flowStatsService,
- final OpendaylightFlowTableStatisticsService flowTableStatsService,
- final OpendaylightGroupStatisticsService groupStatsService,
- final OpendaylightMeterStatisticsService meterStatsService,
- final OpendaylightPortStatisticsService portStatsService,
- final OpendaylightQueueStatisticsService queueStatsService,
- final StatisticsRequestScheduler srScheduler) {
- this.dps = Preconditions.checkNotNull(dps);
- this.targetNodeKey = Preconditions.checkNotNull(nodeKey);
- this.srScheduler = Preconditions.checkNotNull(srScheduler);
- this.targetNodeIdentifier = InstanceIdentifier.builder(Nodes.class).child(Node.class, targetNodeKey).build();
- this.targetNodeRef = new NodeRef(targetNodeIdentifier);
-
- final long lifetimeNanos = TimeUnit.MILLISECONDS.toNanos(STATS_COLLECTION_MILLIS * NUMBER_OF_WAIT_CYCLES);
-
- msgManager = new MultipartMessageManager(lifetimeNanos);
- flowTableStats = new FlowTableStatsTracker(flowTableStatsService, this);
- flowStats = new FlowStatsTracker(flowStatsService, this, flowTableStats);
- groupDescStats = new GroupDescStatsTracker(groupStatsService, this);
- groupStats = new GroupStatsTracker(groupStatsService, this);
- meterConfigStats = new MeterConfigStatsTracker(meterStatsService, this);
- meterStats = new MeterStatsTracker(meterStatsService, this);
- nodeConnectorStats = new NodeConnectorStatsTracker(portStatsService, this);
- queueStats = new QueueStatsTracker(queueStatsService, this);
- }
-
- public NodeKey getTargetNodeKey() {
- return targetNodeKey;
- }
-
- @Override
- public InstanceIdentifier<Node> getNodeIdentifier() {
- return targetNodeIdentifier;
- }
-
- @Override
- public NodeRef getNodeRef() {
- return targetNodeRef;
- }
-
- @Override
- public DataModificationTransaction startDataModification() {
- DataModificationTransaction dmt = dps.beginTransaction();
- dmt.registerListener(this.srScheduler);
- return dmt;
- }
-
- public synchronized void updateGroupDescStats(TransactionAware transaction, List<GroupDescStats> list) {
- if (msgManager.isExpectedTransaction(transaction)) {
- groupDescStats.updateStats(list);
- }
- }
-
- public synchronized void updateGroupStats(TransactionAware transaction, List<GroupStats> list) {
- if (msgManager.isExpectedTransaction(transaction)) {
- groupStats.updateStats(list);
- }
- }
-
- public synchronized void updateMeterConfigStats(TransactionAware transaction, List<MeterConfigStats> list) {
- if (msgManager.isExpectedTransaction(transaction)) {
- meterConfigStats.updateStats(list);
- }
- }
-
- public synchronized void updateMeterStats(TransactionAware transaction, List<MeterStats> list) {
- if (msgManager.isExpectedTransaction(transaction)) {
- meterStats.updateStats(list);
- }
- }
-
- public synchronized void updateQueueStats(TransactionAware transaction, List<QueueIdAndStatisticsMap> list) {
- if (msgManager.isExpectedTransaction(transaction)) {
- queueStats.updateStats(list);
- }
- }
-
- public synchronized void updateFlowTableStats(TransactionAware transaction, List<FlowTableAndStatisticsMap> list) {
- if (msgManager.isExpectedTransaction(transaction)) {
- flowTableStats.updateStats(list);
- }
- }
-
- public synchronized void updateNodeConnectorStats(TransactionAware transaction, List<NodeConnectorStatisticsAndPortNumberMap> list) {
- if (msgManager.isExpectedTransaction(transaction)) {
- nodeConnectorStats.updateStats(list);
- }
- }
-
- public synchronized void updateAggregateFlowStats(TransactionAware transaction, AggregateFlowStatistics flowStats) {
- final Short tableId = msgManager.isExpectedTableTransaction(transaction);
- if (tableId != null) {
- final DataModificationTransaction trans = this.startDataModification();
- InstanceIdentifier<Table> tableRef = InstanceIdentifier.builder(Nodes.class).child(Node.class, targetNodeKey)
- .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(tableId)).toInstance();
-
- AggregateFlowStatisticsDataBuilder aggregateFlowStatisticsDataBuilder = new AggregateFlowStatisticsDataBuilder();
- AggregateFlowStatisticsBuilder aggregateFlowStatisticsBuilder = new AggregateFlowStatisticsBuilder(flowStats);
-
- aggregateFlowStatisticsDataBuilder.setAggregateFlowStatistics(aggregateFlowStatisticsBuilder.build());
-
- logger.debug("Augment aggregate statistics: {} for table {} on Node {}",
- aggregateFlowStatisticsBuilder.build().toString(),tableId,targetNodeKey);
-
- TableBuilder tableBuilder = new TableBuilder();
- tableBuilder.setKey(new TableKey(tableId));
- tableBuilder.addAugmentation(AggregateFlowStatisticsData.class, aggregateFlowStatisticsDataBuilder.build());
- trans.putOperationalData(tableRef, tableBuilder.build());
-
- trans.commit();
- }
- }
-
- public synchronized void updateFlowStats(TransactionAware transaction, List<FlowAndStatisticsMapList> list) {
- if (msgManager.isExpectedTransaction(transaction)) {
- flowStats.updateStats(list);
- }
- }
-
- public synchronized void updateGroupFeatures(GroupFeatures notification) {
- final DataModificationTransaction trans = this.startDataModification();
-
- final NodeBuilder nodeData = new NodeBuilder();
- nodeData.setKey(targetNodeKey);
-
- NodeGroupFeaturesBuilder nodeGroupFeatures = new NodeGroupFeaturesBuilder();
- GroupFeaturesBuilder groupFeatures = new GroupFeaturesBuilder(notification);
- nodeGroupFeatures.setGroupFeatures(groupFeatures.build());
-
- //Update augmented data
- nodeData.addAugmentation(NodeGroupFeatures.class, nodeGroupFeatures.build());
- trans.putOperationalData(targetNodeIdentifier, nodeData.build());
-
- // FIXME: should we be tracking this data?
- trans.commit();
- }
-
- public synchronized void updateMeterFeatures(MeterFeatures features) {
- final DataModificationTransaction trans = this.startDataModification();
-
- final NodeBuilder nodeData = new NodeBuilder();
- nodeData.setKey(targetNodeKey);
-
- NodeMeterFeaturesBuilder nodeMeterFeatures = new NodeMeterFeaturesBuilder();
- MeterFeaturesBuilder meterFeature = new MeterFeaturesBuilder(features);
- nodeMeterFeatures.setMeterFeatures(meterFeature.build());
-
- //Update augmented data
- nodeData.addAugmentation(NodeMeterFeatures.class, nodeMeterFeatures.build());
- trans.putOperationalData(targetNodeIdentifier, nodeData.build());
-
- // FIXME: should we be tracking this data?
- trans.commit();
- }
-
- public synchronized void cleanStaleStatistics() {
- final DataModificationTransaction trans = this.startDataModification();
-
- flowStats.cleanup(trans);
- groupDescStats.cleanup(trans);
- groupStats.cleanup(trans);
- meterConfigStats.cleanup(trans);
- meterStats.cleanup(trans);
- nodeConnectorStats.cleanup(trans);
- queueStats.cleanup(trans);
- msgManager.cleanStaleTransactionIds();
-
- trans.commit();
- }
-
- public synchronized void requestPeriodicStatistics() {
- logger.debug("Send requests for statistics collection to node : {}", targetNodeKey);
-
- this.srScheduler.addRequestToSchedulerQueue(flowTableStats);
-
- this.srScheduler.addRequestToSchedulerQueue(flowStats);
-
- this.srScheduler.addRequestToSchedulerQueue(nodeConnectorStats);
-
- this.srScheduler.addRequestToSchedulerQueue(groupStats);
-
- this.srScheduler.addRequestToSchedulerQueue(groupDescStats);
-
- this.srScheduler.addRequestToSchedulerQueue(meterStats);
-
- this.srScheduler.addRequestToSchedulerQueue(meterConfigStats);
-
- this.srScheduler.addRequestToSchedulerQueue(queueStats);
- }
-
- public synchronized void start(final Timer timer) {
- flowStats.start(dps);
- groupDescStats.start(dps);
- groupStats.start(dps);
- meterConfigStats.start(dps);
- meterStats.start(dps);
- queueStats.start(dps);
-
- timer.schedule(task, (long) (Math.random() * FIRST_COLLECTION_MILLIS), STATS_COLLECTION_MILLIS);
-
- logger.debug("Statistics handler for node started with base interval {}ms", STATS_COLLECTION_MILLIS);
-
- requestPeriodicStatistics();
- }
-
- @Override
- public synchronized void close() {
- task.cancel();
- flowStats.close();
- groupDescStats.close();
- groupStats.close();
- meterConfigStats.close();
- meterStats.close();
- queueStats.close();
-
- //Clean up queued statistics request from scheduler queue
- srScheduler.removeRequestsFromSchedulerQueue(this.getNodeRef());
-
- logger.debug("Statistics handler for {} shut down", targetNodeKey.getId());
- }
-
- @Override
- public void registerTransaction(TransactionId id) {
- msgManager.recordExpectedTransaction(id);
- logger.debug("Transaction {} for node {} sent successfully", id, targetNodeKey);
- }
-
- @Override
- public void registerTableTransaction(final TransactionId id, final Short table) {
- msgManager.recordExpectedTableTransaction(id, table);
- logger.debug("Transaction {} for node {} table {} sent successfully", id, targetNodeKey, table);
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.queue.rev130925.QueueId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
-
-final class QueueStatsEntry {
- private final NodeConnectorId nodeConnectorId;
- private final QueueId queueId;
- public QueueStatsEntry(NodeConnectorId ncId, QueueId queueId){
- this.nodeConnectorId = ncId;
- this.queueId = queueId;
- }
- public NodeConnectorId getNodeConnectorId() {
- return nodeConnectorId;
- }
- public QueueId getQueueId() {
- return queueId;
- }
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((nodeConnectorId == null) ? 0 : nodeConnectorId.hashCode());
- result = prime * result + ((queueId == null) ? 0 : queueId.hashCode());
- return result;
- }
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (!(obj instanceof QueueStatsEntry)) {
- return false;
- }
- QueueStatsEntry other = (QueueStatsEntry) obj;
- if (nodeConnectorId == null) {
- if (other.nodeConnectorId != null) {
- return false;
- }
- } else if (!nodeConnectorId.equals(other.nodeConnectorId)) {
- return false;
- }
- if (queueId == null) {
- if (other.queueId != null) {
- return false;
- }
- } else if (!queueId.equals(other.queueId)) {
- return false;
- }
- return true;
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.Map.Entry;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataChangeEvent;
-import org.opendaylight.controller.sal.binding.api.data.DataBrokerService;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.queue.rev130925.QueueId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.FlowCapableNodeConnectorQueueStatisticsData;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.FlowCapableNodeConnectorQueueStatisticsDataBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetQueueStatisticsFromGivenPortInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.flow.capable.node.connector.queue.statistics.FlowCapableNodeConnectorQueueStatisticsBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMap;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class QueueStatsTracker extends AbstractListeningStatsTracker<QueueIdAndStatisticsMap, QueueStatsEntry> {
- private static final Logger logger = LoggerFactory.getLogger(QueueStatsTracker.class);
- private final OpendaylightQueueStatisticsService queueStatsService;
-
- QueueStatsTracker(OpendaylightQueueStatisticsService queueStatsService, final FlowCapableContext context) {
- super(context);
- this.queueStatsService = queueStatsService;
- }
-
- @Override
- protected void cleanupSingleStat(DataModificationTransaction trans, QueueStatsEntry item) {
- InstanceIdentifier<?> queueRef
- = getNodeIdentifierBuilder().child(NodeConnector.class, new NodeConnectorKey(item.getNodeConnectorId()))
- .augmentation(FlowCapableNodeConnector.class)
- .child(Queue.class, new QueueKey(item.getQueueId()))
- .augmentation(FlowCapableNodeConnectorQueueStatisticsData.class).build();
- trans.removeOperationalData(queueRef);
- }
-
- @Override
- protected QueueStatsEntry updateSingleStat(DataModificationTransaction trans, QueueIdAndStatisticsMap item) {
-
- QueueStatsEntry queueEntry = new QueueStatsEntry(item.getNodeConnectorId(), item.getQueueId());
-
- FlowCapableNodeConnectorQueueStatisticsDataBuilder queueStatisticsDataBuilder = new FlowCapableNodeConnectorQueueStatisticsDataBuilder();
-
- FlowCapableNodeConnectorQueueStatisticsBuilder queueStatisticsBuilder = new FlowCapableNodeConnectorQueueStatisticsBuilder();
-
- queueStatisticsBuilder.fieldsFrom(item);
-
- queueStatisticsDataBuilder.setFlowCapableNodeConnectorQueueStatistics(queueStatisticsBuilder.build());
-
- InstanceIdentifier<Queue> queueRef = getNodeIdentifierBuilder().child(NodeConnector.class, new NodeConnectorKey(item.getNodeConnectorId()))
- .augmentation(FlowCapableNodeConnector.class)
- .child(Queue.class, new QueueKey(item.getQueueId())).toInstance();
-
- QueueBuilder queueBuilder = new QueueBuilder();
- FlowCapableNodeConnectorQueueStatisticsData qsd = queueStatisticsDataBuilder.build();
- queueBuilder.addAugmentation(FlowCapableNodeConnectorQueueStatisticsData.class, qsd);
- queueBuilder.setKey(new QueueKey(item.getQueueId()));
-
- logger.debug("Augmenting queue statistics {} of queue {} to port {}",
- qsd,
- item.getQueueId(),
- item.getNodeConnectorId());
-
- trans.putOperationalData(queueRef, queueBuilder.build());
- return queueEntry;
- }
-
- @Override
- public void request() {
- if (queueStatsService != null) {
- GetAllQueuesStatisticsFromAllPortsInputBuilder input = new GetAllQueuesStatisticsFromAllPortsInputBuilder();
- input.setNode(getNodeRef());
-
- requestHelper(queueStatsService.getAllQueuesStatisticsFromAllPorts(input.build()));
- }
- }
-
- public void request(NodeConnectorId nodeConnectorId, QueueId queueId) {
- if (queueStatsService != null) {
- GetQueueStatisticsFromGivenPortInputBuilder input = new GetQueueStatisticsFromGivenPortInputBuilder();
-
- input.setNode(getNodeRef());
- input.setNodeConnectorId(nodeConnectorId);
- input.setQueueId(queueId);
-
- requestHelper(queueStatsService.getQueueStatisticsFromGivenPort(input.build()));
- }
- }
-
- @Override
- public void onDataChanged(DataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
- for (Entry<InstanceIdentifier<?>, DataObject> e : change.getCreatedConfigurationData().entrySet()) {
- if (Queue.class.equals(e.getKey().getTargetType())) {
- final Queue queue = (Queue) e.getValue();
- final NodeConnectorKey key = e.getKey().firstKeyOf(NodeConnector.class, NodeConnectorKey.class);
- logger.debug("Key {} triggered request for connector {} queue {}", key.getId(), queue.getQueueId());
- request(key.getId(), queue.getQueueId());
- } else {
- logger.debug("Ignoring key {}", e.getKey());
- }
- }
-
- final DataModificationTransaction trans = startTransaction();
- for (InstanceIdentifier<?> key : change.getRemovedConfigurationData()) {
- if (Queue.class.equals(key.getTargetType())) {
- @SuppressWarnings("unchecked")
- final InstanceIdentifier<Queue> queue = (InstanceIdentifier<Queue>)key;
- final InstanceIdentifier<?> del = queue
- .augmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
- logger.debug("Key {} triggered remove of augmentation {}", key, del);
-
- trans.removeOperationalData(del);
- }
- }
- trans.commit();
- }
-
- @Override
- protected InstanceIdentifier<?> listenPath() {
- return getNodeIdentifierBuilder().child(NodeConnector.class)
- .augmentation(FlowCapableNodeConnector.class).child(Queue.class).build();
- }
-
- @Override
- protected String statName() {
- return "Queue";
- }
-
- @Override
- public void start(final DataBrokerService dbs) {
- if (queueStatsService == null) {
- logger.debug("No Queue Statistics service, not subscribing to queues on node {}", getNodeIdentifier());
- return;
- }
-
- super.start(dbs);
- }
-
- @Override
- protected QueueStatsEntry createInvariantKey(QueueStatsEntry item) {
- // No invariant data exist in the group description stats.
- return item;
- }
-}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatListeningCommiter
+ * Definition Interface for {@link DataChangeListener} implementer class rule.
+ * Interface represent a contract between Config/DataStore changes and
+ * Operational/DataStore commits. All Operational/DataStore commit have
+ * to by represent as RPC Device response Notification processing. So
+ * Operational/DS could contains only real mirror of OF Device
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 27, 2014
+ */
+public interface StatListeningCommiter<T extends DataObject, N extends NotificationListener> extends DataChangeListener, StatNotifyCommiter<N> {
+
+
+ /**
+ * All StatListeningCommiter implementer has to clean its actual state
+ * for all cached data related to disconnected node.
+ * Method prevents unwanted dataStore changes.
+ *
+ * @param nodeIdent
+ */
+ void cleanForDisconnect(InstanceIdentifier<Node> nodeIdent);
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SwitchFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.OpendaylightInventoryListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNodeRegistration
+ * Class represents {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}
+ * {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener} in Operational/DataStore for ADD / REMOVE
+ * actions which are represented connect / disconnect OF actions. Connect functionality are expecting
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Sep 5, 2014
+ */
+public interface StatNodeRegistration extends OpendaylightInventoryListener, AutoCloseable {
+
+ /**
+ * Method contains {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode} registration to {@link StatisticsManager}
+ * for permanently collecting statistics by {@link StatPermCollector} and
+ * as a prevention to use a validation check to the Operational/DS for identify
+ * connected {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}.
+ *
+ * @param InstanceIdentifier<SwitchFeatures> keyIdent
+ * @param FlowCapableNode data
+ * @param InstanceIdentifier<Node> nodeIdent
+ */
+ void connectFlowCapableNode(InstanceIdentifier<SwitchFeatures> keyIdent,
+ SwitchFeatures data, InstanceIdentifier<Node> nodeIdent);
+
+ /**
+ * Method cut {@link Node} registration for {@link StatPermCollector}
+ *
+ * @param InstanceIdentifier<Node> keyIdent
+ */
+ void disconnectFlowCapableNode(InstanceIdentifier<Node> keyIdent);
+}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatNotifyCommiter
+ * Definition Interface for notification implementer class rule
+ * Interface represent a contract between RPC Device Notification
+ * and Operational/DataStore commits.
+ *
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 28, 2014
+ */
+public interface StatNotifyCommiter<N extends NotificationListener> extends AutoCloseable, NotificationListener {
+
+
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import java.util.List;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatPermCollector
+ * Class implement {@link Runnable} and inside is running statistic collecting
+ * process DataObject statistics by DataObject statistics for every {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}.
+ * Every statistics wait to finish previous statistics. Only if all statistics finish,
+ * next {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}
+ * Statistics should be collecting. We are able to set minimal time for start next round cross all Network,
+ * but all depends on network possibility.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 28, 2014
+ */
+public interface StatPermCollector extends Runnable, AutoCloseable {
+
+ /**
+ * StatCapType
+ * Enum class refers ofp_statistics capabilities fields from OF Switch
+ * capabilities specification which have to come as a post HandShake
+ * information from OF Switch and Inventory Manager adds all to the
+ * Operational/DS.
+ * If the capabilities are not add (for any reason) NodeRegistrator
+ * adds all StatCapTypes for the {@link Node}.
+ */
+ public enum StatCapabTypes {
+ /**
+ * OFPC_FLOW_STATS
+ */
+ FLOW_STATS,
+ /**
+ * OFPC_TABLE_STATS
+ */
+ TABLE_STATS,
+ /**
+ * OFPC_PORT_STATS
+ */
+ PORT_STATS,
+ /**
+ * OFPC_GROUP_STATS
+ */
+ GROUP_STATS,
+ /**
+ * OFPC_QUEUE_STATS
+ */
+ QUEUE_STATS,
+ /**
+ * Meter statistics has no support from OF Switch capabilities
+ * so we have to try get statistics for it and wait for response
+ * Error or response package with results.
+ */
+ METER_STATS
+ }
+
+ /**
+ * Add new connected node for permanent statistics collecting process
+ *
+ * @param flowNode
+ * @param statTypes
+ * @param nrOfSwitchTables
+ * @return true/false if the {@link Node} added successful
+ */
+ boolean connectedNodeRegistration(InstanceIdentifier<Node> nodeIdent,
+ List<StatCapabTypes> statTypes, Short nrOfSwitchTables);
+
+ /**
+ * All disconnected Nodes need be removed from stat list Nodes
+ * @param flowNode
+ * @return true/false if the {@link Node} removed successful
+ */
+ boolean disconnectedNodeUnregistration(InstanceIdentifier<Node> nodeIdent);
+
+ /**
+ * Method return true only and only if {@link StatPermCollector} contain
+ * valid node registration in its internal {@link Node} map.
+ * Otherwise return false.
+ *
+ * @param InstanceIdentifier<FlowCapableNode> flowNode
+ * @return
+ */
+ boolean isProvidedFlowNodeActive(InstanceIdentifier<Node> nodeIdent);
+
+ /**
+ * Object notification for continue statistics collecting process.
+ * It is call from collecting allStatistics methods as a future result for
+ * Operational/DS statistic store call (does not matter in the outcome).
+ */
+ void collectNextStatistics();
+
+ /**
+ * Method returns true if collector has registered some active nodes
+ * otherwise return false.
+ *
+ * @return
+ */
+ boolean hasActiveNodes();
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+import com.google.common.base.Optional;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatRpcMsgManager
+ * It represent access point for Device statistics RPC services which are
+ * filtered for needed methods only and they are wrapped in simply way.
+ * Many statistics responses are Multipart messages, so StatRpcMsgManager
+ * provide a functionality to add all multipart msg and provides back whole
+ * stack to listener when listener catch the last Multipart msg.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 29, 2014
+ */
+public interface StatRpcMsgManager extends Runnable, AutoCloseable {
+
+ interface RpcJobsQueue extends Callable<Void> {}
+
+ /**
+ * Transaction container is definition for Multipart transaction
+ * join container for all Multipart msg with same TransactionId
+ * Input {@link DataObject} is a possible light-weight DataObject
+ * which is used for identification (e.g. Flow-> Priority,Match,Cookie,FlowId)
+ *
+ * @param <T> extends TransactionAware -
+ */
+ interface TransactionCacheContainer<T extends TransactionAware> {
+
+ void addNotif(T notification);
+
+ TransactionId getId();
+
+ NodeId getNodeId();
+
+ Optional<? extends DataObject> getConfInput();
+
+ List<T> getNotifications();
+ }
+
+ /**
+ * Method is used for check a transaction registration
+ * for multipart cache holder
+ *
+ * @param TransactionId id
+ * @return true if the transaction has been correctly registered
+ */
+ Future<Boolean> isExpectedStatistics(TransactionId id, NodeId nodeId);
+
+ /**
+ * Method converts {@link java.util.concurrent.Future} object to listenenable future which
+ * is registered for Multipart Notification Statistics Collecting processing.
+ *
+ * @param future - result every Device RPC call
+ */
+ <T extends TransactionAware, D extends DataObject> void registrationRpcFutureCallBack(Future<RpcResult<T>> future, D inputObj, NodeRef ref);
+
+ /**
+ * Method adds Notification which is marked as Multipart to the transaction cash
+ * to wait for the last one.
+ *
+ * @param notification
+ */
+ <T extends TransactionAware> void addNotification(T notification, NodeId nodeId);
+
+ /**
+ * The last Multipart should inform code about possibility to take all previous
+ * messages for next processing. The method take all msg and possible input object
+ * and build all to TransactionCacheContainer Object to return. This process clean
+ * all instances in Cache.
+ *
+ * @param TransactionId id
+ * @return TransactionCacheContainer
+ */
+ Future<Optional<TransactionCacheContainer<?>>> getTransactionCacheContainer(TransactionId id, NodeId nodeId);
+
+ /**
+ * Method wraps OpendaylightGroupStatisticsService.getAllGroupStatistics
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getAllGroupsStat(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightGroupStatisticsService.getGroupDescription
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getAllGroupsConfStats(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightMeterStatisticsService.getGroupFeatures
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getGroupFeaturesStat(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightMeterStatisticsService.getAllMeterStatistics
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getAllMetersStat(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightMeterStatisticsService.getAllMeterConfigStatistics
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getAllMeterConfigStat(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightMeterStatisticsService.getMeterFeatures
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getMeterFeaturesStat(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightFlowStatisticsService.getAllFlowsStatisticsFromAllFlowTables
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getAllFlowsStat(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightFlowStatisticsService.getAggregateFlowStatisticsFromFlowTableForAllFlows
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ * @param TableId tableId
+ */
+ void getAggregateFlowStat(NodeRef nodeRef, TableId tableId);
+
+ /**
+ * Method wraps OpendaylightPortStatisticsService.getAllNodeConnectorsStatistics
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getAllPortsStat(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightFlowTableStatisticsService.getFlowTablesStatistics
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getAllTablesStat(NodeRef nodeRef);
+
+ /**
+ * Method wraps OpendaylightQueueStatisticsService.getAllQueuesStatisticsFromAllPorts
+ * and registers to Transaction Cache
+ *
+ * @param NodeRef nodeRef
+ */
+ void getAllQueueStat(NodeRef nodeRef);
+
+}
+
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdate;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdate;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsUpdate;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupDescStatsUpdated;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupFeaturesUpdated;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupStatisticsUpdated;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterConfigStatsUpdated;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterFeaturesUpdated;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterStatisticsUpdated;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.NodeConnectorStatisticsUpdate;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.QueueStatisticsUpdate;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is responsible for listening for statistics update notifications and
- * routing them to the appropriate NodeStatisticsHandler.
-
- * TODO: Need to add error message listener and clean-up the associated tx id
- * if it exists in the tx-id cache.
- * @author vishnoianil
- */
-public class StatisticsListener implements OpendaylightGroupStatisticsListener,
- OpendaylightMeterStatisticsListener,
- OpendaylightFlowStatisticsListener,
- OpendaylightPortStatisticsListener,
- OpendaylightFlowTableStatisticsListener,
- OpendaylightQueueStatisticsListener{
-
- private final static Logger sucLogger = LoggerFactory.getLogger(StatisticsListener.class);
- private final StatisticsProvider statisticsManager;
-
- /**
- * default ctor
- * @param manager
- */
- public StatisticsListener(final StatisticsProvider manager){
- this.statisticsManager = manager;
- }
-
- @Override
- public void onMeterConfigStatsUpdated(final MeterConfigStatsUpdated notification) {
- final NodeStatisticsHandler handler = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (handler != null) {
- handler.updateMeterConfigStats(notification, notification.getMeterConfigStats());
- }
- }
-
- @Override
- public void onMeterStatisticsUpdated(MeterStatisticsUpdated notification) {
- final NodeStatisticsHandler handler = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (handler != null) {
- handler.updateMeterStats(notification, notification.getMeterStats());
- }
- }
-
- @Override
- public void onGroupDescStatsUpdated(GroupDescStatsUpdated notification) {
- final NodeStatisticsHandler handler = statisticsManager.getStatisticsHandler(notification.getId());
- if (handler != null) {
- handler.updateGroupDescStats(notification, notification.getGroupDescStats());
- }
- }
-
- @Override
- public void onGroupStatisticsUpdated(GroupStatisticsUpdated notification) {
- final NodeStatisticsHandler handler = statisticsManager.getStatisticsHandler(notification.getId());
- if (handler != null) {
- handler.updateGroupStats(notification, notification.getGroupStats());
- }
- }
-
- @Override
- public void onMeterFeaturesUpdated(MeterFeaturesUpdated notification) {
- final NodeStatisticsHandler sna = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (sna != null) {
- sna.updateMeterFeatures(notification);
- }
- }
-
- @Override
- public void onGroupFeaturesUpdated(GroupFeaturesUpdated notification) {
- final NodeStatisticsHandler sna = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (sna != null) {
- sna.updateGroupFeatures(notification);
- }
- }
-
- @Override
- public void onFlowsStatisticsUpdate(final FlowsStatisticsUpdate notification) {
- sucLogger.debug("Received flow stats update : {}",notification.toString());
- final NodeStatisticsHandler sna = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (sna != null) {
- sna.updateFlowStats(notification, notification.getFlowAndStatisticsMapList());
- }
- }
-
- @Override
- public void onAggregateFlowStatisticsUpdate(AggregateFlowStatisticsUpdate notification) {
- final NodeStatisticsHandler handler = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (handler != null) {
- handler.updateAggregateFlowStats(notification, notification);
- }
- }
-
- @Override
- public void onNodeConnectorStatisticsUpdate(NodeConnectorStatisticsUpdate notification) {
- final NodeStatisticsHandler handler = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (handler != null) {
- handler.updateNodeConnectorStats(notification, notification.getNodeConnectorStatisticsAndPortNumberMap());
- }
- }
-
- @Override
- public void onFlowTableStatisticsUpdate(FlowTableStatisticsUpdate notification) {
- final NodeStatisticsHandler handler = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (handler != null) {
- handler.updateFlowTableStats(notification, notification.getFlowTableAndStatisticsMap());
- }
- }
-
- @Override
- public void onQueueStatisticsUpdate(QueueStatisticsUpdate notification) {
- final NodeStatisticsHandler handler = this.statisticsManager.getStatisticsHandler(notification.getId());
- if (handler != null) {
- handler.updateQueueStats(notification, notification.getQueueIdAndStatisticsMap());
- }
- }
-}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ * StatisticsManager
+ * It represent a central point for whole module. Implementation
+ * StatisticsManager registers all Operation/DS {@link StatNotifyCommiter} and
+ * Config/DS {@StatListeningCommiter}, as well as {@link StatPermCollector}
+ * for statistic collecting and {@link StatRpcMsgManager} as Device RPCs provider.
+ * In next, StatisticsManager provides all DS contact Transaction services.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 27, 2014
+ */
+public interface StatisticsManager extends AutoCloseable, TransactionChainListener {
+
+ /**
+ * StatDataStoreOperation
+ * Interface represent functionality to submit changes to DataStore.
+ * Internal {@link TransactionChainListener} joining all DS commits
+ * to Set of chained changes for prevent often DataStore touches.
+ */
+ public interface StatDataStoreOperation {
+
+ /**
+ * Apply all read / write (put|merge) operation
+ * for DataStore
+ * @param {@link ReadWriteTransaction} tx
+ */
+ void applyOperation(ReadWriteTransaction tx);
+
+ }
+
+ /**
+ * Method starts whole StatisticManager functionality
+ *
+ * @param {@link NotificationProviderService} notifService
+ * @param {@link RpcConsumerRegistry} rpcRegistry
+ * @param minReqNetMonitInt
+ */
+ void start(final NotificationProviderService notifService,
+ final RpcConsumerRegistry rpcRegistry, final long minReqNetMonitInt);
+
+ /**
+ * Method provides read/write DataStore functionality cross applyOperation
+ * defined in {@link StatDataStoreOperation}
+ *
+ * @param inventoryOper - operation for DataStore
+ */
+ void enqueue(final StatDataStoreOperation inventoryOper);
+
+ /**
+ * Method wraps {@link StatisticCollector}.isProvidedFlowNodeActive method
+ * to provide parallel statCollection process for Set of Nodes. So it has to
+ * identify correct Node Set by NodeIdentifier
+ *
+ * @param nodeIdent
+ */
+ boolean isProvidedFlowNodeActive(InstanceIdentifier<Node> nodeIdent);
+
+ /**
+ * Method wraps {@link StatPermCollector}.collectNextStatistics to provide
+ * parallel statCollection process for Set of Nodes. So it has to
+ * identify correct Node Set by NodeIdentifier.
+ *
+ * @param nodeIdent
+ */
+ void collectNextStatistics(InstanceIdentifier<Node> nodeIdent);
+
+ /**
+ * Method wraps {@link StatPermCollector}.connectedNodeRegistration to provide
+ * parallel statCollection process for Set of Nodes. So it has to
+ * connect node to new or not full Node statCollector Set.
+ *
+ * @param nodeIdent
+ * @param statTypes
+ * @param nrOfSwitchTables
+ */
+ void connectedNodeRegistration(InstanceIdentifier<Node> nodeIdent,
+ List<StatCapabTypes> statTypes, Short nrOfSwitchTables);
+
+ /**
+ * Method wraps {@link StatPermCollector}.disconnectedNodeUnregistration to provide
+ * parallel statCollection process for Set of Nodes. So it has to identify
+ * correct collector for disconnect node.
+ *
+ * @param nodeIdent
+ */
+ void disconnectedNodeUnregistration(InstanceIdentifier<Node> nodeIdent);
+
+ /**
+ * Method provides access to Device RPC methods by wrapped
+ * internal method. In next {@link StatRpcMsgManager} is registered all
+ * Multipart device msg response and joining all to be able run all
+ * collected statistics in one time (easy identification Data for delete)
+ *
+ * @return {@link StatRpcMsgManager}
+ */
+ StatRpcMsgManager getRpcMsgManager();
+
+ /**
+ * Define Method : {@link org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode}
+ * Operational/DS data change listener -> impl. target -> register FlowCapableNode to Statistic Collecting process
+ * @return {@link StatNodeRegistration}
+ */
+ StatNodeRegistration getNodeRegistrator();
+
+ /**
+ * Define Method : Flow Config/DS data change listener -> impl. target ->
+ * -> make pair between Config/DS FlowId and Device Flow response Hash
+ * @return
+ */
+ StatListeningCommiter<Flow, OpendaylightFlowStatisticsListener> getFlowListenComit();
+
+ /**
+ * Define Method : Meter Config/DS data change listener and Operation/DS notify commit
+ * functionality
+ * @return
+ */
+ StatListeningCommiter<Meter, OpendaylightMeterStatisticsListener> getMeterListenCommit();
+
+ /**
+ * Define Method : Group Config/DS data change listener and Operation/DS notify commit
+ * functionality
+ * @return
+ */
+ StatListeningCommiter<Group, OpendaylightGroupStatisticsListener> getGroupListenCommit();
+
+ /**
+ * Define Method : Queue Config/DS change listener and Operation/DS notify commit functionality
+ * @return
+ */
+ StatListeningCommiter<Queue, OpendaylightQueueStatisticsListener> getQueueNotifyCommit();
+
+ /**
+ * Define Method : Table Operation/DS notify commit functionality
+ * @return
+ */
+ StatNotifyCommiter<OpendaylightFlowTableStatisticsListener> getTableNotifCommit();
+
+ /**
+ * Define Method : Port Operation/DS notify commit functionality
+ * @return
+ */
+ StatNotifyCommiter<OpendaylightPortStatisticsListener> getPortNotifyCommit();
+
+}
+
package org.opendaylight.controller.md.statistics.manager;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.statistics.manager.impl.StatisticsManagerImpl;
import org.opendaylight.controller.sal.binding.api.AbstractBindingAwareProvider;
import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
import org.osgi.framework.BundleContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Statistics Manager Activator
+ *
+ * OSGi bundle activator
+ *
+ */
public class StatisticsManagerActivator extends AbstractBindingAwareProvider {
- private StatisticsProvider statsProvider;
+
+ private final static Logger LOG = LoggerFactory.getLogger(StatisticsManagerActivator.class);
+
+ /* TODO move it to ConfigSubsystem */
+ private static final long DEFAULT_MIN_REQUEST_NET_MONITOR_INTERVAL = 3000L;
+ private static final int MAX_NODES_FOR_COLLECTOR = 16;
+
+ private StatisticsManager statsProvider;
@Override
- public void onSessionInitiated(ProviderContext session) {
- final DataProviderService dps = session.getSALService(DataProviderService.class);
- final NotificationProviderService nps = session.getSALService(NotificationProviderService.class);
+ public void onSessionInitiated(final ProviderContext session) {
+ LOG.info("StatisticsManagerActivator initialization.");
+ try {
+ final DataBroker dataBroker = session.getSALService(DataBroker.class);
+ final NotificationProviderService notifService =
+ session.getSALService(NotificationProviderService.class);
+ statsProvider = new StatisticsManagerImpl(dataBroker, MAX_NODES_FOR_COLLECTOR);
+ statsProvider.start(notifService, session, DEFAULT_MIN_REQUEST_NET_MONITOR_INTERVAL);
+ LOG.info("StatisticsManagerActivator started successfully.");
+ }
+ catch (final Exception e) {
+ LOG.error("Unexpected error by initialization of StatisticsManagerActivator", e);
+ stopImpl(null);
+ }
+ }
- statsProvider = new StatisticsProvider(dps);
- statsProvider.start(nps, session);
+ @VisibleForTesting
+ StatisticsManager getStatisticManager() {
+ return statsProvider;
}
@Override
- protected void stopImpl(BundleContext context) {
+ protected void stopImpl(final BundleContext context) {
if (statsProvider != null) {
- statsProvider.close();
+ try {
+ statsProvider.close();
+ }
+ catch (final Exception e) {
+ LOG.error("Unexpected error by stopping StatisticsManagerActivator", e);
+ }
statsProvider = null;
}
+ LOG.info("StatisticsManagerActivator stoped.");
}
}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.Collection;
-import java.util.Timer;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.binding.api.data.DataChangeListener;
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Following are main responsibilities of the class:
- * 1) Invoke statistics request thread to send periodic statistics request to all the
- * flow capable switch connected to the controller. It sends statistics request for
- * Group,Meter,Table,Flow,Queue,Aggregate stats.
- *
- * 2) Invoke statistics ager thread, to clean up all the stale statistics data from
- * operational data store.
- *
- * @author avishnoi@in.ibm.com
- *
- */
-public class StatisticsProvider implements AutoCloseable {
- private static final Logger spLogger = LoggerFactory.getLogger(StatisticsProvider.class);
-
- private final ConcurrentMap<NodeId, NodeStatisticsHandler> handlers = new ConcurrentHashMap<>();
- private final Timer timer = new Timer("statistics-manager", true);
- private final DataProviderService dps;
-
- private OpendaylightGroupStatisticsService groupStatsService;
-
- private OpendaylightMeterStatisticsService meterStatsService;
-
- private OpendaylightFlowStatisticsService flowStatsService;
-
- private OpendaylightPortStatisticsService portStatsService;
-
- private OpendaylightFlowTableStatisticsService flowTableStatsService;
-
- private OpendaylightQueueStatisticsService queueStatsService;
-
- private final StatisticsRequestScheduler srScheduler;
-
- public StatisticsProvider(final DataProviderService dataService) {
- this.dps = Preconditions.checkNotNull(dataService);
- this.srScheduler = new StatisticsRequestScheduler();
- }
-
- private final StatisticsListener updateCommiter = new StatisticsListener(StatisticsProvider.this);
-
- private ListenerRegistration<NotificationListener> listenerRegistration;
-
- private ListenerRegistration<DataChangeListener> flowCapableTrackerRegistration;
-
- public void start(final NotificationProviderService nps, final RpcConsumerRegistry rpcRegistry) {
-
- // Get Group/Meter statistics service instances
- groupStatsService = rpcRegistry.getRpcService(OpendaylightGroupStatisticsService.class);
- meterStatsService = rpcRegistry.getRpcService(OpendaylightMeterStatisticsService.class);
- flowStatsService = rpcRegistry.getRpcService(OpendaylightFlowStatisticsService.class);
- portStatsService = rpcRegistry.getRpcService(OpendaylightPortStatisticsService.class);
- flowTableStatsService = rpcRegistry.getRpcService(OpendaylightFlowTableStatisticsService.class);
- queueStatsService = rpcRegistry.getRpcService(OpendaylightQueueStatisticsService.class);
- this.srScheduler.start();
-
- // Start receiving notifications
- this.listenerRegistration = nps.registerNotificationListener(this.updateCommiter);
-
- // Register for switch connect/disconnect notifications
- final InstanceIdentifier<FlowCapableNode> fcnId = InstanceIdentifier.builder(Nodes.class)
- .child(Node.class).augmentation(FlowCapableNode.class).build();
- spLogger.debug("Registering FlowCapable tracker to {}", fcnId);
- this.flowCapableTrackerRegistration = dps.registerDataChangeListener(fcnId,
- new FlowCapableTracker(this, fcnId));
-
- spLogger.info("Statistics Provider started.");
- }
-
- /**
- * Get the handler for a particular node.
- *
- * @param nodeId source node
- * @return Node statistics handler for that node. Null if the statistics should
- * not handled.
- */
- public final NodeStatisticsHandler getStatisticsHandler(final NodeId nodeId) {
- Preconditions.checkNotNull(nodeId);
- NodeStatisticsHandler handler = handlers.get(nodeId);
- if (handler == null) {
- spLogger.info("Attempted to get non-existing handler for {}", nodeId);
- }
- return handler;
- }
-
- @Override
- public void close() {
- try {
- if (this.listenerRegistration != null) {
- this.listenerRegistration.close();
- this.listenerRegistration = null;
- }
- if (this.flowCapableTrackerRegistration != null) {
- this.flowCapableTrackerRegistration.close();
- this.flowCapableTrackerRegistration = null;
- }
- timer.cancel();
- } catch (Exception e) {
- spLogger.warn("Failed to stop Statistics Provider completely", e);
- } finally {
- spLogger.info("Statistics Provider stopped.");
- }
- }
-
- void startNodeHandlers(final Collection<NodeKey> addedNodes) {
- for (NodeKey key : addedNodes) {
- if (handlers.containsKey(key.getId())) {
- spLogger.warn("Attempted to start already-existing handler for {}, very strange", key.getId());
- continue;
- }
-
- final NodeStatisticsHandler h = new NodeStatisticsHandler(dps, key,
- flowStatsService, flowTableStatsService, groupStatsService,
- meterStatsService, portStatsService, queueStatsService,srScheduler);
- final NodeStatisticsHandler old = handlers.putIfAbsent(key.getId(), h);
- if (old == null) {
- spLogger.debug("Started node handler for {}", key.getId());
- h.start(timer);
- } else {
- spLogger.debug("Prevented race on handler for {}", key.getId());
- }
- }
- }
-
- void stopNodeHandlers(final Collection<NodeKey> removedNodes) {
- for (NodeKey key : removedNodes) {
- final NodeStatisticsHandler s = handlers.remove(key.getId());
- if (s != null) {
- spLogger.debug("Stopping node handler for {}", key.getId());
- s.close();
- } else {
- spLogger.warn("Attempted to remove non-existing handler for {}, very strange", key.getId());
- }
- }
- }
-}
+++ /dev/null
-/*
- * Copyright IBM Corporation, 2013. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.TimeUnit;
-
-import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction.DataTransactionListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Main responsibility of the class is to check the MD-SAL data store read/write
- * transaction accumulation level and send statistics request if number of pending
- * read/write transactions are zero.
- * @author avishnoi@in.ibm.com
- *
- */
-@SuppressWarnings("rawtypes")
-public class StatisticsRequestScheduler implements DataTransactionListener {
-
- private static final Logger srsLogger = LoggerFactory.getLogger(StatisticsRequestScheduler.class);
- private final Timer timer = new Timer("request-monitor", true);
-
- // We need ordered retrieval, and O(1) contains operation
- private final Map<AbstractStatsTracker,Integer> requestQueue =
- Collections.synchronizedMap(new LinkedHashMap<AbstractStatsTracker,Integer>());
-
- private Long PendingTransactions;
-
- private long lastRequestTime = System.nanoTime();
-
- private static final long REQUEST_MONITOR_INTERVAL = 1000;
-
- private final TimerTask task = new TimerTask() {
- @Override
- public void run() {
- try{
- long now = System.nanoTime();
- if(now > lastRequestTime+TimeUnit.MILLISECONDS.toNanos(REQUEST_MONITOR_INTERVAL)){
- requestStatistics();
- }
- }catch (IllegalArgumentException | IllegalStateException | NullPointerException e){
- srsLogger.warn("Exception occured while sending statistics request : {}",e);
- }
- }
- };
-
- public StatisticsRequestScheduler(){
- PendingTransactions = (long) 0;
- }
-
- public void addRequestToSchedulerQueue(AbstractStatsTracker statsRequest){
- requestQueue.put(statsRequest, null);
- }
-
- public void removeRequestsFromSchedulerQueue(NodeRef node){
- AbstractStatsTracker stats = null;
- synchronized(requestQueue){
- Iterator<Map.Entry<AbstractStatsTracker, Integer>> nodesItr = requestQueue.entrySet().iterator();
- while(nodesItr.hasNext()){
- stats = nodesItr.next().getKey();
- if(stats.getNodeRef().equals(node)){
- nodesItr.remove();
- }
- }
- }
-
- }
- public AbstractStatsTracker getNextRequestFromSchedulerQueue(){
- //Remove first element
- AbstractStatsTracker stats = null;
- synchronized(requestQueue){
- Iterator<Map.Entry<AbstractStatsTracker, Integer>> nodesItr = requestQueue.entrySet().iterator();
- if(nodesItr.hasNext()){
- stats = nodesItr.next().getKey();
- srsLogger.debug("{} chosen up for execution",stats.getNodeRef());
- nodesItr.remove();
- return stats;
- }
- }
- return stats;
- }
-
- private void requestStatistics(){
- AbstractStatsTracker stats = this.getNextRequestFromSchedulerQueue();
- sendStatsRequest(stats);
- }
- @Override
- public void onStatusUpdated(DataModificationTransaction transaction, TransactionStatus status) {
-
- AbstractStatsTracker stats = null;
- synchronized(PendingTransactions){
- switch(status){
- case SUBMITED:
- this.PendingTransactions++;
- break;
- case COMMITED:
- case FAILED:
- this.PendingTransactions--;
- if(PendingTransactions == 0){
- lastRequestTime = System.nanoTime();
- stats = this.getNextRequestFromSchedulerQueue();
- }
- srsLogger.debug("Pending MD-SAL transactions : {} & Scheduler queue size : {}",this.PendingTransactions,this.requestQueue.size());
- break;
- default:
- break;
- }
- }
- sendStatsRequest(stats);
- }
-
- private void sendStatsRequest(AbstractStatsTracker stats){
- if(stats != null){
- try{
- stats.request();
- stats.increaseRequestCounter();
- }catch(Exception e){
- srsLogger.warn("Statistics request was not sent successfully. Reason : {}",e.getMessage());
- }
- }
- }
- public void start(){
- timer.schedule(task, 0, REQUEST_MONITOR_INTERVAL);
- }
-}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatListeningCommiter;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatAbstractListeneningCommiter
+ * Class is abstract implementation for all Configuration/DataStore DataChange
+ * listenable DataObjects like flows, groups, meters. It is a holder for common
+ * functionality needed by construction/destruction class and for DataChange
+ * event processing.
+ *
+ */
+public abstract class StatAbstractListenCommit<T extends DataObject, N extends NotificationListener>
+ extends StatAbstractNotifyCommit<N> implements StatListeningCommiter<T,N> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatAbstractListenCommit.class);
+
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+
+ protected final Map<InstanceIdentifier<Node>, Map<InstanceIdentifier<T>, Integer>> mapNodesForDelete = new ConcurrentHashMap<>();
+
+ private final Class<T> clazz;
+
+ private final DataBroker dataBroker;
+
+ private volatile ReadOnlyTransaction currentReadTx;
+
+ /* Constructor has to make a registration */
+ public StatAbstractListenCommit(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps, final Class<T> clazz) {
+ super(manager,nps);
+ this.clazz = Preconditions.checkNotNull(clazz, "Referenced Class can not be null");
+ Preconditions.checkArgument(db != null, "DataBroker can not be null!");
+ listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
+ getWildCardedRegistrationPath(), this, DataChangeScope.BASE);
+ this.dataBroker = db;
+ }
+
+ /**
+ * Method returns WildCarded Path which is used for registration as a listening path changes in
+ * {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener}
+ * @return
+ */
+ protected abstract InstanceIdentifier<T> getWildCardedRegistrationPath();
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changeEvent) {
+ Preconditions.checkNotNull(changeEvent,"Async ChangeEvent can not be null!");
+ /*
+ * If we have opened read transaction for configuraiton data store,
+ * we will close and null it.
+ *
+ * Latest read transaction will be allocated on another read using readLatestConfiguration
+ */
+ if(currentReadTx != null) {
+ final ReadOnlyTransaction previous = currentReadTx;
+ currentReadTx = null;
+ previous.close();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ protected void removeData(final InstanceIdentifier<?> key, final Integer value) {
+ if (clazz.equals(key.getTargetType())) {
+ final InstanceIdentifier<Node> nodeIdent = key.firstIdentifierOf(Node.class);
+ Map<InstanceIdentifier<T>, Integer> map = null;
+ if (mapNodesForDelete.containsKey(nodeIdent)) {
+ map = mapNodesForDelete.get(nodeIdent);
+ }
+ if (map == null) {
+ map = new ConcurrentHashMap<>();
+ mapNodesForDelete.put(nodeIdent, map);
+ }
+ map.put((InstanceIdentifier<T>) key, value);
+ }
+ }
+
+ @Override
+ public void cleanForDisconnect(final InstanceIdentifier<Node> nodeIdent) {
+ mapNodesForDelete.remove(nodeIdent);
+ }
+
+ @Override
+ public void close() {
+ if (listenerRegistration != null) {
+ try {
+ listenerRegistration.close();
+ } catch (final Exception e) {
+ LOG.error("Error by stop {} DataChange StatListeningCommiter.", clazz.getSimpleName(), e);
+ }
+ listenerRegistration = null;
+ }
+ }
+
+ protected final <K extends DataObject> Optional<K> readLatestConfiguration(final InstanceIdentifier<K> path) {
+ if(currentReadTx == null) {
+ currentReadTx = dataBroker.newReadOnlyTransaction();
+ }
+ try {
+ return currentReadTx.read(LogicalDatastoreType.CONFIGURATION, path).checkedGet();
+ } catch (final ReadFailedException e) {
+ return Optional.absent();
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.opendaylight.controller.md.statistics.manager.StatNotifyCommiter;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatAbstratNotifiCommiter
+ * Class is abstract implementation for all no Configuration/DataStore DataObjects
+ * and represent common functionality for all DataObject Statistics Commiters.
+ * Class defines contract between DataObject and relevant Statistics NotificationListener.
+ *
+ */
+public abstract class StatAbstractNotifyCommit<N extends NotificationListener> implements StatNotifyCommiter<N> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatAbstractNotifyCommit.class);
+
+ protected final StatisticsManager manager;
+ private ListenerRegistration<NotificationListener> notifyListenerRegistration;
+
+ public StatAbstractNotifyCommit(final StatisticsManager manager,
+ final NotificationProviderService nps) {
+ Preconditions.checkArgument(nps != null, "NotificationProviderService can not be null!");
+ this.manager = Preconditions.checkNotNull(manager, "StatisticManager can not be null!");
+ notifyListenerRegistration = nps.registerNotificationListener(getStatNotificationListener());
+ }
+
+ @Override
+ public void close() {
+ if (notifyListenerRegistration != null) {
+ try {
+ notifyListenerRegistration.close();
+ }
+ catch (final Exception e) {
+ LOG.error("Error by stop {} StatNotificationListener.", this.getClass().getSimpleName());
+ }
+ notifyListenerRegistration = null;
+ }
+ }
+
+ /**
+ * Method returns Statistics Notification Listener for relevant DataObject implementation,
+ * which is declared for {@link StatNotifyCommiter} interface.
+ *
+ * @return
+ */
+ protected abstract N getStatNotificationListener();
+
+ /**
+ * PreConfigurationCheck - Node identified by input InstanceIdentifier<Node>
+ * has to be registered in {@link org.opendaylight.controller.md.statistics.manager.StatPermCollector}
+ *
+ * @param InstanceIdentifier<Node> nodeIdent
+ */
+ protected boolean preConfigurationCheck(final InstanceIdentifier<Node> nodeIdent) {
+ Preconditions.checkNotNull(nodeIdent, "FlowCapableNode ident can not be null!");
+ return manager.isProvidedFlowNodeActive(nodeIdent);
+ }
+
+ protected void notifyToCollectNextStatistics(final InstanceIdentifier<Node> nodeIdent) {
+ Preconditions.checkNotNull(nodeIdent, "FlowCapableNode ident can not be null!");
+ manager.collectNextStatistics(nodeIdent);
+ }
+
+ /**
+ * Wrapping Future object call for {@link org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager}
+ * getTransactionCacheContainer with 10sec TimeOut.
+ * Method has returned {@link Optional} which could contains a {@link TransactionCacheContainer}
+ *
+ * @param TransactionId transId
+ * @param NodeId nodeId
+ * @return
+ */
+ protected Optional<TransactionCacheContainer<?>> getTransactionCacheContainer(final TransactionId transId, final NodeId nodeId) {
+ Optional<TransactionCacheContainer<?>> txContainer;
+ try {
+ txContainer = manager.getRpcMsgManager().getTransactionCacheContainer(transId, nodeId).get(10, TimeUnit.SECONDS);
+ }
+ catch (InterruptedException | ExecutionException | TimeoutException e) {
+ LOG.warn("Get TransactionCacheContainer fail!", e);
+ txContainer = Optional.absent();
+ }
+ return txContainer;
+ }
+
+ /**
+ * Wrapping Future object call to {@link org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager}
+ * isExpectedStatistics with 10sec TimeOut.
+ * Method has checked registration for provided {@link TransactionId} and {@link NodeId}
+ *
+ * @param TransactionId transId - Transaction identification
+ * @param NodeId nodeId - Node identification
+ * @return boolean
+ */
+ protected boolean isExpectedStatistics(final TransactionId transId, final NodeId nodeId) {
+ Boolean isExpectedStat = Boolean.FALSE;
+ try {
+ isExpectedStat = manager.getRpcMsgManager().isExpectedStatistics(transId, nodeId).get(10, TimeUnit.SECONDS);
+ }
+ catch (InterruptedException | ExecutionException | TimeoutException e) {
+ LOG.warn("Check Transaction registraion {} fail!", transId, e);
+ return false;
+ }
+ return isExpectedStat.booleanValue();
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.md.statistics.manager.impl.helper.FlowComparator;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowHashIdMapping;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowHashIdMappingBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowHashIdMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowHashIdMapBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.nodes.node.table.FlowHashIdMapKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.aggregate.flow.statistics.AggregateFlowStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.statistics.FlowStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.BiMap;
+import com.google.common.collect.HashBiMap;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatListenCommitFlow
+ * Class is a NotifyListener for FlowStatistics and DataChangeListener for Config/DataStore for Flow node.
+ * All expected (registered) FlowStatistics will be builded and commit to Operational/DataStore.
+ * DataChangeEven should call create/delete Flow in Operational/DS create process needs to pair
+ * Device Flow HashCode and FlowId from Config/DS
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatListenCommitFlow extends StatAbstractListenCommit<Flow, OpendaylightFlowStatisticsListener>
+ implements OpendaylightFlowStatisticsListener {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(StatListenCommitFlow.class);
+
+ private static final String ALIEN_SYSTEM_FLOW_ID = "#UF$TABLE*";
+
+ private static final Integer REMOVE_AFTER_MISSING_COLLECTION = 1;
+
+ private final AtomicInteger unaccountedFlowsCounter = new AtomicInteger(0);
+
+ public StatListenCommitFlow (final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps){
+ super(manager, db, nps, Flow.class);
+ }
+
+ @Override
+ protected OpendaylightFlowStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ protected InstanceIdentifier<Flow> getWildCardedRegistrationPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Table.class).child(Flow.class);
+ }
+
+ @Override
+ public void onAggregateFlowStatisticsUpdate(final AggregateFlowStatisticsUpdate notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - AggregateFlowStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+ /* check flow Capable Node and write statistics */
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (( ! txContainer.isPresent()) || txContainer.get().getNotifications() == null) {
+ return;
+ }
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (( ! inputObj.isPresent()) || ( ! (inputObj.get() instanceof Table))) {
+ return;
+ }
+ final Table table = (Table) inputObj.get();
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if (notif instanceof AggregateFlowStatisticsUpdate) {
+ final AggregateFlowStatisticsData stats = new AggregateFlowStatisticsDataBuilder()
+ .setAggregateFlowStatistics(new AggregateFlowStatisticsBuilder(notification).build()).build();
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId)).augmentation(FlowCapableNode.class);
+ final InstanceIdentifier<Table> tableRef = fNodeIdent.child(Table.class, table.getKey());
+ final InstanceIdentifier<AggregateFlowStatisticsData> tableStatRef = tableRef
+ .augmentation(AggregateFlowStatisticsData.class);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ } catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ return;
+ }
+ if (fNode.isPresent()) {
+ ensureTable(tx, table.getId(), tableRef);
+ tx.put(LogicalDatastoreType.OPERATIONAL, tableStatRef, stats);
+ }
+ }
+ }
+ }
+ });
+ }
+
+ public void ensureTable(final ReadWriteTransaction tx, final Short tableId, final InstanceIdentifier<Table> tableRef) {
+ final Table tableNew = new TableBuilder().setId(tableId).build();
+ tx.merge(LogicalDatastoreType.OPERATIONAL, tableRef, tableNew);
+ }
+
+ @Override
+ public void onFlowsStatisticsUpdate(final FlowsStatisticsUpdate notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - FlowsStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ LOG.trace("Next notification for join txId {}", transId);
+ return;
+ }
+ /* add flow's statistics */
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (( ! txContainer.isPresent()) || txContainer.get().getNotifications() == null) {
+ return;
+ }
+ final List<FlowAndStatisticsMapList> flowStats = new ArrayList<FlowAndStatisticsMapList>(10);
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId));
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if (notif instanceof FlowsStatisticsUpdate) {
+ final List<FlowAndStatisticsMapList> notifList =
+ ((FlowsStatisticsUpdate) notif).getFlowAndStatisticsMapList();
+ if (notifList != null) {
+ flowStats.addAll(notifList);
+ }
+ }
+ }
+
+ statsFlowCommitAll(flowStats, nodeIdent, tx);
+ /* cleaning all not cached hash collisions */
+ final Map<InstanceIdentifier<Flow>, Integer> listAliens = mapNodesForDelete.get(nodeIdent);
+ if (listAliens != null) {
+ for (final Entry<InstanceIdentifier<Flow>, Integer> nodeForDelete : listAliens.entrySet()) {
+ final Integer lifeIndex = nodeForDelete.getValue();
+ if (nodeForDelete.getValue() > 0) {
+ nodeForDelete.setValue(Integer.valueOf(lifeIndex.intValue() - 1));
+ } else {
+ final InstanceIdentifier<Flow> flowNodeIdent = nodeForDelete.getKey();
+ mapNodesForDelete.get(nodeIdent).remove(flowNodeIdent);
+ tx.delete(LogicalDatastoreType.OPERATIONAL, flowNodeIdent);
+ }
+ }
+ }
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent);
+ }
+ });
+ }
+
+ private void statsFlowCommitAll(final List<FlowAndStatisticsMapList> list,
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+
+ final Optional<FlowCapableNode> fNode;
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read FlowCapableNode {} in Operational/DS fail! Statistic scan not be updated.", nodeIdent, e);
+ return;
+ }
+ if ( ! fNode.isPresent()) {
+ LOG.trace("FlowCapableNode {} is not presented in Operational/DS. Statisticscan not be updated.", nodeIdent);
+ return;
+ }
+
+ final NodeUpdateState nodeState = new NodeUpdateState(fNodeIdent,fNode.get());
+
+ for (final FlowAndStatisticsMapList flowStat : list) {
+ final TableKey tableKey = new TableKey(flowStat.getTableId());
+ final TableFlowUpdateState tableState = nodeState.getTable(tableKey, tx);
+ tableState.reportFlow(flowStat,tx);
+ }
+
+ for (final TableFlowUpdateState table : nodeState.getTables()) {
+ table.removeUnreportedFlows(tx);
+ }
+ }
+
+ /**
+ * Method adds statistics to Flow
+ *
+ * @param flowBuilder
+ * @param deviceFlow
+ */
+ private void addStatistics(final FlowBuilder flowBuilder, final FlowAndStatisticsMapList deviceFlow) {
+ final FlowAndStatisticsMapListBuilder stats = new FlowAndStatisticsMapListBuilder(deviceFlow);
+ final FlowStatisticsBuilder flowStatisticsBuilder = new FlowStatisticsBuilder(stats.build());
+ final FlowStatisticsDataBuilder flowStatisticsData =new FlowStatisticsDataBuilder();
+ flowStatisticsData.setFlowStatistics(flowStatisticsBuilder.build());
+ flowBuilder.addAugmentation(FlowStatisticsData.class, flowStatisticsData.build());
+ }
+
+ /**
+ * build pseudoUnique hashCode for flow in table
+ * for future easy identification
+ *
+ * FIXME: we expect same version for YANG models for all clusters and that has to be fix
+ * FIXME: CREATE BETTER KEY - for flow (MATCH is the problem)
+ */
+ static String buildFlowIdOperKey(final FlowAndStatisticsMapList deviceFlow) {
+ return new StringBuffer().append(deviceFlow.getMatch())
+ .append(deviceFlow.getPriority()).append(deviceFlow.getCookie().getValue()).toString();
+ }
+
+ private class NodeUpdateState {
+ private final InstanceIdentifier<FlowCapableNode> nodeIdentifier;
+ private final Map<TableKey,TableFlowUpdateState> tables = new HashMap<>();
+
+ public NodeUpdateState(final InstanceIdentifier<FlowCapableNode> fNodeIdent, final FlowCapableNode flowCapableNode) {
+ nodeIdentifier = fNodeIdent;
+ final List<Table> tableList = flowCapableNode.getTable();
+ if(tableList != null) {
+ for (final Table table : tableList) {
+ final TableKey tableKey = table.getKey();
+ tables.put(tableKey, new TableFlowUpdateState(nodeIdentifier.child(Table.class,tableKey),table));
+ }
+ }
+ }
+
+ public Iterable<TableFlowUpdateState> getTables() {
+ return tables.values();
+ }
+
+ TableFlowUpdateState getTable(final TableKey key,final ReadWriteTransaction tx) {
+ TableFlowUpdateState table = tables.get(key);
+ if(table == null) {
+ table = new TableFlowUpdateState(nodeIdentifier.child(Table.class, key), null);
+ tables.put(key, table);
+ }
+ return table;
+ }
+ }
+
+ private class TableFlowUpdateState {
+
+ private boolean tableEnsured = false;
+ final KeyedInstanceIdentifier<Table, TableKey> tableRef;
+ final TableKey tableKey;
+ final BiMap<FlowHashIdMapKey, FlowId> flowIdByHash;
+ List<Flow> configFlows;
+
+ public TableFlowUpdateState(final KeyedInstanceIdentifier<Table, TableKey> tablePath, final Table table) {
+ tableRef = tablePath;
+ tableKey = tablePath.getKey();
+ flowIdByHash = HashBiMap.create();
+ if(table != null) {
+ final FlowHashIdMapping flowHashMapping = table.getAugmentation(FlowHashIdMapping.class);
+ if (flowHashMapping != null) {
+ final List<FlowHashIdMap> flowHashMap = flowHashMapping.getFlowHashIdMap() != null
+ ? flowHashMapping.getFlowHashIdMap() : Collections.<FlowHashIdMap> emptyList();
+ for (final FlowHashIdMap flowHashId : flowHashMap) {
+ try {
+ flowIdByHash.put(flowHashId.getKey(), flowHashId.getFlowId());
+ } catch (final Exception e) {
+ LOG.warn("flow hashing hit a duplicate for {} -> {}", flowHashId.getKey(), flowHashId.getFlowId());
+ }
+ }
+ }
+ }
+ }
+
+ private void ensureTableFowHashIdMapping(final ReadWriteTransaction tx) {
+ if( ! tableEnsured) {
+ ensureTable(tx, tableKey.getId(), tableRef);
+ final FlowHashIdMapping emptyMapping = new FlowHashIdMappingBuilder()
+ .setFlowHashIdMap(Collections.<FlowHashIdMap> emptyList()).build();
+ tx.merge(LogicalDatastoreType.OPERATIONAL, tableRef.augmentation(FlowHashIdMapping.class), emptyMapping);
+ tableEnsured = true;
+ }
+ }
+
+ private FlowKey searchInConfiguration(final FlowAndStatisticsMapList flowStat, final ReadWriteTransaction trans) {
+ initConfigFlows(trans);
+ final Iterator<Flow> it = configFlows.iterator();
+ while(it.hasNext()) {
+ final Flow cfgFlow = it.next();
+ final FlowKey cfgKey = cfgFlow.getKey();
+ if(flowIdByHash.inverse().containsKey(cfgKey)) {
+ it.remove();
+ } else if(FlowComparator.flowEquals(flowStat, cfgFlow)) {
+ it.remove();
+ return cfgKey;
+ }
+ }
+ return null;
+ }
+
+ private void initConfigFlows(final ReadWriteTransaction trans) {
+ final Optional<Table> table = readLatestConfiguration(tableRef);
+ List<Flow> localList = null;
+ if(table.isPresent()) {
+ localList = table.get().getFlow();
+ }
+ if(localList == null) {
+ configFlows = Collections.emptyList();
+ } else {
+ configFlows = new LinkedList<>(localList);
+ }
+ }
+
+ private FlowKey getFlowKeyAndRemoveHash(final FlowHashIdMapKey key) {
+ final FlowId ret = flowIdByHash.get(key);
+ if(ret != null) {
+ flowIdByHash.remove(key);
+ return new FlowKey(ret);
+ }
+ return null;
+ }
+
+ /* Returns FlowKey which doesn't exist in any DataStore for now */
+ private FlowKey makeAlienFlowKey() {
+ final StringBuilder sBuilder = new StringBuilder(ALIEN_SYSTEM_FLOW_ID)
+ .append(tableKey.getId()).append("-").append(unaccountedFlowsCounter.incrementAndGet());
+ final FlowId flowId = new FlowId(sBuilder.toString());
+ return new FlowKey(flowId);
+ }
+
+ private Map<FlowHashIdMapKey, FlowId> getRemovalList() {
+ return flowIdByHash;
+ }
+
+ void reportFlow(final FlowAndStatisticsMapList flowStat, final ReadWriteTransaction trans) {
+ ensureTableFowHashIdMapping(trans);
+ final FlowHashIdMapKey hashingKey = new FlowHashIdMapKey(buildFlowIdOperKey(flowStat));
+ FlowKey flowKey = getFlowKeyAndRemoveHash(hashingKey);
+ if (flowKey == null) {
+ flowKey = searchInConfiguration(flowStat, trans);
+ if ( flowKey == null) {
+ flowKey = makeAlienFlowKey();
+ }
+ updateHashCache(trans,flowKey,hashingKey);
+ }
+ final FlowBuilder flowBuilder = new FlowBuilder(flowStat);
+ flowBuilder.setKey(flowKey);
+ addStatistics(flowBuilder, flowStat);
+ final InstanceIdentifier<Flow> flowIdent = tableRef.child(Flow.class, flowKey);
+ trans.put(LogicalDatastoreType.OPERATIONAL, flowIdent, flowBuilder.build());
+ /* check life for Alien flows */
+ if (flowKey.getId().getValue().startsWith(ALIEN_SYSTEM_FLOW_ID)) {
+ removeData(flowIdent, REMOVE_AFTER_MISSING_COLLECTION);
+ }
+ }
+
+ /* Build and deploy new FlowHashId map */
+ private void updateHashCache(final ReadWriteTransaction trans, final FlowKey flowKey, final FlowHashIdMapKey hashingKey) {
+ final FlowHashIdMapBuilder flHashIdMap = new FlowHashIdMapBuilder();
+ flHashIdMap.setFlowId(flowKey.getId());
+ flHashIdMap.setKey(hashingKey);
+ final KeyedInstanceIdentifier<FlowHashIdMap, FlowHashIdMapKey> flHashIdent = tableRef
+ .augmentation(FlowHashIdMapping.class).child(FlowHashIdMap.class, hashingKey);
+ /* Add new FlowHashIdMap */
+ trans.put(LogicalDatastoreType.OPERATIONAL, flHashIdent, flHashIdMap.build());
+ }
+
+ void removeUnreportedFlows(final ReadWriteTransaction tx) {
+ final InstanceIdentifier<Node> nodeIdent = tableRef.firstIdentifierOf(Node.class);
+ final List<InstanceIdentifier<Flow>> listMissingConfigFlows = notStatReportedConfigFlows();
+ final Map<InstanceIdentifier<Flow>, Integer> nodeDeleteMap = mapNodesForDelete.get(nodeIdent);
+ final Map<FlowHashIdMapKey, FlowId> listForRemove = getRemovalList();
+ for (final Entry<FlowHashIdMapKey, FlowId> entryForRemove : listForRemove.entrySet()) {
+ final FlowKey flowKey = new FlowKey(entryForRemove.getValue());
+ final InstanceIdentifier<Flow> flowRef = tableRef.child(Flow.class, flowKey);
+ if (nodeDeleteMap != null && flowKey.getId().getValue().startsWith(ALIEN_SYSTEM_FLOW_ID)) {
+ final Integer lifeIndex = nodeDeleteMap.get(flowRef);
+ if (lifeIndex > 0) {
+ break;
+ } else {
+ nodeDeleteMap.remove(flowRef);
+ }
+ } else {
+ if (listMissingConfigFlows.remove(flowRef)) {
+ break; // we probably lost some multipart msg
+ }
+ }
+ final InstanceIdentifier<FlowHashIdMap> flHashIdent =
+ tableRef.augmentation(FlowHashIdMapping.class).child(FlowHashIdMap.class, entryForRemove.getKey());
+ tx.delete(LogicalDatastoreType.OPERATIONAL, flowRef);
+ tx.delete(LogicalDatastoreType.OPERATIONAL, flHashIdent);
+ }
+ }
+
+ List<InstanceIdentifier<Flow>> notStatReportedConfigFlows() {
+ if (configFlows != null) {
+ final List<InstanceIdentifier<Flow>> returnList = new ArrayList<>(configFlows.size());
+ for (final Flow confFlow : configFlows) {
+ final InstanceIdentifier<Flow> confFlowIdent = tableRef.child(Flow.class, confFlow.getKey());
+ returnList.add(confFlowIdent);
+ }
+ return returnList;
+ }
+ return Collections.emptyList();
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupDescStatsUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupFeaturesUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupStatisticsUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.desc.GroupDescBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.statistics.GroupStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.statistics.GroupStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatListenCommitGroup
+ * Class is a NotifyListener for GroupStatistics and DataChangeListener for Config/DataStore for Group node.
+ * All expected (registered) GroupStatistics will be builded and commit to Operational/DataStore.
+ * DataChangeEven should call create/delete Group in Operational/DS
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatListenCommitGroup extends StatAbstractListenCommit<Group, OpendaylightGroupStatisticsListener>
+ implements OpendaylightGroupStatisticsListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatListenCommitMeter.class);
+
+ public StatListenCommitGroup(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps) {
+ super(manager, db, nps, Group.class);
+ }
+
+ @Override
+ protected OpendaylightGroupStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ protected InstanceIdentifier<Group> getWildCardedRegistrationPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Group.class);
+ }
+
+ @Override
+ public void onGroupDescStatsUpdated(final GroupDescStatsUpdated notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - GroupDescStatsUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ if (notification.isMoreReplies()) {
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ return;
+ }
+ final List<GroupDescStats> groupStats = notification.getGroupDescStats() != null
+ ? new ArrayList<>(notification.getGroupDescStats()) : new ArrayList<GroupDescStats>(10);
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (txContainer.isPresent()) {
+ final List<? extends TransactionAware> cacheNotifs =
+ txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if (notif instanceof GroupDescStatsUpdated) {
+ groupStats.addAll(((GroupDescStatsUpdated) notif).getGroupDescStats());
+ }
+ }
+ }
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ statGroupDescCommit(groupStats, nodeIdent, tx);
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent);
+ }
+ });
+ }
+
+ @Override
+ public void onGroupFeaturesUpdated(final GroupFeaturesUpdated notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - MeterFeaturesUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ if (notification.isMoreReplies()) {
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ return;
+ }
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! txContainer.isPresent()) {
+ return;
+ }
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ notifyToCollectNextStatistics(nodeIdent);
+ final GroupFeatures stats = new GroupFeaturesBuilder(notification).build();
+ final InstanceIdentifier<GroupFeatures> groupFeatureIdent = nodeIdent
+ .augmentation(NodeGroupFeatures.class).child(GroupFeatures.class);
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if (node.isPresent()) {
+ tx.put(LogicalDatastoreType.OPERATIONAL, groupFeatureIdent, stats);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onGroupStatisticsUpdated(final GroupStatisticsUpdated notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - GroupStatisticsUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ if (notification.isMoreReplies()) {
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ return;
+ }
+ final List<GroupStats> groupStats = notification.getGroupStats() != null
+ ? new ArrayList<>(notification.getGroupStats()) : new ArrayList<GroupStats>(10);
+ Optional<Group> notifGroup = Optional.absent();
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (txContainer.isPresent()) {
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (inputObj.isPresent() && inputObj.get() instanceof Group) {
+ notifGroup = Optional.<Group> of((Group)inputObj.get());
+ }
+ final List<? extends TransactionAware> cacheNotifs =
+ txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if (notif instanceof GroupStatisticsUpdated) {
+ groupStats.addAll(((GroupStatisticsUpdated) notif).getGroupStats());
+ }
+ }
+ }
+ final Optional<Group> group = notifGroup;
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ /* Notification for continue collecting statistics */
+ if ( ! group.isPresent()) {
+ notifyToCollectNextStatistics(nodeIdent);
+ }
+ statGroupCommit(groupStats, nodeIdent, group, tx);
+ }
+ });
+ }
+
+ private void statGroupCommit(final List<GroupStats> groupStats, final InstanceIdentifier<Node> nodeIdent,
+ final Optional<Group> group, final ReadWriteTransaction trans) {
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+
+ for (final GroupStats groupStat : groupStats) {
+ final GroupStatistics stats = new GroupStatisticsBuilder(groupStat).build();
+
+ final GroupKey groupKey = new GroupKey(groupStat.getGroupId());
+ final InstanceIdentifier<GroupStatistics> gsIdent = fNodeIdent
+ .child(Group.class,groupKey).augmentation(NodeGroupStatistics.class)
+ .child(GroupStatistics.class);
+ /* Statistics Writing */
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if (fNode.isPresent()) {
+ trans.put(LogicalDatastoreType.OPERATIONAL, gsIdent, stats);
+ }
+ }
+ }
+
+ private void statGroupDescCommit(final List<GroupDescStats> groupStats, final InstanceIdentifier<Node> nodeIdent,
+ final ReadWriteTransaction trans) {
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+
+ final List<GroupKey> deviceGroupKeys = new ArrayList<>();
+
+ for (final GroupDescStats group : groupStats) {
+ if (group.getGroupId() != null) {
+ final GroupBuilder groupBuilder = new GroupBuilder(group);
+ final GroupKey groupKey = new GroupKey(group.getGroupId());
+ final InstanceIdentifier<Group> groupRef = fNodeIdent.child(Group.class,groupKey);
+
+ final NodeGroupDescStatsBuilder groupDesc= new NodeGroupDescStatsBuilder();
+ groupDesc.setGroupDesc(new GroupDescBuilder(group).build());
+ //Update augmented data
+ groupBuilder.addAugmentation(NodeGroupDescStats.class, groupDesc.build());
+ deviceGroupKeys.add(groupKey);
+ Optional<FlowCapableNode> hashIdUpd = Optional.absent();
+ try {
+ hashIdUpd = trans.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if (hashIdUpd.isPresent()) {
+ trans.put(LogicalDatastoreType.OPERATIONAL, groupRef, groupBuilder.build());
+ }
+ }
+ }
+ /* Delete all not presented Group Nodes */
+ deleteAllNotPresentNode(fNodeIdent, trans, deviceGroupKeys);
+ }
+
+ private void deleteAllNotPresentNode(final InstanceIdentifier<FlowCapableNode> fNodeIdent,
+ final ReadWriteTransaction trans, final List<GroupKey> deviceGroupKeys) {
+
+ final Optional<FlowCapableNode> fNode = readLatestConfiguration(fNodeIdent);
+ if ( ! fNode.isPresent()) {
+ LOG.trace("Read Operational/DS for FlowCapableNode fail! Node {} doesn't exist.", fNodeIdent);
+ return;
+ }
+ final List<Group> existGroups = fNode.get().getGroup() != null
+ ? fNode.get().getGroup() : Collections.<Group> emptyList();
+ /* Add all existed groups paths - no updated paths has to be removed */
+ for (final Group group : existGroups) {
+ if (deviceGroupKeys.remove(group.getKey())) {
+ break; // group still exist on device
+ }
+ LOG.trace("Group {} has to removed.", group);
+ final InstanceIdentifier<Group> delGroupIdent = fNodeIdent.child(Group.class, group.getKey());
+ Optional<Group> delGroup = Optional.absent();
+ try {
+ delGroup = trans.read(LogicalDatastoreType.OPERATIONAL, delGroupIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ // NOOP - probably another transaction delete that node
+ }
+ if (delGroup.isPresent()) {
+ trans.delete(LogicalDatastoreType.OPERATIONAL, delGroupIdent);
+ }
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterConfigStatsUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterFeaturesUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterStatisticsUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterConfigStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStats;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatListenCommitMeter
+ * Class is a NotifyListener for MeterStatistics and DataChangeListener for Config/DataStore for Meter node.
+ * All expected (registered) MeterStatistics will be builded and commit to Operational/DataStore.
+ * DataChangeEven should call create/delete Meter in Operational/DS
+ *
+ */
+public class StatListenCommitMeter extends StatAbstractListenCommit<Meter, OpendaylightMeterStatisticsListener>
+ implements OpendaylightMeterStatisticsListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatListenCommitMeter.class);
+
+ public StatListenCommitMeter(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps) {
+ super(manager, db, nps, Meter.class);
+ }
+
+ @Override
+ protected InstanceIdentifier<Meter> getWildCardedRegistrationPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class)
+ .augmentation(FlowCapableNode.class).child(Meter.class);
+ }
+
+ @Override
+ protected OpendaylightMeterStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ public void onMeterConfigStatsUpdated(final MeterConfigStatsUpdated notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - MeterConfigStatsUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ if (notification.isMoreReplies()) {
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ return;
+ }
+ final List<MeterConfigStats> meterConfStat = notification.getMeterConfigStats() != null
+ ? new ArrayList<>(notification.getMeterConfigStats()) : new ArrayList<MeterConfigStats>(10);
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (txContainer.isPresent()) {
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if (notif instanceof MeterConfigStatsUpdated) {
+ meterConfStat.addAll(((MeterConfigStatsUpdated) notif).getMeterConfigStats());
+ }
+ }
+ }
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent);
+ comitConfMeterStats(meterConfStat, nodeIdent, tx);
+ }
+ });
+ }
+
+ @Override
+ public void onMeterFeaturesUpdated(final MeterFeaturesUpdated notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - MeterFeaturesUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ if (notification.isMoreReplies()) {
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ return;
+ }
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! txContainer.isPresent()) {
+ return;
+ }
+ final MeterFeatures stats = new MeterFeaturesBuilder(notification).build();
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ final InstanceIdentifier<MeterFeatures> meterFeatureIdent = nodeIdent
+ .augmentation(NodeMeterFeatures.class).child(MeterFeatures.class);
+
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent);
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if (node.isPresent()) {
+ tx.put(LogicalDatastoreType.OPERATIONAL, meterFeatureIdent, stats);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onMeterStatisticsUpdated(final MeterStatisticsUpdated notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - MeterStatisticsUpdated: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ if (notification.isMoreReplies()) {
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ return;
+ }
+ final List<MeterStats> meterStat = notification.getMeterStats() != null
+ ? new ArrayList<>(notification.getMeterStats()) : new ArrayList<MeterStats>(10);
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (txContainer.isPresent()) {
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if (notif instanceof MeterConfigStatsUpdated) {
+ meterStat.addAll(((MeterStatisticsUpdated) notif).getMeterStats());
+ }
+ }
+ }
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ statMeterCommit(meterStat, nodeIdent, tx);
+ /* Notification for continue collecting statistics */
+ notifyToCollectNextStatistics(nodeIdent);
+ }
+ });
+ }
+
+ private void statMeterCommit(final List<MeterStats> meterStats,
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ for (final MeterStats mStat : meterStats) {
+ final MeterStatistics stats = new MeterStatisticsBuilder(mStat).build();
+
+ final MeterKey mKey = new MeterKey(mStat.getMeterId());
+ final InstanceIdentifier<MeterStatistics> msIdent = fNodeIdent
+ .child(Meter.class, mKey).augmentation(NodeMeterStatistics.class)
+ .child(MeterStatistics.class);
+ /* Meter Statistics commit */
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if (fNode.isPresent()) {
+ trans.put(LogicalDatastoreType.OPERATIONAL, msIdent, stats);
+ }
+ }
+ }
+
+ private void comitConfMeterStats(final List<MeterConfigStats> meterConfStat,
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ final List<MeterKey> deviceMeterKeys = new ArrayList<>();
+
+ for (final MeterConfigStats meterConf : meterConfStat) {
+ final MeterBuilder meterBuilder = new MeterBuilder(meterConf);
+ if (meterConf.getMeterId() != null) {
+ final MeterKey meterKey = new MeterKey(meterConf.getMeterId());
+ meterBuilder.setKey(meterKey);
+ final InstanceIdentifier<Meter> meterRef = nodeIdent
+ .augmentation(FlowCapableNode.class).child(Meter.class,meterKey);
+ final NodeMeterConfigStatsBuilder meterConfig = new NodeMeterConfigStatsBuilder();
+ meterConfig.setMeterConfigStats(new MeterConfigStatsBuilder(meterConf).build());
+ //Update augmented data
+ meterBuilder.addAugmentation(NodeMeterConfigStats.class, meterConfig.build());
+ deviceMeterKeys.add(meterKey);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if (fNode.isPresent()) {
+ trans.put(LogicalDatastoreType.OPERATIONAL, meterRef, meterBuilder.build());
+ }
+ }
+ }
+ /* Delete all not presented Meter Nodes */
+ deleteAllNotPresentedNodes(fNodeIdent, trans, deviceMeterKeys);
+ }
+
+ private void deleteAllNotPresentedNodes(final InstanceIdentifier<FlowCapableNode> fNodeIdent,
+ final ReadWriteTransaction trans, final List<MeterKey> deviceMeterKeys) {
+ /* Delete all not presented meters */
+ final Optional<FlowCapableNode> fNode = readLatestConfiguration(fNodeIdent);
+
+ if ( ! fNode.isPresent()) {
+ LOG.trace("Read Operational/DS for FlowCapableNode fail! Node {} doesn't exist.", fNodeIdent);
+ return;
+ }
+ final List<Meter> existMeters = fNode.get().getMeter() != null
+ ? fNode.get().getMeter() : Collections.<Meter> emptyList();
+ /* Add all existed groups paths - no updated paths has to be removed */
+ for (final Meter meter : existMeters) {
+ if (deviceMeterKeys.remove(meter.getKey())) {
+ break; // Meter still exist on device
+ }
+ final InstanceIdentifier<Meter> delMeterIdent = fNodeIdent.child(Meter.class, meter.getKey());
+ Optional<Meter> delMeter = Optional.absent();
+ try {
+ delMeter = trans.read(LogicalDatastoreType.OPERATIONAL, delMeterIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ // NOOP - probably another transaction delete that node
+ }
+ if (delMeter.isPresent()) {
+ trans.delete(LogicalDatastoreType.OPERATIONAL, delMeterIdent);
+ }
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.FlowCapableNodeConnectorQueueStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.FlowCapableNodeConnectorQueueStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.QueueStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.flow.capable.node.connector.queue.statistics.FlowCapableNodeConnectorQueueStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.flow.capable.node.connector.queue.statistics.FlowCapableNodeConnectorQueueStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMap;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNotifyCommitQueue
+ * Class is a NotifyListner for Queues Statistics
+ * All expected (registered) queueStatistics will be builded and
+ * commit to Operational/DataStore
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatListenCommitQueue extends StatAbstractListenCommit<Queue, OpendaylightQueueStatisticsListener>
+ implements OpendaylightQueueStatisticsListener {
+
+ private final static Logger LOG = LoggerFactory.getLogger(StatListenCommitQueue.class);
+
+ public StatListenCommitQueue(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService nps) {
+ super(manager, db, nps, Queue.class);
+ }
+
+ @Override
+ protected OpendaylightQueueStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ protected InstanceIdentifier<Queue> getWildCardedRegistrationPath() {
+ return InstanceIdentifier.create(Nodes.class).child(Node.class).child(NodeConnector.class)
+ .augmentation(FlowCapableNodeConnector.class).child(Queue.class);
+ }
+
+ @Override
+ public void onQueueStatisticsUpdate(final QueueStatisticsUpdate notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - QueueStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ if (notification.isMoreReplies()) {
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ return;
+ }
+ final List<QueueIdAndStatisticsMap> queueStats = notification.getQueueIdAndStatisticsMap() != null
+ ? new ArrayList<>(notification.getQueueIdAndStatisticsMap()) : new ArrayList<QueueIdAndStatisticsMap>(10);
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (txContainer.isPresent()) {
+ final List<? extends TransactionAware> cachedNotifs =
+ txContainer.get().getNotifications();
+ for (final TransactionAware notif : cachedNotifs) {
+ if (notif instanceof QueueStatisticsUpdate) {
+ queueStats.addAll(((QueueStatisticsUpdate) notif).getQueueIdAndStatisticsMap());
+ }
+ }
+ }
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId));
+ /* Queue statistics are small size and we are not able to change for OF cross controller
+ * - don't need to make are atomic */
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction trans) {
+ /* Notification for continue */
+ notifyToCollectNextStatistics(nodeIdent);
+ statQueueCommit(queueStats, nodeIdent, trans);
+ }
+ });
+ }
+
+ private void statQueueCommit(final List<QueueIdAndStatisticsMap> queueStats,
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+
+ /* check exist FlowCapableNode and write statistics */
+ Optional<Node> fNode = Optional.absent();
+ try {
+ fNode = trans.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ return;
+ }
+ if ( ! fNode.isPresent()) {
+ LOG.trace("Read Operational/DS for Node fail! Node {} doesn't exist.", nodeIdent);
+ return;
+ }
+
+ for (final QueueIdAndStatisticsMap queueEntry : queueStats) {
+ final FlowCapableNodeConnectorQueueStatistics statChild =
+ new FlowCapableNodeConnectorQueueStatisticsBuilder(queueEntry).build();
+ final FlowCapableNodeConnectorQueueStatisticsDataBuilder statBuild =
+ new FlowCapableNodeConnectorQueueStatisticsDataBuilder();
+ statBuild.setFlowCapableNodeConnectorQueueStatistics(statChild);
+ final QueueKey qKey = new QueueKey(queueEntry.getQueueId());
+ final InstanceIdentifier<FlowCapableNodeConnectorQueueStatisticsData> queueStatIdent = nodeIdent
+ .child(NodeConnector.class, new NodeConnectorKey(queueEntry.getNodeConnectorId()))
+ .augmentation(FlowCapableNodeConnector.class)
+ .child(Queue.class, qKey).augmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
+ trans.put(LogicalDatastoreType.OPERATIONAL, queueStatIdent, statBuild.build());
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatNodeRegistration;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FeatureCapability;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityFlowStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityGroupStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityPortStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityQueueStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityTableStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SwitchFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemoved;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNodeRegistrationImpl
+ * {@link FlowCapableNode} Registration Implementation contains two method for registration/unregistration
+ * {@link FeatureCapability} for every connect/disconnect {@link FlowCapableNode}. Process of connection/disconnection
+ * is substituted by listening Operation/DS for add/delete {@link FeatureCapability}.
+ * All statistic capabilities are reading from new Node directly without contacting device or DS.
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Aug 28, 2014
+ */
+public class StatNodeRegistrationImpl implements StatNodeRegistration {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatNodeRegistrationImpl.class);
+
+ private final StatisticsManager manager;
+ private ListenerRegistration<DataChangeListener> listenerRegistration;
+ private ListenerRegistration<?> notifListenerRegistration;
+
+ public StatNodeRegistrationImpl(final StatisticsManager manager, final DataBroker db,
+ final NotificationProviderService notificationService) {
+ this.manager = Preconditions.checkNotNull(manager, "StatisticManager can not be null!");
+ Preconditions.checkArgument(db != null, "DataBroker can not be null!");
+ Preconditions.checkArgument(notificationService != null, "NotificationProviderService can not be null!");
+ notifListenerRegistration = notificationService.registerNotificationListener(this);
+ }
+
+ @Override
+ public void close() throws Exception {
+
+ if (notifListenerRegistration != null) {
+ try {
+ notifListenerRegistration.close();
+ }
+ catch (final Exception e) {
+ LOG.warn("Error by stop FlowCapableNode Notification StatNodeRegistration.");
+ }
+ notifListenerRegistration = null;
+ }
+
+ if (listenerRegistration != null) {
+ try {
+ listenerRegistration.close();
+ } catch (final Exception e) {
+ LOG.warn("Error by stop FlowCapableNode DataChange StatListeningCommiter.", e);
+ }
+ listenerRegistration = null;
+ }
+ }
+
+ @Override
+ public void connectFlowCapableNode(final InstanceIdentifier<SwitchFeatures> keyIdent,
+ final SwitchFeatures data, final InstanceIdentifier<Node> nodeIdent) {
+ Preconditions.checkNotNull(keyIdent, "InstanceIdentifier can not be null!");
+ Preconditions.checkNotNull(data, "SwitchFeatures data for {} can not be null!", keyIdent);
+ Preconditions.checkArgument(( ! keyIdent.isWildcarded()), "InstanceIdentifier is WildCarded!");
+
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final List<StatCapabTypes> statCapabTypes = new ArrayList<>();
+ Short maxCapTables = Short.valueOf("1");
+
+ final List<Class<? extends FeatureCapability>> capabilities = data.getCapabilities() != null
+ ? data.getCapabilities() : Collections.<Class<? extends FeatureCapability>> emptyList();
+ for (final Class<? extends FeatureCapability> capability : capabilities) {
+ if (capability == FlowFeatureCapabilityTableStats.class) {
+ statCapabTypes.add(StatCapabTypes.TABLE_STATS);
+ } else if (capability == FlowFeatureCapabilityFlowStats.class) {
+ statCapabTypes.add(StatCapabTypes.FLOW_STATS);
+ } else if (capability == FlowFeatureCapabilityGroupStats.class) {
+ statCapabTypes.add(StatCapabTypes.GROUP_STATS);
+ } else if (capability == FlowFeatureCapabilityPortStats.class) {
+ statCapabTypes.add(StatCapabTypes.PORT_STATS);
+ } else if (capability == FlowFeatureCapabilityQueueStats.class) {
+ statCapabTypes.add(StatCapabTypes.QUEUE_STATS);
+ }
+ }
+ maxCapTables = data.getMaxTables();
+
+ final Optional<Short> maxTables = Optional.<Short> of(maxCapTables);
+
+ /* Meters management */
+ final InstanceIdentifier<NodeMeterFeatures> meterFeaturesIdent = nodeIdent.augmentation(NodeMeterFeatures.class);
+
+
+ Optional<NodeMeterFeatures> meterFeatures = Optional.absent();
+ try {
+ meterFeatures = tx.read(LogicalDatastoreType.OPERATIONAL, meterFeaturesIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.warn("Read NodeMeterFeatures {} fail!", meterFeaturesIdent, e);
+ }
+ if (meterFeatures.isPresent()) {
+ statCapabTypes.add(StatCapabTypes.METER_STATS);
+ }
+ manager.connectedNodeRegistration(nodeIdent,
+ Collections.unmodifiableList(statCapabTypes), maxTables.get());
+ }
+ });
+ }
+
+ @Override
+ public void disconnectFlowCapableNode(final InstanceIdentifier<Node> nodeIdent) {
+ Preconditions.checkArgument(nodeIdent != null, "InstanceIdentifier can not be NULL!");
+ Preconditions.checkArgument(( ! nodeIdent.isWildcarded()),
+ "InstanceIdentifier {} is WildCarded!", nodeIdent);
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction tx) {
+ manager.disconnectedNodeUnregistration(nodeIdent);
+ }
+ });
+ }
+
+
+ @Override
+ public void onNodeConnectorRemoved(final NodeConnectorRemoved notification) {
+ // NOOP
+ }
+
+ @Override
+ public void onNodeConnectorUpdated(final NodeConnectorUpdated notification) {
+ // NOOP
+ }
+
+ @Override
+ public void onNodeRemoved(final NodeRemoved notification) {
+ final NodeRef nodeRef = notification.getNodeRef();
+ final InstanceIdentifier<?> nodeRefIdent = nodeRef.getValue();
+ final InstanceIdentifier<Node> nodeIdent =
+ nodeRefIdent.firstIdentifierOf(Node.class);
+ if (nodeIdent != null) {
+ disconnectFlowCapableNode(nodeIdent);
+ }
+ }
+
+ @Override
+ public void onNodeUpdated(final NodeUpdated notification) {
+ final FlowCapableNodeUpdated newFlowNode =
+ notification.getAugmentation(FlowCapableNodeUpdated.class);
+ if (newFlowNode != null && newFlowNode.getSwitchFeatures() != null) {
+ final NodeRef nodeRef = notification.getNodeRef();
+ final InstanceIdentifier<?> nodeRefIdent = nodeRef.getValue();
+ final InstanceIdentifier<Node> nodeIdent =
+ nodeRefIdent.firstIdentifierOf(Node.class);
+
+ final InstanceIdentifier<SwitchFeatures> swichFeaturesIdent =
+ nodeIdent.augmentation(FlowCapableNode.class).child(SwitchFeatures.class);
+ final SwitchFeatures switchFeatures = newFlowNode.getSwitchFeatures();
+ connectFlowCapableNode(swichFeaturesIdent, switchFeatures, nodeIdent);
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.NodeConnectorStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.flow.capable.node.connector.statistics.FlowCapableNodeConnectorStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.flow.capable.node.connector.statistics.FlowCapableNodeConnectorStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMap;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNotifyCommitPort
+ * Class is a NotifyListener for PortStatistics
+ * All expected (registered) portStatistics will be builded and
+ * commit to Operational/DataStore
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatNotifyCommitPort extends StatAbstractNotifyCommit<OpendaylightPortStatisticsListener>
+ implements OpendaylightPortStatisticsListener {
+
+ private static final Logger LOG = LoggerFactory.getLogger(StatNotifyCommitPort.class);
+
+ public StatNotifyCommitPort(final StatisticsManager manager,
+ final NotificationProviderService nps) {
+ super(manager, nps);
+ }
+
+ @Override
+ protected OpendaylightPortStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ public void onNodeConnectorStatisticsUpdate(final NodeConnectorStatisticsUpdate notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - NodeConnectorStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId));
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction trans) {
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if (( ! txContainer.isPresent()) || txContainer.get().getNotifications() == null) {
+ return;
+ }
+ final List<NodeConnectorStatisticsAndPortNumberMap> portStats =
+ new ArrayList<NodeConnectorStatisticsAndPortNumberMap>(10);
+ final List<? extends TransactionAware> cachedNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cachedNotifs) {
+ if (notif instanceof NodeConnectorStatisticsUpdate) {
+ final List<NodeConnectorStatisticsAndPortNumberMap> notifStat =
+ ((NodeConnectorStatisticsUpdate) notif).getNodeConnectorStatisticsAndPortNumberMap();
+ if (notifStat != null) {
+ portStats.addAll(notifStat);
+ }
+ }
+ }
+ /* write stat to trans */
+ statPortCommit(portStats, nodeIdent, trans);
+ /* Notification for continue collecting statistics - Port statistics are still same size
+ * and they are small - don't need to wait for whole apply operation*/
+ notifyToCollectNextStatistics(nodeIdent);
+ }
+ });
+ }
+
+ private void statPortCommit(final List<NodeConnectorStatisticsAndPortNumberMap> portStats,
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction tx) {
+
+ /* check exist FlowCapableNode and write statistics probable with parent */
+ Optional<Node> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ return;
+ }
+ if ( ! fNode.isPresent()) {
+ LOG.trace("Read Operational/DS for Node fail! Node {} doesn't exist.", nodeIdent);
+ return;
+ }
+ for (final NodeConnectorStatisticsAndPortNumberMap nConnectPort : portStats) {
+ final FlowCapableNodeConnectorStatistics stats = new FlowCapableNodeConnectorStatisticsBuilder(nConnectPort).build();
+ final NodeConnectorKey key = new NodeConnectorKey(nConnectPort.getNodeConnectorId());
+ final InstanceIdentifier<NodeConnector> nodeConnectorIdent = nodeIdent.child(NodeConnector.class, key);
+ final InstanceIdentifier<FlowCapableNodeConnectorStatisticsData> nodeConnStatIdent = nodeConnectorIdent
+ .augmentation(FlowCapableNodeConnectorStatisticsData.class);
+ final InstanceIdentifier<FlowCapableNodeConnectorStatistics> flowCapNodeConnStatIdent =
+ nodeConnStatIdent.child(FlowCapableNodeConnectorStatistics.class);
+ Optional<NodeConnector> fNodeConector;
+ try {
+ fNodeConector = tx.read(LogicalDatastoreType.OPERATIONAL, nodeConnectorIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read NodeConnector {} in Operational/DS fail!", nodeConnectorIdent, e);
+ fNodeConector = Optional.absent();
+ }
+ if (fNodeConector.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeConnectorIdent, new NodeConnectorBuilder().setId(key.getId()).build());
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeConnStatIdent, new FlowCapableNodeConnectorStatisticsDataBuilder().build());
+ tx.put(LogicalDatastoreType.OPERATIONAL, flowCapNodeConnStatIdent, stats);
+ }
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsDataBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsUpdate;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.statistics.FlowTableStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.statistics.FlowTableStatisticsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatNotifyCommitTable
+ * Class is a NotifyListener for TableStatistics
+ * All expected (registered) tableStatistics will be builded and
+ * commit to Operational/DataStore
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatNotifyCommitTable extends StatAbstractNotifyCommit<OpendaylightFlowTableStatisticsListener>
+ implements OpendaylightFlowTableStatisticsListener {
+
+ private final static Logger LOG = LoggerFactory.getLogger(StatNotifyCommitTable.class);
+
+ public StatNotifyCommitTable(final StatisticsManager manager,
+ final NotificationProviderService nps) {
+ super(manager, nps);
+ }
+
+ @Override
+ protected OpendaylightFlowTableStatisticsListener getStatNotificationListener() {
+ return this;
+ }
+
+ @Override
+ public void onFlowTableStatisticsUpdate(final FlowTableStatisticsUpdate notification) {
+ final TransactionId transId = notification.getTransactionId();
+ final NodeId nodeId = notification.getId();
+ if ( ! isExpectedStatistics(transId, nodeId)) {
+ LOG.debug("STAT-MANAGER - FlowTableStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
+ return;
+ }
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
+ if (notification.isMoreReplies()) {
+ return;
+ }
+ /* Don't block RPC Notification thread */
+ manager.enqueue(new StatDataStoreOperation() {
+ @Override
+ public void applyOperation(final ReadWriteTransaction trans) {
+ final List<FlowTableAndStatisticsMap> tableStats = new ArrayList<FlowTableAndStatisticsMap>(10);
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId));
+ if (( ! txContainer.isPresent()) || txContainer.get().getNodeId() == null) {
+ return;
+ }
+ final List<? extends TransactionAware> cachedNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cachedNotifs) {
+ if (notif instanceof FlowTableStatisticsUpdate) {
+ final List<FlowTableAndStatisticsMap> statNotif =
+ ((FlowTableStatisticsUpdate) notif).getFlowTableAndStatisticsMap();
+ if (statNotif != null) {
+ tableStats.addAll(statNotif);
+ }
+ }
+ }
+ /* write stat to trans */
+ statTableCommit(tableStats, nodeIdent, trans);
+ /* Notification for continue collecting statistics - Tables statistics are still same size
+ * and they are small - don't need to wait to whole apply operation */
+ notifyToCollectNextStatistics(nodeIdent);
+ }
+ });
+ }
+
+ private void statTableCommit(final List<FlowTableAndStatisticsMap> tableStats, final InstanceIdentifier<Node> nodeIdent,
+ final ReadWriteTransaction trans) {
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ /* check flow Capable Node and write statistics */
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ return;
+ }
+ if ( ! fNode.isPresent()) {
+ LOG.trace("Read Operational/DS for FlowCapableNode fail! Node {} doesn't exist.", fNodeIdent);
+ return;
+ }
+ for (final FlowTableAndStatisticsMap tableStat : tableStats) {
+ final InstanceIdentifier<Table> tableIdent = fNodeIdent
+ .child(Table.class, new TableKey(tableStat.getTableId().getValue()));
+ final Table table = new TableBuilder().setId(tableStat.getTableId().getValue()).build();
+ trans.merge(LogicalDatastoreType.OPERATIONAL, tableIdent, table);
+ final InstanceIdentifier<FlowTableStatisticsData> tableStatIdent = tableIdent
+ .augmentation(FlowTableStatisticsData.class);
+ trans.merge(LogicalDatastoreType.OPERATIONAL, tableStatIdent, new FlowTableStatisticsDataBuilder().build());
+
+ final FlowTableStatistics stats = new FlowTableStatisticsBuilder(tableStat).build();
+ final InstanceIdentifier<FlowTableStatistics> tStatIdent = tableStatIdent.child(FlowTableStatistics.class);
+ trans.put(LogicalDatastoreType.OPERATIONAL, tStatIdent, stats);
+ }
+ }
+}
+
--- /dev/null
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatPermCollectorImpl
+ * Thread base statistic collector. Class holds internal map for all registered
+ * (means connected) nodes with List of Switch capabilities;
+ * Statistics collecting process get cross whole Network Device by device
+ * and statistic by statistic (follow Switch capabilities to prevent unnecessary
+ * ask) Next statistic start collecting by notification or by timeout.
+ *
+ * @author @author avishnoi@in.ibm.com <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatPermCollectorImpl implements StatPermCollector {
+
+ private final static Logger LOG = LoggerFactory.getLogger(StatPermCollectorImpl.class);
+
+ private final static long STAT_COLLECT_TIME_OUT = 30000L;
+
+ private final ExecutorService statNetCollectorServ;
+ private final StatisticsManager manager;
+
+ private final int maxNodeForCollector;
+ private final long minReqNetInterval;
+ private final String name;
+
+ private final Object statCollectorLock = new Object();
+ private final Object statNodeHolderLock = new Object();
+
+ private Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNodeHolder =
+ Collections.<InstanceIdentifier<Node>, StatNodeInfoHolder> emptyMap();
+
+ private volatile boolean wakeMe = false;
+ private volatile boolean finishing = false;
+
+ public StatPermCollectorImpl(final StatisticsManager manager, final long minReqNetInterv, final int nr,
+ final int maxNodeForCollectors) {
+ this.manager = Preconditions.checkNotNull(manager, "StatisticsManager can not be null!");
+ name = "odl-stat-collector-" + nr;
+ minReqNetInterval = minReqNetInterv;
+ final ThreadFactory threadFact = new ThreadFactoryBuilder()
+ .setNameFormat(name + "-thread-%d").build();
+ statNetCollectorServ = Executors.newSingleThreadExecutor(threadFact);
+ maxNodeForCollector = maxNodeForCollectors;
+ LOG.trace("StatCollector {} start successfull!", name);
+ }
+
+ /**
+ * finish collecting statistics
+ */
+ @Override
+ public void close() {
+ statNodeHolder = Collections.<InstanceIdentifier<Node>, StatNodeInfoHolder> emptyMap();
+ finishing = true;
+ collectNextStatistics();
+ statNetCollectorServ.shutdown();
+ }
+
+ @Override
+ public boolean isProvidedFlowNodeActive(
+ final InstanceIdentifier<Node> flowNode) {
+ return statNodeHolder.containsKey(flowNode);
+ }
+
+ @Override
+ public boolean connectedNodeRegistration(final InstanceIdentifier<Node> ident,
+ final List<StatCapabTypes> statTypes, final Short nrOfSwitchTables) {
+ if (ident.isWildcarded()) {
+ LOG.warn("FlowCapableNode IstanceIdentifier {} registration can not be wildcarded!", ident);
+ } else {
+ if ( ! statNodeHolder.containsKey(ident)) {
+ synchronized (statNodeHolderLock) {
+ final boolean startStatCollecting = statNodeHolder.size() == 0;
+ if ( ! statNodeHolder.containsKey(ident)) {
+ if (statNodeHolder.size() >= maxNodeForCollector) {
+ return false;
+ }
+ final Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNode =
+ new HashMap<>(statNodeHolder);
+ final NodeRef nodeRef = new NodeRef(ident);
+ final StatNodeInfoHolder nodeInfoHolder = new StatNodeInfoHolder(nodeRef,
+ statTypes, nrOfSwitchTables);
+ statNode.put(ident, nodeInfoHolder);
+ statNodeHolder = Collections.unmodifiableMap(statNode);
+ }
+ if (startStatCollecting) {
+ finishing = false;
+ statNetCollectorServ.execute(this);
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean disconnectedNodeUnregistration(final InstanceIdentifier<Node> ident) {
+ if (ident.isWildcarded()) {
+ LOG.warn("FlowCapableNode IstanceIdentifier {} unregistration can not be wildcarded!", ident);
+ } else {
+ if (statNodeHolder.containsKey(ident)) {
+ synchronized (statNodeHolderLock) {
+ if (statNodeHolder.containsKey(ident)) {
+ final Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNode =
+ new HashMap<>(statNodeHolder);
+ statNode.remove(ident);
+ statNodeHolder = Collections.unmodifiableMap(statNode);
+ }
+ if (statNodeHolder.isEmpty()) {
+ finishing = true;
+ collectNextStatistics();
+ statNetCollectorServ.shutdown();
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public void collectNextStatistics() {
+ if (wakeMe) {
+ synchronized (statCollectorLock) {
+ if (wakeMe) {
+ LOG.trace("STAT-COLLECTOR is notified to conntinue");
+ statCollectorLock.notify();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(5000);
+ }
+ catch (final InterruptedException e1) {
+ // NOOP
+ }
+ LOG.debug("StatCollector {} Start collecting!", name);
+ /* Neverending cyle - wait for finishing */
+ while ( ! finishing) {
+ boolean collecting = false;
+ final long startTime = System.currentTimeMillis();
+
+ if ( ! statNodeHolder.isEmpty()) {
+ collecting = true;
+ collectStatCrossNetwork();
+ collecting = false;
+ }
+
+ if ( ! collecting) {
+ final long statFinalTime = System.currentTimeMillis() - startTime;
+ LOG.debug("STAT-MANAGER {}: last all NET statistics collection cost {} ms", name, statFinalTime);
+ if (statFinalTime < minReqNetInterval) {
+ LOG.trace("statCollector is about to make a collecting sleep");
+ synchronized (statCollectorLock) {
+ wakeMe = true;
+ try {
+ final long waitTime = minReqNetInterval - statFinalTime;
+ statCollectorLock.wait(waitTime);
+ LOG.trace("STAT-MANAGER : statCollector {} is waking up from a collecting sleep for {} ms", name, waitTime);
+ } catch (final InterruptedException e) {
+ LOG.warn("statCollector has been interrupted during collecting sleep", e);
+ } finally {
+ wakeMe = false;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ private void waitingForNotification() {
+ synchronized (statCollectorLock) {
+ wakeMe = true;
+ try {
+ statCollectorLock.wait(STAT_COLLECT_TIME_OUT);
+ LOG.trace("statCollector is waking up from a wait stat Response sleep");
+ } catch (final InterruptedException e) {
+ LOG.warn("statCollector has been interrupted waiting stat Response sleep", e);
+ } finally {
+ wakeMe = false;
+ }
+ }
+ }
+
+
+ private void collectStatCrossNetwork() {
+ for (final Entry<InstanceIdentifier<Node>, StatNodeInfoHolder> nodeEntity : statNodeHolder.entrySet()) {
+ final List<StatCapabTypes> listNeededStat = nodeEntity.getValue().getStatMarkers();
+ final NodeRef actualNodeRef = nodeEntity.getValue().getNodeRef();
+ final Short maxTables = nodeEntity.getValue().getMaxTables();
+ for (final StatCapabTypes statMarker : listNeededStat) {
+ if ( ! isProvidedFlowNodeActive(nodeEntity.getKey())) {
+ break;
+ }
+ switch (statMarker) {
+ case PORT_STATS:
+ LOG.trace("STAT-MANAGER-collecting PORT-STATS for NodeRef {}", actualNodeRef);
+ manager.getRpcMsgManager().getAllPortsStat(actualNodeRef);
+ waitingForNotification();
+ break;
+ case QUEUE_STATS:
+ LOG.trace("STAT-MANAGER-collecting QUEUE-STATS for NodeRef {}", actualNodeRef);
+ manager.getRpcMsgManager().getAllQueueStat(actualNodeRef);
+ waitingForNotification();
+ break;
+ case TABLE_STATS:
+ LOG.trace("STAT-MANAGER-collecting TABLE-STATS for NodeRef {}", actualNodeRef);
+ manager.getRpcMsgManager().getAllTablesStat(actualNodeRef);
+ waitingForNotification();
+ break;
+ case GROUP_STATS:
+ LOG.trace("STAT-MANAGER-collecting GROUP-STATS for NodeRef {}", actualNodeRef);
+ manager.getRpcMsgManager().getGroupFeaturesStat(actualNodeRef);
+ waitingForNotification();
+ manager.getRpcMsgManager().getAllGroupsConfStats(actualNodeRef);
+ waitingForNotification();
+ manager.getRpcMsgManager().getAllGroupsStat(actualNodeRef);
+ waitingForNotification();
+ break;
+ case METER_STATS:
+ LOG.trace("STAT-MANAGER-collecting METER-STATS for NodeRef {}", actualNodeRef);
+ manager.getRpcMsgManager().getMeterFeaturesStat(actualNodeRef);
+ waitingForNotification();
+ manager.getRpcMsgManager().getAllMeterConfigStat(actualNodeRef);
+ waitingForNotification();
+ manager.getRpcMsgManager().getAllMetersStat(actualNodeRef);
+ waitingForNotification();
+ break;
+ case FLOW_STATS:
+ LOG.trace("STAT-MANAGER-collecting FLOW-STATS-ALL_FLOWS for NodeRef {}", actualNodeRef);
+ manager.getRpcMsgManager().getAllFlowsStat(actualNodeRef);
+ waitingForNotification();
+ LOG.trace("STAT-MANAGER-collecting FLOW-AGGREGATE-STATS for NodeRef {}", actualNodeRef);
+ for (short i = 0; i < maxTables; i++) {
+ final TableId tableId = new TableId(i);
+ manager.getRpcMsgManager().getAggregateFlowStat(actualNodeRef, tableId);
+ }
+ break;
+ default:
+ /* Exception for programmers in implementation cycle */
+ throw new IllegalStateException("Not implemented ASK for " + statMarker);
+ }
+ }
+ }
+ }
+
+ private class StatNodeInfoHolder {
+ private final NodeRef nodeRef;
+ private final List<StatCapabTypes> statMarkers;
+ private final Short maxTables;
+
+ public StatNodeInfoHolder(final NodeRef nodeRef,
+ final List<StatCapabTypes> statMarkers, final Short maxTables) {
+ this.nodeRef = nodeRef;
+ this.maxTables = maxTables;
+ this.statMarkers = statMarkers;
+ }
+
+ public final NodeRef getNodeRef() {
+ return nodeRef;
+ }
+
+ public final List<StatCapabTypes> getStatMarkers() {
+ return statMarkers;
+ }
+
+ public final Short getMaxTables() {
+ return maxTables;
+ }
+ }
+
+ @Override
+ public boolean hasActiveNodes() {
+ return ( ! statNodeHolder.isEmpty());
+ }
+}
+
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.statistics.manager;
-
-import java.util.Collection;
+package org.opendaylight.controller.md.statistics.manager.impl;
import org.opendaylight.yangtools.yang.common.RpcError;
-final class RPCFailedException extends RuntimeException {
+import java.util.Collection;
+
+public final class StatRPCFailedException extends RuntimeException {
private static final long serialVersionUID = 1L;
private final Collection<RpcError> errors;
- public RPCFailedException(final String message, final Collection<RpcError> errors) {
+ public StatRPCFailedException(final String message, final Collection<RpcError> errors) {
super(message);
this.errors = errors;
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterFeaturesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.SettableFuture;
+
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager.impl
+ *
+ * StatRpcMsgManagerImpl
+ * Class register and provide all RPC Statistics Device Services and implement pre-defined
+ * wrapped methods for prepare easy access to RPC Statistics Device Services like getAllStatisticsFor...
+ *
+ * In next Class implement process for joining multipart messages.
+ * Class internally use two WeakHashMap and GuavaCache for holding values for joining multipart msg.
+ * One Weak map is used for holding all Multipart Messages and second is used for possible input
+ * Config/DS light-weight DataObject (DataObject contains only necessary identification fields as
+ * TableId, GroupId, MeterId or for flow Match, Priority, FlowCookie, TableId and FlowId ...
+ *
+ * @author avishnoi@in.ibm.com <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ */
+public class StatRpcMsgManagerImpl implements StatRpcMsgManager {
+
+ private final static Logger LOG = LoggerFactory.getLogger(StatRpcMsgManagerImpl.class);
+
+ private final Cache<String, TransactionCacheContainer<? super TransactionAware>> txCache;
+
+ private final long maxLifeForRequest = 50; /* 50 second */
+ private final int queueCapacity = 5000;
+
+ private final OpendaylightGroupStatisticsService groupStatsService;
+ private final OpendaylightMeterStatisticsService meterStatsService;
+ private final OpendaylightFlowStatisticsService flowStatsService;
+ private final OpendaylightPortStatisticsService portStatsService;
+ private final OpendaylightFlowTableStatisticsService flowTableStatsService;
+ private final OpendaylightQueueStatisticsService queueStatsService;
+
+ private BlockingQueue<RpcJobsQueue> statsRpcJobQueue;
+
+ private volatile boolean finishing = false;
+
+ public StatRpcMsgManagerImpl (final StatisticsManager manager,
+ final RpcConsumerRegistry rpcRegistry, final long minReqNetMonitInt) {
+ Preconditions.checkArgument(manager != null, "StatisticManager can not be null!");
+ Preconditions.checkArgument(rpcRegistry != null, "RpcConsumerRegistry can not be null !");
+ groupStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightGroupStatisticsService.class),
+ "OpendaylightGroupStatisticsService can not be null!");
+ meterStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightMeterStatisticsService.class),
+ "OpendaylightMeterStatisticsService can not be null!");
+ flowStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightFlowStatisticsService.class),
+ "OpendaylightFlowStatisticsService can not be null!");
+ portStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightPortStatisticsService.class),
+ "OpendaylightPortStatisticsService can not be null!");
+ flowTableStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightFlowTableStatisticsService.class),
+ "OpendaylightFlowTableStatisticsService can not be null!");
+ queueStatsService = Preconditions.checkNotNull(
+ rpcRegistry.getRpcService(OpendaylightQueueStatisticsService.class),
+ "OpendaylightQueueStatisticsService can not be null!");
+
+ statsRpcJobQueue = new LinkedBlockingQueue<>(queueCapacity);
+ txCache = CacheBuilder.newBuilder().expireAfterWrite(maxLifeForRequest, TimeUnit.SECONDS)
+ .maximumSize(10000).build();
+ }
+
+ @Override
+ public void close() {
+ finishing = true;
+ statsRpcJobQueue = null;
+ }
+
+ @Override
+ public void run() {
+ /* Neverending cyle - wait for finishing */
+ while ( ! finishing) {
+ try {
+ statsRpcJobQueue.take().call();
+ }
+ catch (final Exception e) {
+ LOG.warn("Stat Element RPC executor fail!", e);
+ }
+ }
+ // Drain all rpcCall, making sure any blocked threads are unblocked
+ while ( ! statsRpcJobQueue.isEmpty()) {
+ statsRpcJobQueue.poll();
+ }
+ }
+
+ private void addGetAllStatJob(final RpcJobsQueue getAllStatJob) {
+ final boolean success = statsRpcJobQueue.offer(getAllStatJob);
+ if ( ! success) {
+ LOG.warn("Put RPC request getAllStat fail! Queue is full.");
+ }
+ }
+
+ private void addStatJob(final RpcJobsQueue getStatJob) {
+ final boolean success = statsRpcJobQueue.offer(getStatJob);
+ if ( ! success) {
+ LOG.debug("Put RPC request for getStat fail! Queue is full.");
+ }
+ }
+
+ @Override
+ public <T extends TransactionAware, D extends DataObject> void registrationRpcFutureCallBack(
+ final Future<RpcResult<T>> future, final D inputObj, final NodeRef nodeRef) {
+
+ Futures.addCallback(JdkFutureAdapters.listenInPoolThread(future),
+ new FutureCallback<RpcResult<? extends TransactionAware>>() {
+
+ @Override
+ public void onSuccess(final RpcResult<? extends TransactionAware> result) {
+ final TransactionId id = result.getResult().getTransactionId();
+ if (id == null) {
+ LOG.warn("No protocol support");
+ } else {
+ final NodeKey nodeKey = nodeRef.getValue().firstKeyOf(Node.class, NodeKey.class);
+ final String cacheKey = buildCacheKey(id, nodeKey.getId());
+ final TransactionCacheContainer<? super TransactionAware> container =
+ new TransactionCacheContainerImpl<>(id, inputObj, nodeKey.getId());
+ txCache.put(cacheKey, container);
+ }
+ }
+
+ @Override
+ public void onFailure(final Throwable t) {
+ LOG.warn("Response Registration for Statistics RPC call fail!", t);
+ }
+
+ });
+ }
+
+ private String buildCacheKey(final TransactionId id, final NodeId nodeId) {
+ return String.valueOf(id.getValue()) + "-" + nodeId.getValue();
+ }
+
+ @Override
+ public Future<Optional<TransactionCacheContainer<?>>> getTransactionCacheContainer(
+ final TransactionId id, final NodeId nodeId) {
+ Preconditions.checkArgument(id != null, "TransactionId can not be null!");
+ Preconditions.checkArgument(nodeId != null, "NodeId can not be null!");
+
+ final String key = buildCacheKey(id, nodeId);
+ final SettableFuture<Optional<TransactionCacheContainer<?>>> result = SettableFuture.create();
+
+ final RpcJobsQueue getTransactionCacheContainer = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final Optional<TransactionCacheContainer<?>> resultContainer =
+ Optional.<TransactionCacheContainer<?>> fromNullable(txCache.getIfPresent(key));
+ if (resultContainer.isPresent()) {
+ txCache.invalidate(key);
+ }
+ result.set(resultContainer);
+ return null;
+ }
+ };
+ addStatJob(getTransactionCacheContainer);
+ return result;
+ }
+
+ @Override
+ public Future<Boolean> isExpectedStatistics(final TransactionId id, final NodeId nodeId) {
+ Preconditions.checkArgument(id != null, "TransactionId can not be null!");
+ Preconditions.checkArgument(nodeId != null, "NodeId can not be null!");
+
+ final String key = buildCacheKey(id, nodeId);
+ final SettableFuture<Boolean> checkStatId = SettableFuture.create();
+
+ final RpcJobsQueue isExpecedStatistics = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final Optional<TransactionCacheContainer<?>> result =
+ Optional.<TransactionCacheContainer<?>> fromNullable(txCache.getIfPresent(key));
+ checkStatId.set(Boolean.valueOf(result.isPresent()));
+ return null;
+ }
+ };
+ addStatJob(isExpecedStatistics);
+ return checkStatId;
+ }
+
+ @Override
+ public void addNotification(final TransactionAware notification, final NodeId nodeId) {
+ Preconditions.checkArgument(notification != null, "TransactionAware can not be null!");
+ Preconditions.checkArgument(nodeId != null, "NodeId can not be null!");
+
+ final RpcJobsQueue addNotification = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final TransactionId txId = notification.getTransactionId();
+ final String key = buildCacheKey(txId, nodeId);
+ final TransactionCacheContainer<? super TransactionAware> container = (txCache.getIfPresent(key));
+ if (container != null) {
+ container.addNotif(notification);
+ }
+ return null;
+ }
+ };
+ addStatJob(addNotification);
+ }
+
+ @Override
+ public void getAllGroupsStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getAllGroupStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllGroupStatisticsInputBuilder builder =
+ new GetAllGroupStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(groupStatsService
+ .getAllGroupStatistics(builder.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllGroupStat);
+ }
+
+ @Override
+ public void getAllMetersStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getAllMeterStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllMeterStatisticsInputBuilder builder =
+ new GetAllMeterStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(meterStatsService
+ .getAllMeterStatistics(builder.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllMeterStat);
+ }
+
+ @Override
+ public void getAllFlowsStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getAllFlowStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllFlowsStatisticsFromAllFlowTablesInputBuilder builder =
+ new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(flowStatsService
+ .getAllFlowsStatisticsFromAllFlowTables(builder.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllFlowStat);
+ }
+
+ @Override
+ public void getAggregateFlowStat(final NodeRef nodeRef, final TableId tableId) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ Preconditions.checkArgument(tableId != null, "TableId can not be null!");
+ final RpcJobsQueue getAggregateFlowStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder builder =
+ new GetAggregateFlowStatisticsFromFlowTableForAllFlowsInputBuilder();
+ builder.setNode(nodeRef);
+ builder.setTableId(tableId);
+
+ final TableBuilder tbuilder = new TableBuilder();
+ tbuilder.setId(tableId.getValue());
+ tbuilder.setKey(new TableKey(tableId.getValue()));
+ registrationRpcFutureCallBack(flowStatsService
+ .getAggregateFlowStatisticsFromFlowTableForAllFlows(builder.build()), tbuilder.build(), nodeRef);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAggregateFlowStat);
+ }
+
+ @Override
+ public void getAllPortsStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getAllPortsStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllNodeConnectorsStatisticsInputBuilder builder =
+ new GetAllNodeConnectorsStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(portStatsService
+ .getAllNodeConnectorsStatistics(builder.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllPortsStat);
+ }
+
+ @Override
+ public void getAllTablesStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getAllTableStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetFlowTablesStatisticsInputBuilder builder =
+ new GetFlowTablesStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(flowTableStatsService
+ .getFlowTablesStatistics(builder.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllTableStat);
+ }
+
+ @Override
+ public void getAllQueueStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getAllQueueStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllQueuesStatisticsFromAllPortsInputBuilder builder =
+ new GetAllQueuesStatisticsFromAllPortsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(queueStatsService
+ .getAllQueuesStatisticsFromAllPorts(builder.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllQueueStat);
+ }
+
+ @Override
+ public void getAllMeterConfigStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue qetAllMeterConfStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ final GetAllMeterConfigStatisticsInputBuilder builder =
+ new GetAllMeterConfigStatisticsInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(meterStatsService
+ .getAllMeterConfigStatistics(builder.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addGetAllStatJob(qetAllMeterConfStat);
+ }
+
+ @Override
+ public void getGroupFeaturesStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getGroupFeaturesStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ /* RPC input */
+ final GetGroupFeaturesInputBuilder input = new GetGroupFeaturesInputBuilder();
+ input.setNode(nodeRef);
+ registrationRpcFutureCallBack(groupStatsService.getGroupFeatures(input.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addStatJob(getGroupFeaturesStat);
+ }
+
+ @Override
+ public void getMeterFeaturesStat(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getMeterFeaturesStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ /* RPC input */
+ final GetMeterFeaturesInputBuilder input = new GetMeterFeaturesInputBuilder();
+ input.setNode(nodeRef);
+ registrationRpcFutureCallBack(meterStatsService.getMeterFeatures(input.build()), null, nodeRef);
+ return null;
+ }
+ };
+ addStatJob(getMeterFeaturesStat);
+ }
+
+ @Override
+ public void getAllGroupsConfStats(final NodeRef nodeRef) {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final RpcJobsQueue getAllGropConfStat = new RpcJobsQueue() {
+
+ @Override
+ public Void call() throws Exception {
+ Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
+ final GetGroupDescriptionInputBuilder builder =
+ new GetGroupDescriptionInputBuilder();
+ builder.setNode(nodeRef);
+ registrationRpcFutureCallBack(groupStatsService
+ .getGroupDescription(builder.build()), null, nodeRef);
+
+ return null;
+ }
+ };
+ addGetAllStatJob(getAllGropConfStat);
+ }
+
+ public class TransactionCacheContainerImpl<T extends TransactionAware> implements TransactionCacheContainer<T> {
+
+ private final TransactionId id;
+ private final NodeId nId;
+ private final List<T> notifications;
+ private final Optional<? extends DataObject> confInput;
+
+ public <D extends DataObject> TransactionCacheContainerImpl (final TransactionId id, final D input, final NodeId nodeId) {
+ this.id = Preconditions.checkNotNull(id, "TransactionId can not be null!");
+ notifications = new CopyOnWriteArrayList<T>();
+ confInput = Optional.fromNullable(input);
+ nId = nodeId;
+ }
+
+ @Override
+ public void addNotif(final T notif) {
+ notifications.add(notif);
+ }
+
+ @Override
+ public TransactionId getId() {
+ return id;
+ }
+
+ @Override
+ public NodeId getNodeId() {
+ return nId;
+ }
+
+ @Override
+ public List<T> getNotifications() {
+ return notifications;
+ }
+
+ @Override
+ public Optional<? extends DataObject> getConfInput() {
+ return confInput;
+ }
+ }
+}
+
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.ThreadFactory;
+
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
+import org.opendaylight.controller.md.statistics.manager.StatListeningCommiter;
+import org.opendaylight.controller.md.statistics.manager.StatNodeRegistration;
+import org.opendaylight.controller.md.statistics.manager.StatNotifyCommiter;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
+import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsListener;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+* statistics-manager
+* org.opendaylight.controller.md.statistics.manager.impl
+*
+* StatisticsManagerImpl
+* It represent a central point for whole module. Implementation
+* {@link StatisticsManager} registers all Operation/DS {@link StatNotifyCommiter} and
+* Config/DS {@StatListeningCommiter}, as well as {@link StatPermCollector}
+* for statistic collecting and {@link StatRpcMsgManager} as Device RPCs provider.
+* In next, StatisticsManager provides all DS contact Transaction services.
+*
+* @author avishnoi@in.ibm.com <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+*
+*/
+public class StatisticsManagerImpl implements StatisticsManager, Runnable {
+
+ private final static Logger LOG = LoggerFactory.getLogger(StatisticsManagerImpl.class);
+
+ private static final int QUEUE_DEPTH = 5000;
+ private static final int MAX_BATCH = 100;
+
+ private final BlockingQueue<StatDataStoreOperation> dataStoreOperQueue = new LinkedBlockingDeque<>(QUEUE_DEPTH);
+
+ private final DataBroker dataBroker;
+ private final int maxNodesForCollectors;
+ private long minReqNetMonitInt;
+ private final ExecutorService statRpcMsgManagerExecutor;
+ private final ExecutorService statDataStoreOperationServ;
+ private StatRpcMsgManager rpcMsgManager;
+ private List<StatPermCollector> statCollectors;
+ private final Object statCollectorLock = new Object();
+ private BindingTransactionChain txChain;
+ private volatile boolean finishing = false;
+
+ private StatNodeRegistration nodeRegistrator;
+ private StatListeningCommiter<Flow, OpendaylightFlowStatisticsListener> flowListeningCommiter;
+ private StatListeningCommiter<Meter, OpendaylightMeterStatisticsListener> meterListeningCommiter;
+ private StatListeningCommiter<Group, OpendaylightGroupStatisticsListener> groupListeningCommiter;
+ private StatListeningCommiter<Queue, OpendaylightQueueStatisticsListener> queueNotifyCommiter;
+ private StatNotifyCommiter<OpendaylightFlowTableStatisticsListener> tableNotifCommiter;
+ private StatNotifyCommiter<OpendaylightPortStatisticsListener> portNotifyCommiter;
+
+ public StatisticsManagerImpl (final DataBroker dataBroker, final int maxNodesForCollector) {
+ this.dataBroker = Preconditions.checkNotNull(dataBroker, "DataBroker can not be null!");
+ ThreadFactory threadFact;
+ threadFact = new ThreadFactoryBuilder().setNameFormat("odl-stat-rpc-oper-thread-%d").build();
+ statRpcMsgManagerExecutor = Executors.newSingleThreadExecutor(threadFact);
+ threadFact = new ThreadFactoryBuilder().setNameFormat("odl-stat-ds-oper-thread-%d").build();
+ statDataStoreOperationServ = Executors.newSingleThreadExecutor(threadFact);
+ maxNodesForCollectors = maxNodesForCollector;
+ txChain = dataBroker.createTransactionChain(this);
+ }
+
+ @Override
+ public void start(final NotificationProviderService notifService,
+ final RpcConsumerRegistry rpcRegistry, final long minReqNetMonitInt) {
+ Preconditions.checkArgument(rpcRegistry != null, "RpcConsumerRegistry can not be null !");
+ this.minReqNetMonitInt = minReqNetMonitInt;
+ rpcMsgManager = new StatRpcMsgManagerImpl(this, rpcRegistry, minReqNetMonitInt);
+ statCollectors = Collections.emptyList();
+ nodeRegistrator = new StatNodeRegistrationImpl(this, dataBroker, notifService);
+ flowListeningCommiter = new StatListenCommitFlow(this, dataBroker, notifService);
+ meterListeningCommiter = new StatListenCommitMeter(this, dataBroker, notifService);
+ groupListeningCommiter = new StatListenCommitGroup(this, dataBroker, notifService);
+ tableNotifCommiter = new StatNotifyCommitTable(this, notifService);
+ portNotifyCommiter = new StatNotifyCommitPort(this, notifService);
+ queueNotifyCommiter = new StatListenCommitQueue(this, dataBroker, notifService);
+
+ statRpcMsgManagerExecutor.execute(rpcMsgManager);
+ statDataStoreOperationServ.execute(this);
+ LOG.info("Statistics Manager started successfully!");
+ }
+
+ @Override
+ public void close() throws Exception {
+ finishing = true;
+ if (nodeRegistrator != null) {
+ nodeRegistrator.close();
+ nodeRegistrator = null;
+ }
+ if (flowListeningCommiter != null) {
+ flowListeningCommiter.close();
+ flowListeningCommiter = null;
+ }
+ if (meterListeningCommiter != null) {
+ meterListeningCommiter.close();
+ meterListeningCommiter = null;
+ }
+ if (groupListeningCommiter != null) {
+ groupListeningCommiter.close();
+ groupListeningCommiter = null;
+ }
+ if (tableNotifCommiter != null) {
+ tableNotifCommiter.close();
+ tableNotifCommiter = null;
+ }
+ if (portNotifyCommiter != null) {
+ portNotifyCommiter.close();
+ portNotifyCommiter = null;
+ }
+ if (queueNotifyCommiter != null) {
+ queueNotifyCommiter.close();
+ queueNotifyCommiter = null;
+ }
+ if (statCollectors != null) {
+ for (StatPermCollector collector : statCollectors) {
+ collector.close();
+ collector = null;
+ }
+ statCollectors = null;
+ }
+ if (rpcMsgManager != null) {
+ rpcMsgManager.close();
+ rpcMsgManager = null;
+ }
+ statRpcMsgManagerExecutor.shutdown();
+ statDataStoreOperationServ.shutdown();
+ if (txChain != null) {
+ txChain.close();
+ txChain = null;
+ }
+ }
+
+ @Override
+ public void enqueue(final StatDataStoreOperation op) {
+ // we don't need to block anything - next statistics come soon
+ final boolean success = dataStoreOperQueue.offer(op);
+ if ( ! success) {
+ LOG.debug("Stat DS/Operational submiter Queue is full!");
+ }
+ }
+
+ @Override
+ public void run() {
+ /* Neverending cyle - wait for finishing */
+ while ( ! finishing) {
+ try {
+ StatDataStoreOperation op = dataStoreOperQueue.take();
+ final ReadWriteTransaction tx = txChain.newReadWriteTransaction();
+ LOG.trace("New operations available, starting transaction {}", tx.getIdentifier());
+
+ int ops = 0;
+ do {
+ op.applyOperation(tx);
+
+ ops++;
+ if (ops < MAX_BATCH) {
+ op = dataStoreOperQueue.poll();
+ } else {
+ op = null;
+ }
+ } while (op != null);
+
+ LOG.trace("Processed {} operations, submitting transaction {}", ops, tx.getIdentifier());
+
+ tx.submit().checkedGet();
+ } catch (final InterruptedException e) {
+ LOG.warn("Stat Manager DS Operation thread interupted!", e);
+ finishing = true;
+ } catch (final Exception e) {
+ LOG.warn("Unhandled exception during processing statistics. Restarting transaction chain.", e);
+ txChain.close();
+ txChain = dataBroker.createTransactionChain(StatisticsManagerImpl.this);
+ cleanDataStoreOperQueue();
+ }
+ }
+ // Drain all events, making sure any blocked threads are unblocked
+ cleanDataStoreOperQueue();
+ }
+
+ private synchronized void cleanDataStoreOperQueue() {
+ // Drain all events, making sure any blocked threads are unblocked
+ while (! dataStoreOperQueue.isEmpty()) {
+ dataStoreOperQueue.poll();
+ }
+ }
+
+ @Override
+ public void onTransactionChainFailed(final TransactionChain<?, ?> chain, final AsyncTransaction<?, ?> transaction,
+ final Throwable cause) {
+ LOG.warn("Failed to export Flow Capable Statistics, Transaction {} failed.",transaction.getIdentifier(),cause);
+ }
+
+ @Override
+ public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
+ // NOOP
+ }
+
+ @Override
+ public boolean isProvidedFlowNodeActive(final InstanceIdentifier<Node> nodeIdent) {
+ for (final StatPermCollector collector : statCollectors) {
+ if (collector.isProvidedFlowNodeActive(nodeIdent)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public void collectNextStatistics(final InstanceIdentifier<Node> nodeIdent) {
+ for (final StatPermCollector collector : statCollectors) {
+ if (collector.isProvidedFlowNodeActive(nodeIdent)) {
+ collector.collectNextStatistics();
+ }
+ }
+ }
+
+ @Override
+ public void connectedNodeRegistration(final InstanceIdentifier<Node> nodeIdent,
+ final List<StatCapabTypes> statTypes, final Short nrOfSwitchTables) {
+ for (final StatPermCollector collector : statCollectors) {
+ if (collector.connectedNodeRegistration(nodeIdent, statTypes, nrOfSwitchTables)) {
+ return;
+ }
+ }
+ synchronized (statCollectorLock) {
+ for (final StatPermCollector collector : statCollectors) {
+ if (collector.connectedNodeRegistration(nodeIdent, statTypes, nrOfSwitchTables)) {
+ return;
+ }
+ }
+ final StatPermCollectorImpl newCollector = new StatPermCollectorImpl(this,
+ minReqNetMonitInt, statCollectors.size() + 1, maxNodesForCollectors);
+ final List<StatPermCollector> statCollectorsNew = new ArrayList<>(statCollectors);
+ newCollector.connectedNodeRegistration(nodeIdent, statTypes, nrOfSwitchTables);
+ statCollectorsNew.add(newCollector);
+ statCollectors = Collections.unmodifiableList(statCollectorsNew);
+ }
+ }
+
+ @Override
+ public void disconnectedNodeUnregistration(final InstanceIdentifier<Node> nodeIdent) {
+ flowListeningCommiter.cleanForDisconnect(nodeIdent);
+
+ for (final StatPermCollector collector : statCollectors) {
+ if (collector.disconnectedNodeUnregistration(nodeIdent)) {
+ if ( ! collector.hasActiveNodes()) {
+ synchronized (statCollectorLock) {
+ if (collector.hasActiveNodes()) {
+ return;
+ }
+ final List<StatPermCollector> newStatColl =
+ new ArrayList<>(statCollectors);
+ newStatColl.remove(collector);
+ statCollectors = Collections.unmodifiableList(newStatColl);
+ }
+ }
+ return;
+ }
+ }
+ LOG.debug("Node {} has not removed.", nodeIdent);
+ }
+
+ /* Getter internal Statistic Manager Job Classes */
+ @Override
+ public StatRpcMsgManager getRpcMsgManager() {
+ return rpcMsgManager;
+ }
+
+ @Override
+ public StatNodeRegistration getNodeRegistrator() {
+ return nodeRegistrator;
+ }
+
+ @Override
+ public StatListeningCommiter<Flow, OpendaylightFlowStatisticsListener> getFlowListenComit() {
+ return flowListeningCommiter;
+ }
+
+ @Override
+ public StatListeningCommiter<Meter, OpendaylightMeterStatisticsListener> getMeterListenCommit() {
+ return meterListeningCommiter;
+ }
+
+ @Override
+ public StatListeningCommiter<Group, OpendaylightGroupStatisticsListener> getGroupListenCommit() {
+ return groupListeningCommiter;
+ }
+
+ @Override
+ public StatListeningCommiter<Queue, OpendaylightQueueStatisticsListener> getQueueNotifyCommit() {
+ return queueNotifyCommiter;
+ }
+
+
+ @Override
+ public StatNotifyCommiter<OpendaylightFlowTableStatisticsListener> getTableNotifCommit() {
+ return tableNotifCommiter;
+ }
+
+ @Override
+ public StatNotifyCommiter<OpendaylightPortStatisticsListener> getPortNotifyCommit() {
+ return portNotifyCommiter;
+ }
+}
+
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.statistics.manager;
+package org.opendaylight.controller.md.statistics.manager.impl.helper;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.InetAddresses;
import java.net.Inet4Address;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Ipv4Prefix;
import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.MacAddress;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.MacAddressFilter;
import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.EthernetMatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
-
/**
* Utility class for comparing flows.
*/
-final class FlowComparator {
- private final static Logger logger = LoggerFactory.getLogger(FlowComparator.class);
+public final class FlowComparator {
+ private final static Logger LOG = LoggerFactory.getLogger(FlowComparator.class);
private FlowComparator() {
-
+ throw new UnsupportedOperationException("Utilities class should not be instantiated");
}
- public static boolean flowEquals(Flow statsFlow, Flow storedFlow) {
+ public static boolean flowEquals(final Flow statsFlow, final Flow storedFlow) {
if (statsFlow == null || storedFlow == null) {
return false;
}
* @param storedFlow
* @return
*/
- public static boolean matchEquals(Match statsFlow, Match storedFlow) {
+ public static boolean matchEquals(final Match statsFlow, final Match storedFlow) {
if (statsFlow == storedFlow) {
return true;
}
* statistic data, openflow driver library returns AA:BB:CC:DD:EE:FF and default eqauls fails here.
*/
@VisibleForTesting
- static boolean ethernetMatchEquals(EthernetMatch statsEthernetMatch, EthernetMatch storedEthernetMatch){
+ static boolean ethernetMatchEquals(final EthernetMatch statsEthernetMatch, final EthernetMatch storedEthernetMatch){
boolean verdict = true;
- Boolean checkNullValues = checkNullValues(statsEthernetMatch, storedEthernetMatch);
+ final Boolean checkNullValues = checkNullValues(statsEthernetMatch, storedEthernetMatch);
if (checkNullValues != null) {
verdict = checkNullValues;
} else {
return verdict;
}
- private static boolean ethernetMatchFieldsEquals(MacAddressFilter statsEthernetMatchFields,
- MacAddressFilter storedEthernetMatchFields){
+ private static boolean ethernetMatchFieldsEquals(final MacAddressFilter statsEthernetMatchFields,
+ final MacAddressFilter storedEthernetMatchFields){
boolean verdict = true;
- Boolean checkNullValues = checkNullValues(statsEthernetMatchFields, storedEthernetMatchFields);
+ final Boolean checkNullValues = checkNullValues(statsEthernetMatchFields, storedEthernetMatchFields);
if (checkNullValues != null) {
verdict = checkNullValues;
} else {
return verdict;
}
- private static boolean macAddressEquals(MacAddress statsMacAddress, MacAddress storedMacAddress){
+ private static boolean macAddressEquals(final MacAddress statsMacAddress, final MacAddress storedMacAddress){
boolean verdict = true;
- Boolean checkNullValues = checkNullValues(statsMacAddress, storedMacAddress);
+ final Boolean checkNullValues = checkNullValues(statsMacAddress, storedMacAddress);
if (checkNullValues != null) {
verdict = checkNullValues;
} else {
}
@VisibleForTesting
- static boolean layer3MatchEquals(Layer3Match statsLayer3Match, Layer3Match storedLayer3Match){
+ static boolean layer3MatchEquals(final Layer3Match statsLayer3Match, final Layer3Match storedLayer3Match){
boolean verdict = true;
if(statsLayer3Match instanceof Ipv4Match && storedLayer3Match instanceof Ipv4Match){
- Ipv4Match statsIpv4Match = (Ipv4Match)statsLayer3Match;
- Ipv4Match storedIpv4Match = (Ipv4Match)storedLayer3Match;
+ final Ipv4Match statsIpv4Match = (Ipv4Match)statsLayer3Match;
+ final Ipv4Match storedIpv4Match = (Ipv4Match)storedLayer3Match;
if (verdict) {
verdict = compareNullSafe(
statsIpv4Match.getIpv4Source(), storedIpv4Match.getIpv4Source());
}
} else {
- Boolean nullCheckOut = checkNullValues(storedLayer3Match, statsLayer3Match);
+ final Boolean nullCheckOut = checkNullValues(storedLayer3Match, statsLayer3Match);
if (nullCheckOut != null) {
verdict = nullCheckOut;
} else {
return verdict;
}
- private static boolean compareNullSafe(Ipv4Prefix statsIpv4, Ipv4Prefix storedIpv4) {
+ private static boolean compareNullSafe(final Ipv4Prefix statsIpv4, final Ipv4Prefix storedIpv4) {
boolean verdict = true;
- Boolean checkDestNullValuesOut = checkNullValues(storedIpv4, statsIpv4);
+ final Boolean checkDestNullValuesOut = checkNullValues(storedIpv4, statsIpv4);
if (checkDestNullValuesOut != null) {
verdict = checkDestNullValuesOut;
} else if(!IpAddressEquals(statsIpv4, storedIpv4)){
return verdict;
}
- private static Boolean checkNullValues(Object v1, Object v2) {
+ private static Boolean checkNullValues(final Object v1, final Object v2) {
Boolean verdict = null;
if (v1 == null && v2 != null) {
verdict = Boolean.FALSE;
* @param storedIpAddress
* @return true if IPv4prefixes equals
*/
- private static boolean IpAddressEquals(Ipv4Prefix statsIpAddress, Ipv4Prefix storedIpAddress) {
- IntegerIpAddress statsIpAddressInt = StrIpToIntIp(statsIpAddress.getValue());
- IntegerIpAddress storedIpAddressInt = StrIpToIntIp(storedIpAddress.getValue());
+ private static boolean IpAddressEquals(final Ipv4Prefix statsIpAddress, final Ipv4Prefix storedIpAddress) {
+ final IntegerIpAddress statsIpAddressInt = StrIpToIntIp(statsIpAddress.getValue());
+ final IntegerIpAddress storedIpAddressInt = StrIpToIntIp(storedIpAddress.getValue());
if(IpAndMaskBasedMatch(statsIpAddressInt,storedIpAddressInt)){
return true;
return false;
}
- private static boolean IpAndMaskBasedMatch(IntegerIpAddress statsIpAddressInt,IntegerIpAddress storedIpAddressInt){
+ private static boolean IpAndMaskBasedMatch(final IntegerIpAddress statsIpAddressInt,final IntegerIpAddress storedIpAddressInt){
return ((statsIpAddressInt.getIp() & statsIpAddressInt.getMask()) == (storedIpAddressInt.getIp() & storedIpAddressInt.getMask()));
}
- private static boolean IpBasedMatch(IntegerIpAddress statsIpAddressInt,IntegerIpAddress storedIpAddressInt){
+ private static boolean IpBasedMatch(final IntegerIpAddress statsIpAddressInt,final IntegerIpAddress storedIpAddressInt){
return (statsIpAddressInt.getIp() == storedIpAddressInt.getIp());
}
* Method return integer version of ip address. Converted int will be mask if
* mask specified
*/
- private static IntegerIpAddress StrIpToIntIp(String ipAddresss){
+ private static IntegerIpAddress StrIpToIntIp(final String ipAddresss){
- String[] parts = ipAddresss.split("/");
- String ip = parts[0];
+ final String[] parts = ipAddresss.split("/");
+ final String ip = parts[0];
int prefix;
if (parts.length < 2) {
}
IntegerIpAddress integerIpAddress = null;
- try {
- Inet4Address addr = (Inet4Address) InetAddress.getByName(ip);
- byte[] addrBytes = addr.getAddress();
- int ipInt = ((addrBytes[0] & 0xFF) << 24) |
+
+ final Inet4Address addr = ((Inet4Address) InetAddresses.forString(ip));
+ final byte[] addrBytes = addr.getAddress();
+ final int ipInt = ((addrBytes[0] & 0xFF) << 24) |
((addrBytes[1] & 0xFF) << 16) |
((addrBytes[2] & 0xFF) << 8) |
((addrBytes[3] & 0xFF) << 0);
- int mask = 0xffffffff << 32 - prefix;
+ // FIXME: Is this valid?
+ final int mask = 0xffffffff << 32 - prefix;
integerIpAddress = new IntegerIpAddress(ipInt, mask);
- } catch (UnknownHostException e){
- logger.error("Failed to determine host IP address by name: {}", e.getMessage(), e);
- }
+
return integerIpAddress;
}
private static class IntegerIpAddress{
int ip;
int mask;
- public IntegerIpAddress(int ip, int mask) {
+ public IntegerIpAddress(final int ip, final int mask) {
this.ip = ip;
this.mask = mask;
}
--- /dev/null
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.statistics.manager;
+
+/**
+ * statistics-manager
+ * org.opendaylight.controller.md.statistics.manager
+ *
+ *
+ *
+ * @author <a href="mailto:vdemcak@cisco.com">Vaclav Demcak</a>
+ *
+ * Created: Sep 6, 2014
+ */
+public class StatisticsManagerProvider {
+
+ private final StatisticsManagerActivator activator;
+
+ public StatisticsManagerProvider(final StatisticsManagerActivator activator) {
+ this.activator = activator;
+ }
+
+ /**
+ * Method provides Initialized {@link StatisticsManager}
+ * from {@link StatisticsManagerActivator} for all tests
+ * suites;
+ *
+ * @return
+ */
+ public StatisticsManager getStatisticsManager() {
+ return activator.getStatisticManager();
+ }
+}
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
-package org.opendaylight.controller.md.statistics.manager;
+package org.opendaylight.controller.md.statistics.manager.impl.helper;
import org.junit.Assert;
import org.junit.Test;
*/
@Test
public void testLayer3MatchEquals() {
- String[][][] matchSeeds = new String[][][] {
+ final String[][][] matchSeeds = new String[][][] {
{{"10.1.2.0/24", "10.1.2.0/24"}, {"10.1.2.0/24", "10.1.2.0/24"}},
{{"10.1.2.0/24", "10.1.2.0/24"}, {"10.1.2.0/24", "10.1.1.0/24"}},
{{"10.1.1.0/24", "10.1.2.0/24"}, {"10.1.2.0/24", "10.1.2.0/24"}},
{{null, null}, {null, null}},
};
- boolean[] matches = new boolean[] {
+ final boolean[] matches = new boolean[] {
true,
false,
false,
* @param matches expected match output
*
*/
- private static void checkComparisonOfL3Match(String m1Source, String m1Destination,
- String m2Source, String msDestination, boolean matches) {
- Ipv4Match m1Layer3 = prepareIPv4Match(m1Source, m1Destination);
- Ipv4Match m2Layer3 = prepareIPv4Match(m2Source, msDestination);
+ private static void checkComparisonOfL3Match(final String m1Source, final String m1Destination,
+ final String m2Source, final String msDestination, final boolean matches) {
+ final Ipv4Match m1Layer3 = prepareIPv4Match(m1Source, m1Destination);
+ final Ipv4Match m2Layer3 = prepareIPv4Match(m2Source, msDestination);
boolean comparisonResult;
try {
comparisonResult = FlowComparator.layer3MatchEquals(m1Layer3, m2Layer3);
Assert.assertEquals("failed to compare: "+m1Layer3+" vs. "+m2Layer3,
matches, comparisonResult);
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.error("failed to compare: {} vs. {}", m1Layer3, m2Layer3, e);
Assert.fail(e.getMessage());
}
}
- private static Ipv4Match prepareIPv4Match(String source, String destination) {
- Ipv4MatchBuilder ipv4MatchBuilder = new Ipv4MatchBuilder();
+ private static Ipv4Match prepareIPv4Match(final String source, final String destination) {
+ final Ipv4MatchBuilder ipv4MatchBuilder = new Ipv4MatchBuilder();
if (source != null) {
ipv4MatchBuilder.setIpv4Source(new Ipv4Prefix(source));
}
return ipv4MatchBuilder.build();
}
/**
- * Test method for {@link org.opendaylight.controller.md.statistics.manager.FlowComparator#ethernetMatchEquals(EthernetMatch, EthernetMatch)
+ * Test method for {@link org.opendaylight.controller.md.statistics.manager.impl.helper.FlowComparator#ethernetMatchEquals(EthernetMatch, EthernetMatch)
*/
@Test
public void testEthernetMatchEquals() {
- String[][][] ethernetMatchSeeds = new String[][][] {
+ final String[][][] ethernetMatchSeeds = new String[][][] {
{{"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}, {"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}},
{{"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}, {"aa:bb:bc:cd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}},
{{"aa:bb:cc:dd:ee:ff", "ff:ff:ff:ff:ff:ff","0800"}, {"AA:BB:CC:DD:EE:FF", "ff:ff:ff:ff:ff:ff","0800"}},
{{null, null,null}, {null, null,null}},
};
- boolean[] matches = new boolean[] {
+ final boolean[] matches = new boolean[] {
true,
false,
true,
* @param ethernetMatch1
* @param ethernetMatch2
*/
- private static void checkComparisonOfEthernetMatch(String macAddress1, String macAddressMask1,String etherType1,
- String macAddress2, String macAddressMask2,String etherType2, boolean expectedResult) {
- EthernetMatch ethernetMatch1 = prepareEthernetMatch(macAddress1, macAddressMask1,etherType1);
- EthernetMatch ethernetMatch2 = prepareEthernetMatch(macAddress2, macAddressMask2,etherType2);
+ private static void checkComparisonOfEthernetMatch(final String macAddress1, final String macAddressMask1,final String etherType1,
+ final String macAddress2, final String macAddressMask2,final String etherType2, final boolean expectedResult) {
+ final EthernetMatch ethernetMatch1 = prepareEthernetMatch(macAddress1, macAddressMask1,etherType1);
+ final EthernetMatch ethernetMatch2 = prepareEthernetMatch(macAddress2, macAddressMask2,etherType2);
boolean comparisonResult;
try {
comparisonResult = FlowComparator.ethernetMatchEquals(ethernetMatch1, ethernetMatch2);
Assert.assertEquals("failed to compare: "+ethernetMatch1+" vs. "+ethernetMatch2,
expectedResult, comparisonResult);
- } catch (Exception e) {
+ } catch (final Exception e) {
LOG.error("failed to compare: {} vs. {}", ethernetMatch1, ethernetMatch2, e);
Assert.fail(e.getMessage());
}
}
- private static EthernetMatch prepareEthernetMatch(String macAddress, String macAddressMask, String etherType) {
- EthernetMatchBuilder ethernetMatchBuilder = new EthernetMatchBuilder();
- EthernetSourceBuilder ethernetSourceBuilder = new EthernetSourceBuilder();
+ private static EthernetMatch prepareEthernetMatch(final String macAddress, final String macAddressMask, final String etherType) {
+ final EthernetMatchBuilder ethernetMatchBuilder = new EthernetMatchBuilder();
+ final EthernetSourceBuilder ethernetSourceBuilder = new EthernetSourceBuilder();
if (macAddress != null) {
ethernetSourceBuilder.setAddress(new MacAddress(macAddress));
}
ethernetSourceBuilder.setMask(new MacAddress(macAddressMask));
}
if(etherType != null){
- EthernetTypeBuilder ethernetType = new EthernetTypeBuilder();
+ final EthernetTypeBuilder ethernetType = new EthernetTypeBuilder();
ethernetType.setType(new EtherType(Long.parseLong(etherType,16)));
ethernetMatchBuilder.setEthernetType(ethernetType.build());
}
--- /dev/null
+package test.mock;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+import java.util.concurrent.ExecutionException;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManagerActivator;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityFlowStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import test.mock.util.StatisticsManagerTest;
+
+import com.google.common.base.Optional;
+
+public class FlowStatisticsTest extends StatisticsManagerTest {
+ private final Object waitObject = new Object();
+
+// @Test(timeout = 5000)
+ public void addedFlowOnDemandStatsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+
+ final Flow flow = getFlow();
+
+ final InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(flow.getTableId()))
+ .child(Flow.class, flow.getKey());
+ final InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(flow.getTableId()));
+ final Table table = new TableBuilder().setKey(new TableKey(flow.getTableId())).setFlow(Collections.<Flow>emptyList()).build();
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, tableII, table);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, tableII, table);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowII, flow);
+ assertCommit(writeTx.submit());
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ flowII.augmentation(FlowStatisticsData.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<FlowStatisticsData> flowStatDataOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, flowII.augmentation(FlowStatisticsData.class))
+ .checkedGet();
+ assertTrue(flowStatDataOptional.isPresent());
+ assertEquals(COUNTER_64_TEST_VALUE, flowStatDataOptional.get().getFlowStatistics().getByteCount());
+
+ }
+
+// @Test(timeout = 5000)
+ public void deletedFlowStatsRemovalTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+
+ final Flow flow = getFlow();
+
+ final InstanceIdentifier<Flow> flowII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(flow.getTableId()))
+ .child(Flow.class, flow.getKey());
+ final InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(flow.getTableId()));
+ final Table table = new TableBuilder().setKey(new TableKey(flow.getTableId())).setFlow(Collections.<Flow>emptyList()).build();
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, tableII, table);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowII, flow);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, tableII, table);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowII, flow);
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ flowII.augmentation(FlowStatisticsData.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ assertCommit(writeTx.submit());
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ Optional<Flow> flowStatDataOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, flowII).checkedGet();
+ assertTrue(flowStatDataOptional.isPresent());
+// assertEquals(COUNTER_64_TEST_VALUE, flowStatDataOptional.get().getFlowStatistics().getByteCount());
+
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, flowII);
+ assertCommit(writeTx.submit());
+
+ readTx = getDataBroker().newReadOnlyTransaction();
+ flowStatDataOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, flowII).checkedGet();
+ assertFalse(flowStatDataOptional.isPresent());
+ }
+
+// @Test(timeout = 23000)
+ public void getAllStatsWhenNodeIsConnectedTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityFlowStats.class);
+
+ final Flow flow = getFlow();
+
+ final InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(flow.getTableId()));
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ tableII.child(Flow.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<Table> tableOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key).augmentation(FlowCapableNode.class)
+ .child(Table.class, new TableKey(flow.getTableId()))).checkedGet();
+ assertTrue(tableOptional.isPresent());
+ final FlowStatisticsData flowStats = tableOptional.get().getFlow().get(0).getAugmentation(FlowStatisticsData.class);
+ assertTrue(flowStats != null);
+ assertEquals(COUNTER_64_TEST_VALUE, flowStats.getFlowStatistics().getByteCount());
+ }
+
+ public class ChangeListener implements DataChangeListener {
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ synchronized (waitObject) {
+ waitObject.notify();
+ }
+ }
+ }
+}
+
--- /dev/null
+package test.mock;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.ExecutionException;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManagerActivator;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityGroupStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import test.mock.util.StatisticsManagerTest;
+
+import com.google.common.base.Optional;
+
+public class GroupStatisticsTest extends StatisticsManagerTest {
+ private final Object waitObject = new Object();
+
+// @Test(timeout = 5000)
+ public void addedGroupOnDemandStatsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+
+ final Group group = getGroup();
+
+ final InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Group.class, group.getKey());
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, groupII, group);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, groupII, group);
+ assertCommit(writeTx.submit());
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ groupII.augmentation(NodeGroupStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<NodeGroupStatistics> groupOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, groupII.augmentation(NodeGroupStatistics.class)).checkedGet();
+ assertTrue(groupOptional.isPresent());
+ assertEquals(COUNTER_64_TEST_VALUE, groupOptional.get().getGroupStatistics().getByteCount());
+ }
+
+// @Test(timeout = 5000)
+ public void deletedGroupStasRemovalTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+
+ final Group group = getGroup();
+ final InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Group.class, group.getKey());
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, groupII, group);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, groupII, group);
+ assertCommit(writeTx.submit());
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ groupII.augmentation(NodeGroupStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ Optional<NodeGroupStatistics> groupOptional = readTx.read(LogicalDatastoreType.OPERATIONAL,
+ groupII.augmentation(NodeGroupStatistics.class)).checkedGet();
+ assertTrue(groupOptional.isPresent());
+ assertEquals(COUNTER_64_TEST_VALUE, groupOptional.get().getGroupStatistics().getByteCount());
+
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, groupII);
+ assertCommit(writeTx.submit());
+
+ readTx = getDataBroker().newReadOnlyTransaction();
+ groupOptional = readTx.read(LogicalDatastoreType.OPERATIONAL,
+ groupII.augmentation(NodeGroupStatistics.class)).checkedGet();
+ assertFalse(groupOptional.isPresent());
+
+ }
+
+// @Test(timeout = 23000)
+ public void getAllStatsFromConnectedNodeTest() throws ExecutionException, InterruptedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityGroupStats.class);
+
+ final InstanceIdentifier<Group> groupII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Group.class, getGroup().getKey());
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ groupII.augmentation(NodeGroupStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<Group> optionalGroup = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key).augmentation(FlowCapableNode.class)
+ .child(Group.class, getGroup().getKey())).get();
+
+ assertTrue(optionalGroup.isPresent());
+ assertTrue(optionalGroup.get().getAugmentation(NodeGroupDescStats.class) != null);
+ final NodeGroupStatistics groupStats = optionalGroup.get().getAugmentation(NodeGroupStatistics.class);
+ assertTrue(groupStats != null);
+ assertEquals(COUNTER_64_TEST_VALUE, groupStats.getGroupStatistics().getByteCount());
+
+ readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<GroupFeatures> optionalGroupFeatures = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key).augmentation(NodeGroupFeatures.class).child(GroupFeatures.class)).get();
+ assertTrue(optionalGroupFeatures.isPresent());
+ assertEquals(1, optionalGroupFeatures.get().getMaxGroups().size());
+ assertEquals(MAX_GROUPS_TEST_VALUE, optionalGroupFeatures.get().getMaxGroups().get(0));
+ }
+
+ private class ChangeListener implements DataChangeListener {
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ synchronized (waitObject) {
+ waitObject.notify();
+ }
+ }
+ }
+}
+
--- /dev/null
+package test.mock;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.ExecutionException;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManagerActivator;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeatures;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import test.mock.util.StatisticsManagerTest;
+
+import com.google.common.base.Optional;
+
+public class MeterStatisticsTest extends StatisticsManagerTest {
+ private final Object waitObject = new Object();
+
+// @Test(timeout = 5000)
+ public void addedMeterOnDemandStatsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+
+ final Meter meter = getMeter();
+ final InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Meter.class, meter.getKey());
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, meterII, meter);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, meterII, meter);
+ assertCommit(writeTx.submit());
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ meterII.augmentation(NodeMeterStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<NodeMeterStatistics> meterStatsOptional = readTx.read(LogicalDatastoreType.OPERATIONAL,
+ meterII.augmentation(NodeMeterStatistics.class)).checkedGet();
+ assertTrue(meterStatsOptional.isPresent());
+ assertEquals(COUNTER_64_TEST_VALUE, meterStatsOptional.get().getMeterStatistics().getByteInCount());
+ assertEquals(COUNTER_64_TEST_VALUE, meterStatsOptional.get().getMeterStatistics().getPacketInCount());
+ }
+
+// @Test(timeout = 5000)
+ public void deletedMeterStatsRemovalTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+
+ final Meter meter = getMeter();
+ final InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Meter.class, meter.getKey());
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, meterII, meter);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, meterII, meter);
+ assertCommit(writeTx.submit());
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ meterII.augmentation(NodeMeterStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<NodeMeterStatistics> meterStatsOptional = readTx.read(LogicalDatastoreType.OPERATIONAL,
+ meterII.augmentation(NodeMeterStatistics.class)).checkedGet();
+ assertTrue(meterStatsOptional.isPresent());
+ assertEquals(COUNTER_64_TEST_VALUE, meterStatsOptional.get().getMeterStatistics().getByteInCount());
+ assertEquals(COUNTER_64_TEST_VALUE, meterStatsOptional.get().getMeterStatistics().getPacketInCount());
+
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, meterII);
+ assertCommit(writeTx.submit());
+
+ readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<Meter> meterOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, meterII).checkedGet();
+ assertFalse(meterOptional.isPresent());
+ }
+
+// @Test(timeout = 23000)
+ public void getAllStatsFromConnectedNodeTest() throws ExecutionException, InterruptedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNodeWithFeatures(s1Key, true);
+
+ final InstanceIdentifier<Meter> meterII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Meter.class, getMeter().getKey());
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ meterII.augmentation(NodeMeterStatistics.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<Meter> optionalMeter = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key).augmentation(FlowCapableNode.class)
+ .child(Meter.class, getMeter().getKey())).get();
+
+ assertTrue(optionalMeter.isPresent());
+ assertTrue(optionalMeter.get().getAugmentation(NodeMeterConfigStats.class) != null);
+ final NodeMeterStatistics meterStats = optionalMeter.get().getAugmentation(NodeMeterStatistics.class);
+ assertTrue(meterStats != null);
+ assertEquals(COUNTER_64_TEST_VALUE, meterStats.getMeterStatistics().getByteInCount());
+ assertEquals(COUNTER_64_TEST_VALUE, meterStats.getMeterStatistics().getPacketInCount());
+
+ readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<MeterFeatures> optionalMeterFeautures = readTx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key).augmentation(NodeMeterFeatures.class).child(MeterFeatures.class)).get();
+ assertTrue(optionalMeterFeautures.isPresent());
+ assertEquals(COUNTER_32_TEST_VALUE, optionalMeterFeautures.get().getMaxMeter());
+ }
+
+ private class ChangeListener implements DataChangeListener {
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ synchronized (waitObject) {
+ waitObject.notify();
+ }
+ }
+ }
+}
+
--- /dev/null
+package test.mock;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.ExecutionException;
+
+import org.opendaylight.controller.md.statistics.manager.StatisticsManagerActivator;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManagerProvider;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import test.mock.util.StatisticsManagerTest;
+
+public class NodeRegistrationTest extends StatisticsManagerTest {
+
+// @Test
+ public void nodeRegistrationTest() throws ExecutionException, InterruptedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ final StatisticsManagerProvider statisticsManagerProvider = new StatisticsManagerProvider(activator);
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+ Thread.sleep(1000);
+ final InstanceIdentifier<Node> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key);
+
+ assertTrue(statisticsManagerProvider.getStatisticsManager().isProvidedFlowNodeActive(nodeII));
+ }
+
+// @Test
+ public void nodeUnregistrationTest() throws ExecutionException, InterruptedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ final StatisticsManagerProvider statisticsManagerProvider = new StatisticsManagerProvider(activator);
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+ Thread.sleep(1000);
+ final InstanceIdentifier<Node> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key);
+
+ assertTrue(statisticsManagerProvider.getStatisticsManager().isProvidedFlowNodeActive(nodeII));
+
+ removeNode(s1Key);
+ Thread.sleep(1000);
+ assertFalse(statisticsManagerProvider.getStatisticsManager().isProvidedFlowNodeActive(nodeII));
+ }
+}
+
--- /dev/null
+package test.mock;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.ExecutionException;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManagerActivator;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityPortStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.FlowCapableNodeConnectorStatisticsData;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import test.mock.util.StatisticsManagerTest;
+
+import com.google.common.base.Optional;
+
+public class PortStatisticsTest extends StatisticsManagerTest {
+ private final Object waitObject = new Object();
+
+// @Test(timeout = 23000)
+ public void getPortStatisticsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityPortStats.class);
+
+ final InstanceIdentifier<NodeConnector> nodeConnectorII = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key).child(NodeConnector.class, new NodeConnectorKey(getNodeConnectorId()));
+
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ nodeConnectorII.augmentation(FlowCapableNodeConnectorStatisticsData.class),
+ new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<FlowCapableNodeConnectorStatisticsData> flowCapableNodeConnectorStatisticsDataOptional =
+ readTx.read(LogicalDatastoreType.OPERATIONAL,
+ nodeConnectorII.augmentation(FlowCapableNodeConnectorStatisticsData.class)).checkedGet();
+ assertTrue(flowCapableNodeConnectorStatisticsDataOptional.isPresent());
+ assertEquals(BIG_INTEGER_TEST_VALUE,
+ flowCapableNodeConnectorStatisticsDataOptional.get().getFlowCapableNodeConnectorStatistics()
+ .getReceiveDrops());
+ assertEquals(BIG_INTEGER_TEST_VALUE,
+ flowCapableNodeConnectorStatisticsDataOptional.get().getFlowCapableNodeConnectorStatistics()
+ .getCollisionCount());
+ }
+
+ private class ChangeListener implements DataChangeListener {
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ synchronized (waitObject) {
+ waitObject.notify();
+ }
+ }
+ }
+}
+
--- /dev/null
+package test.mock;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+import java.util.concurrent.ExecutionException;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManagerActivator;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityQueueStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.Port;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.FlowCapableNodeConnectorQueueStatisticsData;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import test.mock.util.StatisticsManagerTest;
+
+import com.google.common.base.Optional;
+
+
+public class QueueStatisticsTest extends StatisticsManagerTest {
+ private final Object waitObject = new Object();
+
+// @Test(timeout = 5000)
+ public void addedQueueOnDemandStatsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+
+ final Port port = getPort();
+
+ final NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder();
+ final FlowCapableNodeConnectorBuilder fcncBuilder = new FlowCapableNodeConnectorBuilder();
+ fcncBuilder.setConfiguration(port.getConfiguration());
+ fcncBuilder.setPortNumber(port.getPortNumber());
+ fcncBuilder.setQueue(Collections.<Queue>emptyList());
+ ncBuilder.setKey(new NodeConnectorKey(new NodeConnectorId("connector.1")));
+ ncBuilder.addAugmentation(FlowCapableNodeConnector.class, fcncBuilder.build());
+
+
+ final Queue queue = getQueue();
+ final InstanceIdentifier<Queue> queueII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .child(NodeConnector.class, ncBuilder.getKey()).augmentation(FlowCapableNodeConnector.class)
+ .child(Queue.class, queue.getKey());
+ final InstanceIdentifier<NodeConnector> nodeConnectorII = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key).child(NodeConnector.class, ncBuilder.getKey());
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, nodeConnectorII, ncBuilder.build());
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, queueII, queue);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, nodeConnectorII, ncBuilder.build());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, queueII, queue);
+ assertCommit(writeTx.submit());
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ queueII.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<FlowCapableNodeConnectorQueueStatisticsData> queueStatsOptional = readTx.read(LogicalDatastoreType.OPERATIONAL,
+ queueII.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class)).checkedGet();
+ assertTrue(queueStatsOptional.isPresent());
+ assertEquals(COUNTER_64_TEST_VALUE,
+ queueStatsOptional.get().getFlowCapableNodeConnectorQueueStatistics().getTransmittedBytes());
+ }
+
+// @Test(timeout = 5000)
+ public void deletedQueueStatsRemovalTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNode(s1Key);
+
+ final Port port = getPort();
+
+ final NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder();
+ final FlowCapableNodeConnectorBuilder fcncBuilder = new FlowCapableNodeConnectorBuilder();
+ fcncBuilder.setConfiguration(port.getConfiguration());
+ fcncBuilder.setPortNumber(port.getPortNumber());
+ fcncBuilder.setQueue(Collections.<Queue>emptyList());
+ ncBuilder.setKey(new NodeConnectorKey(new NodeConnectorId("connector.1")));
+ ncBuilder.addAugmentation(FlowCapableNodeConnector.class, fcncBuilder.build());
+
+
+ final Queue queue = getQueue();
+ final InstanceIdentifier<Queue> queueII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .child(NodeConnector.class, ncBuilder.getKey()).augmentation(FlowCapableNodeConnector.class)
+ .child(Queue.class, queue.getKey());
+ final InstanceIdentifier<NodeConnector> nodeConnectorII = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key).child(NodeConnector.class, ncBuilder.getKey());
+
+ WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, nodeConnectorII, ncBuilder.build());
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, queueII, queue);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, nodeConnectorII, ncBuilder.build());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, queueII, queue);
+ assertCommit(writeTx.submit());
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ queueII.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class),
+ new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ Optional<FlowCapableNodeConnectorQueueStatisticsData> queueStatsOptional = readTx.read(LogicalDatastoreType.OPERATIONAL,
+ queueII.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class)).checkedGet();
+ assertTrue(queueStatsOptional.isPresent());
+ assertEquals(COUNTER_64_TEST_VALUE,
+ queueStatsOptional.get().getFlowCapableNodeConnectorQueueStatistics().getTransmittedBytes());
+
+ writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.CONFIGURATION, queueII);
+ assertCommit(writeTx.submit());
+
+ readTx = getDataBroker().newReadOnlyTransaction();
+ queueStatsOptional = readTx.read(LogicalDatastoreType.OPERATIONAL,
+ queueII.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class)).checkedGet();
+ assertFalse(queueStatsOptional.isPresent());
+ }
+
+// @Test(timeout = 23000)
+ public void getAllStatsFromConnectedNodeTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityQueueStats.class);
+
+ final NodeConnectorBuilder ncBuilder = new NodeConnectorBuilder();
+ final FlowCapableNodeConnectorBuilder fcncBuilder = new FlowCapableNodeConnectorBuilder();
+ ncBuilder.setKey(new NodeConnectorKey(getNodeConnectorId()));
+ ncBuilder.addAugmentation(FlowCapableNodeConnector.class, fcncBuilder.build());
+
+ final InstanceIdentifier<NodeConnector> nodeConnectorII = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, s1Key)
+ .child(NodeConnector.class, ncBuilder.getKey());
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, nodeConnectorII, ncBuilder.build());
+ final InstanceIdentifier<Queue> queueII = nodeConnectorII.augmentation(FlowCapableNodeConnector.class)
+ .child(Queue.class, getQueue().getKey());
+ final QueueBuilder qBuilder = new QueueBuilder(getQueue());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, queueII, qBuilder.build());
+ assertCommit(writeTx.submit());
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ queueII.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<Queue> queueOptional = readTx.read(LogicalDatastoreType.OPERATIONAL, queueII).checkedGet();
+ assertTrue(queueOptional.isPresent());
+ final FlowCapableNodeConnectorQueueStatisticsData queueStats =
+ queueOptional.get().getAugmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
+ assertTrue(queueStats != null);
+ assertEquals(COUNTER_64_TEST_VALUE,
+ queueStats.getFlowCapableNodeConnectorQueueStatistics().getTransmittedBytes());
+ }
+
+ private class ChangeListener implements DataChangeListener {
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ synchronized (waitObject) {
+ waitObject.notify();
+ }
+ }
+ }
+}
+
--- /dev/null
+package test.mock;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.concurrent.ExecutionException;
+
+import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatisticsManagerActivator;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowFeatureCapabilityTableStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsData;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import test.mock.util.StatisticsManagerTest;
+
+import com.google.common.base.Optional;
+
+public class TableStatisticsTest extends StatisticsManagerTest {
+ private final Object waitObject = new Object();
+
+// @Test(timeout = 23000)
+ public void getTableStatisticsTest() throws ExecutionException, InterruptedException, ReadFailedException {
+ final StatisticsManagerActivator activator = new StatisticsManagerActivator();
+ activator.onSessionInitiated(providerContext);
+
+ addFlowCapableNodeWithFeatures(s1Key, false, FlowFeatureCapabilityTableStats.class);
+
+ final TableId tableId = getTableId();
+ final InstanceIdentifier<Table> tableII = InstanceIdentifier.create(Nodes.class).child(Node.class, s1Key)
+ .augmentation(FlowCapableNode.class).child(Table.class, new TableKey(tableId.getValue()));
+
+ getDataBroker().registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ tableII.augmentation(FlowTableStatisticsData.class), new ChangeListener(), AsyncDataBroker.DataChangeScope.BASE);
+
+ synchronized (waitObject) {
+ waitObject.wait();
+ }
+
+ final ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
+ final Optional<FlowTableStatisticsData> flowTableStatisticsDataOptional = readTx.read(
+ LogicalDatastoreType.OPERATIONAL, tableII.augmentation(FlowTableStatisticsData.class)).checkedGet();
+ assertTrue(flowTableStatisticsDataOptional.isPresent());
+ assertEquals(COUNTER_32_TEST_VALUE,
+ flowTableStatisticsDataOptional.get().getFlowTableStatistics().getActiveFlows());
+ assertEquals(COUNTER_64_TEST_VALUE,
+ flowTableStatisticsDataOptional.get().getFlowTableStatistics().getPacketsLookedUp());
+ }
+
+ private class ChangeListener implements DataChangeListener {
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> change) {
+ synchronized (waitObject) {
+ waitObject.notify();
+ }
+ }
+ }
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+public class AbstractDataBrokerTest extends AbstractSchemaAwareTest {
+
+ private DataBrokerTestCustomizer testCustomizer;
+ private DataBroker dataBroker;
+ private DOMDataBroker domBroker;
+
+
+ @Override
+ protected void setupWithSchema(final SchemaContext context) {
+ testCustomizer = createDataBrokerTestCustomizer();
+ dataBroker = testCustomizer.createDataBroker();
+ domBroker = testCustomizer.createDOMDataBroker();
+ testCustomizer.updateSchema(context);
+ setupWithDataBroker(dataBroker);
+ }
+
+ protected void setupWithDataBroker(final DataBroker dataBroker) {
+ // Intentionally left No-op, subclasses may customize it
+ }
+
+ protected DataBrokerTestCustomizer createDataBrokerTestCustomizer() {
+ return new DataBrokerTestCustomizer();
+ }
+
+ public DataBroker getDataBroker() {
+ return dataBroker;
+ }
+
+ public DOMDataBroker getDomBroker() {
+ return domBroker;
+ }
+
+ protected static final void assertCommit(final ListenableFuture<Void> commit) {
+ try {
+ commit.get(500, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException | ExecutionException | TimeoutException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import org.junit.Before;
+import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
+import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public abstract class AbstractSchemaAwareTest {
+
+ private Iterable<YangModuleInfo> moduleInfos;
+ private SchemaContext schemaContext;
+
+
+ protected Iterable<YangModuleInfo> getModuleInfos() {
+ return BindingReflections.loadModuleInfos();
+ }
+
+
+ @Before
+ public final void setup() {
+ moduleInfos = getModuleInfos();
+ ModuleInfoBackedContext moduleContext = ModuleInfoBackedContext.create();
+ moduleContext.addModuleInfos(moduleInfos);
+ schemaContext = moduleContext.tryToCreateSchemaContext().get();
+ setupWithSchema(schemaContext);
+ }
+
+ /**
+ * Setups test with Schema context.
+ * This method is called before {@link #setupWithSchemaService(SchemaService)}
+ *
+ * @param context
+ */
+ protected abstract void setupWithSchema(SchemaContext context);
+
+}
--- /dev/null
+package test.mock.util;
+
+import org.osgi.framework.Bundle;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.BundleException;
+import org.osgi.framework.BundleListener;
+import org.osgi.framework.Filter;
+import org.osgi.framework.FrameworkListener;
+import org.osgi.framework.InvalidSyntaxException;
+import org.osgi.framework.ServiceListener;
+import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
+
+import java.io.File;
+import java.io.InputStream;
+import java.util.Collection;
+import java.util.Dictionary;
+
+public class BundleContextMock implements BundleContext {
+ @Override
+ public String getProperty(String s) {
+ return null;
+ }
+
+ @Override
+ public Bundle getBundle() {
+ return null;
+ }
+
+ @Override
+ public Bundle installBundle(String s, InputStream inputStream) throws BundleException {
+ return null;
+ }
+
+ @Override
+ public Bundle installBundle(String s) throws BundleException {
+ return null;
+ }
+
+ @Override
+ public Bundle getBundle(long l) {
+ return null;
+ }
+
+ @Override
+ public Bundle[] getBundles() {
+ return new Bundle[0];
+ }
+
+ @Override
+ public void addServiceListener(ServiceListener serviceListener, String s) throws InvalidSyntaxException {
+
+ }
+
+ @Override
+ public void addServiceListener(ServiceListener serviceListener) {
+
+ }
+
+ @Override
+ public void removeServiceListener(ServiceListener serviceListener) {
+
+ }
+
+ @Override
+ public void addBundleListener(BundleListener bundleListener) {
+
+ }
+
+ @Override
+ public void removeBundleListener(BundleListener bundleListener) {
+
+ }
+
+ @Override
+ public void addFrameworkListener(FrameworkListener frameworkListener) {
+
+ }
+
+ @Override
+ public void removeFrameworkListener(FrameworkListener frameworkListener) {
+
+ }
+
+ @Override
+ public ServiceRegistration<?> registerService(String[] strings, Object o, Dictionary<String, ?> stringDictionary) {
+ return null;
+ }
+
+ @Override
+ public ServiceRegistration<?> registerService(String s, Object o, Dictionary<String, ?> stringDictionary) {
+ return null;
+ }
+
+ @Override
+ public <S> ServiceRegistration<S> registerService(Class<S> sClass, S s, Dictionary<String, ?> stringDictionary) {
+ return null;
+ }
+
+ @Override
+ public ServiceReference<?>[] getServiceReferences(String s, String s2) throws InvalidSyntaxException {
+ return new ServiceReference<?>[0];
+ }
+
+ @Override
+ public ServiceReference<?>[] getAllServiceReferences(String s, String s2) throws InvalidSyntaxException {
+ return new ServiceReference<?>[0];
+ }
+
+ @Override
+ public ServiceReference<?> getServiceReference(String s) {
+ return null;
+ }
+
+ @Override
+ public <S> ServiceReference<S> getServiceReference(Class<S> sClass) {
+ return null;
+ }
+
+ @Override
+ public <S> Collection<ServiceReference<S>> getServiceReferences(Class<S> sClass, String s) throws InvalidSyntaxException {
+ return null;
+ }
+
+ @Override
+ public <S> S getService(ServiceReference<S> sServiceReference) {
+ return null;
+ }
+
+ @Override
+ public boolean ungetService(ServiceReference<?> serviceReference) {
+ return false;
+ }
+
+ @Override
+ public File getDataFile(String s) {
+ return null;
+ }
+
+ @Override
+ public Filter createFilter(String s) throws InvalidSyntaxException {
+ return null;
+ }
+
+ @Override
+ public Bundle getBundle(String s) {
+ return null;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import javassist.ClassPool;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBackwardsCompatibleDataBroker;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBindingDataBroker;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.controller.sal.core.spi.data.DOMStore;
+import org.opendaylight.yangtools.binding.data.codec.gen.impl.DataObjectSerializerGenerator;
+import org.opendaylight.yangtools.binding.data.codec.gen.impl.StreamWriterGenerator;
+import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
+import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
+import org.opendaylight.yangtools.sal.binding.generator.impl.RuntimeGeneratedMappingServiceImpl;
+import org.opendaylight.yangtools.sal.binding.generator.util.JavassistUtils;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+public class DataBrokerTestCustomizer {
+
+ private DOMDataBroker domDataBroker;
+ private final RuntimeGeneratedMappingServiceImpl mappingService;
+ private final MockSchemaService schemaService;
+ private ImmutableMap<LogicalDatastoreType, DOMStore> datastores;
+ private final BindingToNormalizedNodeCodec bindingToNormalized ;
+
+ public ImmutableMap<LogicalDatastoreType, DOMStore> createDatastores() {
+ return ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
+ .put(LogicalDatastoreType.OPERATIONAL, createOperationalDatastore())
+ .put(LogicalDatastoreType.CONFIGURATION,createConfigurationDatastore())
+ .build();
+ }
+
+ public DataBrokerTestCustomizer() {
+ schemaService = new MockSchemaService();
+ ClassPool pool = ClassPool.getDefault();
+ mappingService = new RuntimeGeneratedMappingServiceImpl(pool);
+ DataObjectSerializerGenerator generator = StreamWriterGenerator.create(JavassistUtils.forClassPool(pool));
+ BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(generator);
+ GeneratedClassLoadingStrategy loading = GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy();
+ bindingToNormalized = new BindingToNormalizedNodeCodec(loading, mappingService, codecRegistry);
+ schemaService.registerSchemaContextListener(bindingToNormalized);
+ }
+
+ public DOMStore createConfigurationDatastore() {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("CFG",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
+ schemaService.registerSchemaContextListener(store);
+ return store;
+ }
+
+ public DOMStore createOperationalDatastore() {
+ InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER",
+ MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
+ schemaService.registerSchemaContextListener(store);
+ return store;
+ }
+
+ public DOMDataBroker createDOMDataBroker() {
+ return new DOMDataBrokerImpl(getDatastores(), getCommitCoordinatorExecutor());
+ }
+
+ public ListeningExecutorService getCommitCoordinatorExecutor() {
+ return MoreExecutors.sameThreadExecutor();
+ }
+
+ public DataBroker createDataBroker() {
+ return new ForwardedBindingDataBroker(getDOMDataBroker(), bindingToNormalized, schemaService );
+ }
+
+ public ForwardedBackwardsCompatibleDataBroker createBackwardsCompatibleDataBroker() {
+ return new ForwardedBackwardsCompatibleDataBroker(getDOMDataBroker(), bindingToNormalized, getSchemaService(), MoreExecutors.sameThreadExecutor());
+ }
+
+ private SchemaService getSchemaService() {
+ return schemaService;
+ }
+
+ private DOMDataBroker getDOMDataBroker() {
+ if(domDataBroker == null) {
+ domDataBroker = createDOMDataBroker();
+ }
+ return domDataBroker;
+ }
+
+ private synchronized ImmutableMap<LogicalDatastoreType, DOMStore> getDatastores() {
+ if (datastores == null) {
+ datastores = createDatastores();
+ }
+ return datastores;
+ }
+
+ public void updateSchema(final SchemaContext ctx) {
+ schemaService.changeSchema(ctx);
+ mappingService.onGlobalContextUpdated(ctx);
+ }
+
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.FlowKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.FlowCookie;
+
+import java.math.BigInteger;
+import java.util.Random;
+
+public class FlowMockGenerator {
+ private static final Random rnd = new Random();
+ private static final FlowBuilder flowBuilder = new FlowBuilder();
+
+ public static Flow getRandomFlow() {
+ flowBuilder.setKey(new FlowKey(new FlowId("flow." + rnd.nextInt(1000))));
+ flowBuilder.setOutGroup(TestUtils.nextLong(0, 4294967296L));
+ flowBuilder.setTableId((short) rnd.nextInt(256));
+ flowBuilder.setOutPort(BigInteger.valueOf(TestUtils.nextLong(0, Long.MAX_VALUE)));
+ flowBuilder.setStrict(rnd.nextBoolean());
+ flowBuilder.setContainerName("container." + rnd.nextInt(1000));
+ flowBuilder.setBarrier(rnd.nextBoolean());
+ flowBuilder.setMatch(MatchMockGenerator.getRandomMatch());
+ flowBuilder.setPriority(rnd.nextInt(65535));
+ flowBuilder.setCookie(new FlowCookie(BigInteger.valueOf(TestUtils.nextLong(0, Long.MAX_VALUE))));
+ flowBuilder.setCookieMask(flowBuilder.getCookie());
+ return flowBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.GroupTypes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.GroupKey;
+
+import java.util.Random;
+
+public class GroupMockGenerator {
+ private static final Random rnd = new Random();
+ private static final GroupBuilder groupBuilder = new GroupBuilder();
+
+ public static Group getRandomGroup() {
+ groupBuilder.setKey(new GroupKey(new GroupId(TestUtils.nextLong(0, 4294967295L))));
+ groupBuilder.setContainerName("container." + rnd.nextInt(1000));
+ groupBuilder.setBarrier(rnd.nextBoolean());
+ groupBuilder.setGroupName("group." + rnd.nextInt(1000));
+ groupBuilder.setGroupType(GroupTypes.forValue(rnd.nextInt(4)));
+ return groupBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Dscp;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.Match;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.flow.MatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.IpMatchBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026.match.MetadataBuilder;
+
+import java.math.BigInteger;
+import java.util.Random;
+
+public class MatchMockGenerator {
+ private static final Random rnd = new Random();
+ private static final MatchBuilder matchBuilder = new MatchBuilder();
+ private static final IpMatchBuilder ipMatchBuilder = new IpMatchBuilder();
+ private static final MetadataBuilder metadataBuilder = new MetadataBuilder();
+
+ public static Match getRandomMatch() {
+ matchBuilder.setInPort(new NodeConnectorId("port." + rnd.nextInt(500)));
+ ipMatchBuilder.setIpDscp(new Dscp((short) rnd.nextInt(64))).build();
+ ipMatchBuilder.setIpEcn((short) rnd.nextInt(256));
+ ipMatchBuilder.setIpProtocol((short) rnd.nextInt(256));
+ matchBuilder.setIpMatch(ipMatchBuilder.build());
+ metadataBuilder.setMetadata(BigInteger.valueOf(TestUtils.nextLong(0, Long.MAX_VALUE)));
+ metadataBuilder.setMetadataMask(BigInteger.valueOf(TestUtils.nextLong(0, Long.MAX_VALUE)));
+ matchBuilder.setMetadata(metadataBuilder.build());
+ return matchBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.MeterKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.BandId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.MeterId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.MeterBandHeadersBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.meter.band.headers.MeterBandHeader;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.meter.band.headers.MeterBandHeaderBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.meter.band.headers.MeterBandHeaderKey;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+public class MeterMockGenerator {
+ private static final Random rnd = new Random();
+ private static final MeterBuilder meterBuilder = new MeterBuilder();
+ private static final MeterBandHeaderBuilder meterBandHeaderBuilder = new MeterBandHeaderBuilder();
+ private static final MeterBandHeadersBuilder meterBandHeadersBuilder = new MeterBandHeadersBuilder();
+
+ public static Meter getRandomMeter() {
+ meterBandHeaderBuilder.setKey(new MeterBandHeaderKey(new BandId(TestUtils.nextLong(0, 4294967295L))));
+ meterBandHeaderBuilder.setBandBurstSize(TestUtils.nextLong(0, 4294967295L));
+ meterBandHeaderBuilder.setBandRate(TestUtils.nextLong(0, 4294967295L));
+ List<MeterBandHeader> meterBandHeaders = new ArrayList<>();
+ meterBuilder.setKey(new MeterKey(new MeterId(TestUtils.nextLong(0, 4294967295L))));
+ meterBuilder.setBarrier(rnd.nextBoolean());
+ meterBuilder.setContainerName("container." + rnd.nextInt(1000));
+ meterBuilder.setMeterName("meter." + rnd.nextInt(1000));
+ meterBuilder.setMeterBandHeaders(meterBandHeadersBuilder.setMeterBandHeader(meterBandHeaders).build());
+ return meterBuilder.build();
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package test.mock.util;
+
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.controller.sal.dom.broker.impl.SchemaContextProvider;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.util.ListenerRegistry;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+
+@SuppressWarnings("deprecation")
+public final class MockSchemaService implements SchemaService, SchemaContextProvider {
+
+ private SchemaContext schemaContext;
+
+ ListenerRegistry<SchemaContextListener> listeners = ListenerRegistry.create();
+
+ @Override
+ public void addModule(final Module module) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized SchemaContext getGlobalContext() {
+ return schemaContext;
+ }
+
+ @Override
+ public synchronized SchemaContext getSessionContext() {
+ return schemaContext;
+ }
+
+ @Override
+ public ListenerRegistration<SchemaContextListener> registerSchemaContextListener(
+ final SchemaContextListener listener) {
+ return listeners.register(listener);
+ }
+
+ @Override
+ public void removeModule(final Module module) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized SchemaContext getSchemaContext() {
+ return schemaContext;
+ }
+
+ public synchronized void changeSchema(final SchemaContext newContext) {
+ schemaContext = newContext;
+ for (ListenerRegistration<SchemaContextListener> listener : listeners) {
+ listener.getInstance().onGlobalContextUpdated(schemaContext);
+ }
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.controller.sal.binding.codegen.impl.SingletonHolder;
+import org.opendaylight.controller.sal.binding.impl.NotificationBrokerImpl;
+import org.opendaylight.yangtools.yang.binding.Notification;
+
+import java.util.Timer;
+import java.util.TimerTask;
+
+public class NotificationProviderServiceHelper {
+ private NotificationBrokerImpl notifBroker = new NotificationBrokerImpl(SingletonHolder.getDefaultNotificationExecutor());
+
+ public NotificationBrokerImpl getNotifBroker() {
+ return notifBroker;
+ }
+
+ public void pushDelayedNotification(final Notification notification, int delay) {
+ new Timer().schedule(new TimerTask() {
+ @Override
+ public void run() {
+ notifBroker.publish(notification);
+ }
+ }, delay);
+ }
+
+ public void pushNotification(final Notification notification) {
+ notifBroker.publish(notification);
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForGivenMatchInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowStatisticsFromFlowTableOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.flow.and.statistics.map.list.FlowAndStatisticsMapListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class OpendaylightFlowStatisticsServiceMock implements OpendaylightFlowStatisticsService {
+ NotificationProviderServiceHelper notifService;
+ AtomicLong transNum = new AtomicLong();
+
+ public OpendaylightFlowStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+ this.notifService = notifService;
+ }
+
+ @Override
+ public Future<RpcResult<GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutput>> getAggregateFlowStatisticsFromFlowTableForAllFlows(GetAggregateFlowStatisticsFromFlowTableForAllFlowsInput input) {
+ GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutputBuilder builder = new GetAggregateFlowStatisticsFromFlowTableForAllFlowsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutput>> getAggregateFlowStatisticsFromFlowTableForGivenMatch(GetAggregateFlowStatisticsFromFlowTableForGivenMatchInput input) {
+ GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutputBuilder builder = new GetAggregateFlowStatisticsFromFlowTableForGivenMatchOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ AggregateFlowStatisticsUpdateBuilder afsuBuilder = new AggregateFlowStatisticsUpdateBuilder();
+ afsuBuilder.setMoreReplies(false);
+ afsuBuilder.setTransactionId(transId);
+ afsuBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ afsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ notifService.pushDelayedNotification(afsuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetAllFlowStatisticsFromFlowTableOutput>> getAllFlowStatisticsFromFlowTable(GetAllFlowStatisticsFromFlowTableInput input) {
+ GetAllFlowStatisticsFromFlowTableOutputBuilder builder = new GetAllFlowStatisticsFromFlowTableOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetAllFlowsStatisticsFromAllFlowTablesOutput>> getAllFlowsStatisticsFromAllFlowTables(GetAllFlowsStatisticsFromAllFlowTablesInput input) {
+ GetAllFlowsStatisticsFromAllFlowTablesOutputBuilder builder = new GetAllFlowsStatisticsFromAllFlowTablesOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ List<FlowAndStatisticsMapList> flowAndStatisticsMapLists = new ArrayList<>();
+ FlowsStatisticsUpdateBuilder flowsStatisticsUpdateBuilder = new FlowsStatisticsUpdateBuilder();
+ flowsStatisticsUpdateBuilder.setTransactionId(transId);
+ flowsStatisticsUpdateBuilder.setMoreReplies(false);
+ flowsStatisticsUpdateBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ FlowAndStatisticsMapListBuilder flowAndStatisticsMapListBuilder = new FlowAndStatisticsMapListBuilder(StatisticsManagerTest.getFlow());
+ flowAndStatisticsMapListBuilder.setTableId(StatisticsManagerTest.getFlow().getTableId());
+ flowAndStatisticsMapListBuilder.setContainerName(StatisticsManagerTest.getFlow().getContainerName());
+ flowAndStatisticsMapListBuilder.setBarrier(StatisticsManagerTest.getFlow().isBarrier());
+ flowAndStatisticsMapListBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ flowAndStatisticsMapLists.add(flowAndStatisticsMapListBuilder.build());
+ flowsStatisticsUpdateBuilder.setFlowAndStatisticsMapList(flowAndStatisticsMapLists);
+ notifService.pushDelayedNotification(flowsStatisticsUpdateBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetFlowStatisticsFromFlowTableOutput>> getFlowStatisticsFromFlowTable(GetFlowStatisticsFromFlowTableInput input) {
+ GetFlowStatisticsFromFlowTableOutputBuilder builder = new GetFlowStatisticsFromFlowTableOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ List<FlowAndStatisticsMapList> flowAndStatisticsMapLists = new ArrayList<>();
+ FlowsStatisticsUpdateBuilder flowsStatisticsUpdateBuilder = new FlowsStatisticsUpdateBuilder();
+ flowsStatisticsUpdateBuilder.setTransactionId(transId);
+ flowsStatisticsUpdateBuilder.setMoreReplies(false);
+ flowsStatisticsUpdateBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ FlowAndStatisticsMapListBuilder flowAndStatisticsMapListBuilder = new FlowAndStatisticsMapListBuilder(input);
+ flowAndStatisticsMapListBuilder.setTableId(input.getTableId());
+ flowAndStatisticsMapListBuilder.setContainerName(input.getContainerName());
+ flowAndStatisticsMapListBuilder.setBarrier(input.isBarrier());
+ flowAndStatisticsMapListBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ flowAndStatisticsMapLists.add(flowAndStatisticsMapListBuilder.build());
+ flowsStatisticsUpdateBuilder.setFlowAndStatisticsMapList(flowAndStatisticsMapLists);
+ notifService.pushDelayedNotification(flowsStatisticsUpdateBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.FlowTableStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.GetFlowTablesStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMapBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.flow.table.and.statistics.map.FlowTableAndStatisticsMapKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class OpendaylightFlowTableStatisticsServiceMock implements OpendaylightFlowTableStatisticsService {
+ NotificationProviderServiceHelper notifService;
+ AtomicLong transNum = new AtomicLong();
+
+ public OpendaylightFlowTableStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+ this.notifService = notifService;
+ }
+
+ @Override
+ public Future<RpcResult<GetFlowTablesStatisticsOutput>> getFlowTablesStatistics(GetFlowTablesStatisticsInput input) {
+ GetFlowTablesStatisticsOutputBuilder builder = new GetFlowTablesStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ FlowTableStatisticsUpdateBuilder ftsBuilder = new FlowTableStatisticsUpdateBuilder();
+ FlowTableAndStatisticsMapBuilder ftasmBuilder = new FlowTableAndStatisticsMapBuilder();
+ List<FlowTableAndStatisticsMap> tableAndStatisticsMaps = new ArrayList<>();
+ ftasmBuilder.setKey(new FlowTableAndStatisticsMapKey(StatisticsManagerTest.getTableId()));
+ ftasmBuilder.setActiveFlows(StatisticsManagerTest.COUNTER_32_TEST_VALUE);
+ tableAndStatisticsMaps.add(ftasmBuilder.build());
+ ftsBuilder.setTransactionId(transId);
+ ftsBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ ftsBuilder.setFlowTableAndStatisticsMap(tableAndStatisticsMaps);
+ ftsBuilder.setMoreReplies(true);
+ notifService.pushDelayedNotification(ftsBuilder.build(), 0); // 1st notification
+ ftsBuilder.setMoreReplies(false);
+ ftasmBuilder.setPacketsLookedUp(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ tableAndStatisticsMaps.clear();
+ tableAndStatisticsMaps.add(ftasmBuilder.build());
+ ftsBuilder.setFlowTableAndStatisticsMap(tableAndStatisticsMaps);
+ notifService.pushDelayedNotification(ftsBuilder.build(), 0); // 2nd notification
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetAllGroupStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupDescriptionOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupFeaturesOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GetGroupStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupDescStatsUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupFeaturesUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.GroupStatisticsUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.desc.stats.reply.GroupDescStatsKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.group.statistics.reply.GroupStatsKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class OpendaylightGroupStatisticsServiceMock implements OpendaylightGroupStatisticsService {
+ NotificationProviderServiceHelper notifService;
+ AtomicLong transNum = new AtomicLong();
+
+ public OpendaylightGroupStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+ this.notifService = notifService;
+ }
+
+ @Override
+ public Future<RpcResult<GetAllGroupStatisticsOutput>> getAllGroupStatistics(GetAllGroupStatisticsInput input) {
+ GetAllGroupStatisticsOutputBuilder builder = new GetAllGroupStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ List<GroupStats> groupStats = new ArrayList<>();
+ GroupStatsBuilder gsBuilder = new GroupStatsBuilder();
+ GroupStatisticsUpdatedBuilder gsuBuilder = new GroupStatisticsUpdatedBuilder();
+ gsBuilder.setKey(new GroupStatsKey(StatisticsManagerTest.getGroup().getGroupId()));
+ gsBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ groupStats.add(gsBuilder.build());
+ builder.setGroupStats(groupStats);
+ gsuBuilder.setTransactionId(transId);
+ gsuBuilder.setMoreReplies(false);
+ gsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ gsuBuilder.setGroupStats(groupStats);
+ notifService.pushDelayedNotification(gsuBuilder.build(), 500);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetGroupDescriptionOutput>> getGroupDescription(GetGroupDescriptionInput input) {
+ GetGroupDescriptionOutputBuilder builder = new GetGroupDescriptionOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ List<GroupDescStats> groupDescStats = new ArrayList<>();
+ GroupDescStatsUpdatedBuilder gdsuBuilder = new GroupDescStatsUpdatedBuilder();
+ GroupDescStatsBuilder gdsBuilder = new GroupDescStatsBuilder();
+ gdsBuilder.setKey(new GroupDescStatsKey(StatisticsManagerTest.getGroup().getGroupId()));
+ gdsBuilder.setBuckets(StatisticsManagerTest.getGroup().getBuckets());
+ gdsBuilder.setContainerName(StatisticsManagerTest.getGroup().getContainerName());
+ gdsBuilder.setGroupName(StatisticsManagerTest.getGroup().getGroupName());
+ gdsBuilder.setGroupType(StatisticsManagerTest.getGroup().getGroupType());
+ groupDescStats.add(gdsBuilder.build());
+ builder.setGroupDescStats(groupDescStats);
+ gdsuBuilder.setTransactionId(transId);
+ gdsuBuilder.setMoreReplies(false);
+ gdsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ gdsuBuilder.setGroupDescStats(groupDescStats);
+ notifService.pushDelayedNotification(gdsuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetGroupFeaturesOutput>> getGroupFeatures(GetGroupFeaturesInput input) {
+ GetGroupFeaturesOutputBuilder builder = new GetGroupFeaturesOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ GroupFeaturesUpdatedBuilder gfuBuilder = new GroupFeaturesUpdatedBuilder();
+ gfuBuilder.setTransactionId(transId);
+ gfuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ gfuBuilder.setMoreReplies(false);
+ List<Long> maxGroups = new ArrayList<>();
+ maxGroups.add(StatisticsManagerTest.MAX_GROUPS_TEST_VALUE);
+ gfuBuilder.setMaxGroups(maxGroups);
+ notifService.pushDelayedNotification(gfuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetGroupStatisticsOutput>> getGroupStatistics(GetGroupStatisticsInput input) {
+ GetGroupStatisticsOutputBuilder builder = new GetGroupStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ GroupStatsBuilder gsBuilder = new GroupStatsBuilder();
+ List<GroupStats> groupStats = new ArrayList<>();
+ gsBuilder.setKey(new GroupStatsKey(input.getGroupId()));
+ gsBuilder.setByteCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ groupStats.add(gsBuilder.build());
+ GroupStatisticsUpdatedBuilder gsuBuilder = new GroupStatisticsUpdatedBuilder();
+ gsuBuilder.setTransactionId(transId);
+ gsuBuilder.setMoreReplies(false);
+ gsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ gsuBuilder.setGroupStats(groupStats);
+ notifService.pushDelayedNotification(gsuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterConfigStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetAllMeterStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterFeaturesInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterFeaturesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterFeaturesOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.GetMeterStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterConfigStatsUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterFeaturesUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.MeterStatisticsUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStats;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStatsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStatsKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class OpendaylightMeterStatisticsServiceMock implements OpendaylightMeterStatisticsService {
+ NotificationProviderServiceHelper notifService;
+ AtomicLong transNum = new AtomicLong();
+
+ public OpendaylightMeterStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+ this.notifService = notifService;
+ }
+
+ @Override
+ public Future<RpcResult<GetAllMeterConfigStatisticsOutput>> getAllMeterConfigStatistics(GetAllMeterConfigStatisticsInput input) {
+ GetAllMeterConfigStatisticsOutputBuilder builder = new GetAllMeterConfigStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ List<MeterConfigStats> meterConfigStats = new ArrayList<>();
+ MeterConfigStatsBuilder mcsBuilder = new MeterConfigStatsBuilder();
+ mcsBuilder.setMeterId(StatisticsManagerTest.getMeter().getMeterId());
+ mcsBuilder.setMeterName(StatisticsManagerTest.getMeter().getMeterName());
+ mcsBuilder.setContainerName(StatisticsManagerTest.getMeter().getContainerName());
+ meterConfigStats.add(mcsBuilder.build());
+ builder.setMeterConfigStats(meterConfigStats);
+ MeterConfigStatsUpdatedBuilder mscuBuilder = new MeterConfigStatsUpdatedBuilder();
+ mscuBuilder.setTransactionId(transId);
+ mscuBuilder.setMoreReplies(false);
+ mscuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ mscuBuilder.setMeterConfigStats(meterConfigStats);
+ notifService.pushDelayedNotification(mscuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetAllMeterStatisticsOutput>> getAllMeterStatistics(GetAllMeterStatisticsInput input) {
+ GetAllMeterStatisticsOutputBuilder builder = new GetAllMeterStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ MeterStatsBuilder msBuilder = new MeterStatsBuilder();
+ msBuilder.setByteInCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ msBuilder.setPacketInCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ msBuilder.setKey(new MeterStatsKey(StatisticsManagerTest.getMeter().getMeterId()));
+ List<MeterStats> meterStats = new ArrayList<>();
+ meterStats.add(msBuilder.build());
+ MeterStatisticsUpdatedBuilder msuBuilder = new MeterStatisticsUpdatedBuilder();
+ msuBuilder.setTransactionId(transId);
+ msuBuilder.setMoreReplies(false);
+ msuBuilder.setMeterStats(meterStats);
+ msuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ notifService.pushDelayedNotification(msuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetMeterFeaturesOutput>> getMeterFeatures(GetMeterFeaturesInput input) {
+ GetMeterFeaturesOutputBuilder builder = new GetMeterFeaturesOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ MeterFeaturesUpdatedBuilder mfuBuilder = new MeterFeaturesUpdatedBuilder();
+ mfuBuilder.setTransactionId(transId);
+ mfuBuilder.setMoreReplies(false);
+ mfuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ mfuBuilder.setMaxMeter(StatisticsManagerTest.COUNTER_32_TEST_VALUE);
+ notifService.pushDelayedNotification(mfuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetMeterStatisticsOutput>> getMeterStatistics(GetMeterStatisticsInput input) {
+ GetMeterStatisticsOutputBuilder builder = new GetMeterStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ MeterStatsBuilder msBuilder = new MeterStatsBuilder();
+ msBuilder.setKey(new MeterStatsKey(input.getMeterId()));
+ msBuilder.setByteInCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ msBuilder.setPacketInCount(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ List<MeterStats> meterStats = new ArrayList<>();
+ meterStats.add(msBuilder.build());
+ MeterStatisticsUpdatedBuilder msuBuilder = new MeterStatisticsUpdatedBuilder();
+ msuBuilder.setTransactionId(transId);
+ msuBuilder.setMoreReplies(false);
+ msuBuilder.setMeterStats(meterStats);
+ msuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ notifService.pushDelayedNotification(msuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetAllNodeConnectorsStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetNodeConnectorStatisticsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetNodeConnectorStatisticsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.GetNodeConnectorStatisticsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.NodeConnectorStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMapBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.node.connector.statistics.and.port.number.map.NodeConnectorStatisticsAndPortNumberMapKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class OpendaylightPortStatisticsServiceMock implements OpendaylightPortStatisticsService {
+ NotificationProviderServiceHelper notifService;
+ AtomicLong transNum = new AtomicLong();
+
+ public OpendaylightPortStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+ this.notifService = notifService;
+ }
+
+ @Override
+ public Future<RpcResult<GetAllNodeConnectorsStatisticsOutput>> getAllNodeConnectorsStatistics(GetAllNodeConnectorsStatisticsInput input) {
+ GetAllNodeConnectorsStatisticsOutputBuilder builder = new GetAllNodeConnectorsStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ NodeConnectorStatisticsUpdateBuilder ncsuBuilder = new NodeConnectorStatisticsUpdateBuilder();
+ NodeConnectorStatisticsAndPortNumberMapBuilder ncsapnmBuilder = new NodeConnectorStatisticsAndPortNumberMapBuilder();
+ List<NodeConnectorStatisticsAndPortNumberMap> nodeConnectorStatisticsAndPortNumberMaps = new ArrayList<>();
+ ncsapnmBuilder.setKey(new NodeConnectorStatisticsAndPortNumberMapKey(StatisticsManagerTest.getNodeConnectorId()));
+ ncsapnmBuilder.setReceiveDrops(StatisticsManagerTest.BIG_INTEGER_TEST_VALUE);
+ nodeConnectorStatisticsAndPortNumberMaps.add(ncsapnmBuilder.build());
+ ncsuBuilder.setTransactionId(new TransactionId(BigInteger.valueOf(1)));
+ ncsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ ncsuBuilder.setNodeConnectorStatisticsAndPortNumberMap(nodeConnectorStatisticsAndPortNumberMaps);
+ ncsuBuilder.setMoreReplies(true);
+ notifService.pushDelayedNotification(ncsuBuilder.build(), 0); // 1st notification
+ ncsuBuilder.setMoreReplies(false);
+ ncsapnmBuilder.setCollisionCount(StatisticsManagerTest.BIG_INTEGER_TEST_VALUE);
+ nodeConnectorStatisticsAndPortNumberMaps.clear();
+ nodeConnectorStatisticsAndPortNumberMaps.add(ncsapnmBuilder.build());
+ ncsuBuilder.setNodeConnectorStatisticsAndPortNumberMap(nodeConnectorStatisticsAndPortNumberMaps);
+ notifService.pushDelayedNotification(ncsuBuilder.build(), 10); // 2nd notification
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetNodeConnectorStatisticsOutput>> getNodeConnectorStatistics(GetNodeConnectorStatisticsInput input) {
+ GetNodeConnectorStatisticsOutputBuilder builder = new GetNodeConnectorStatisticsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromAllPortsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromGivenPortInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromGivenPortOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetAllQueuesStatisticsFromGivenPortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetQueueStatisticsFromGivenPortInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetQueueStatisticsFromGivenPortOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.GetQueueStatisticsFromGivenPortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.QueueStatisticsUpdateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMap;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMapBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.queue.id.and.statistics.map.QueueIdAndStatisticsMapKey;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class OpendaylightQueueStatisticsServiceMock implements OpendaylightQueueStatisticsService {
+ NotificationProviderServiceHelper notifService;
+ AtomicLong transNum = new AtomicLong();
+
+ public OpendaylightQueueStatisticsServiceMock(NotificationProviderServiceHelper notifService) {
+ this.notifService = notifService;
+ }
+
+ @Override
+ public Future<RpcResult<GetAllQueuesStatisticsFromAllPortsOutput>> getAllQueuesStatisticsFromAllPorts(GetAllQueuesStatisticsFromAllPortsInput input) {
+ GetAllQueuesStatisticsFromAllPortsOutputBuilder builder = new GetAllQueuesStatisticsFromAllPortsOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ QueueStatisticsUpdateBuilder qsuBuilder = new QueueStatisticsUpdateBuilder();
+ QueueIdAndStatisticsMapBuilder qiasmBuilder = new QueueIdAndStatisticsMapBuilder();
+ List<QueueIdAndStatisticsMap> queueIdAndStatisticsMaps = new ArrayList<>();
+ qsuBuilder.setMoreReplies(false);
+ qsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ qsuBuilder.setTransactionId(transId);
+ qiasmBuilder.setTransmittedBytes(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ qiasmBuilder.setKey(new QueueIdAndStatisticsMapKey(StatisticsManagerTest.getNodeConnectorId(), StatisticsManagerTest.getQueue().getQueueId()));
+ queueIdAndStatisticsMaps.add(qiasmBuilder.build());
+ qsuBuilder.setQueueIdAndStatisticsMap(queueIdAndStatisticsMaps);
+ notifService.pushDelayedNotification(qsuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetAllQueuesStatisticsFromGivenPortOutput>> getAllQueuesStatisticsFromGivenPort(GetAllQueuesStatisticsFromGivenPortInput input) {
+ GetAllQueuesStatisticsFromGivenPortOutputBuilder builder = new GetAllQueuesStatisticsFromGivenPortOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+
+ @Override
+ public Future<RpcResult<GetQueueStatisticsFromGivenPortOutput>> getQueueStatisticsFromGivenPort(GetQueueStatisticsFromGivenPortInput input) {
+ GetQueueStatisticsFromGivenPortOutputBuilder builder = new GetQueueStatisticsFromGivenPortOutputBuilder();
+ TransactionId transId = new TransactionId(BigInteger.valueOf(transNum.incrementAndGet()));
+ builder.setTransactionId(transId);
+ QueueIdAndStatisticsMapBuilder qiasmBuilder = new QueueIdAndStatisticsMapBuilder();
+ List<QueueIdAndStatisticsMap> queueIdAndStatisticsMaps = new ArrayList<>();
+ qiasmBuilder.setKey(new QueueIdAndStatisticsMapKey(input.getNodeConnectorId(), input.getQueueId()));
+ qiasmBuilder.setTransmittedBytes(StatisticsManagerTest.COUNTER_64_TEST_VALUE);
+ queueIdAndStatisticsMaps.add(qiasmBuilder.build());
+ QueueStatisticsUpdateBuilder qsuBuilder = new QueueStatisticsUpdateBuilder();
+ qsuBuilder.setMoreReplies(false);
+ qsuBuilder.setTransactionId(transId);
+ qsuBuilder.setId(input.getNode().getValue().firstKeyOf(Node.class, NodeKey.class).getId());
+ qsuBuilder.setQueueIdAndStatisticsMap(queueIdAndStatisticsMaps);
+ notifService.pushDelayedNotification(qsuBuilder.build(), 100);
+ return Futures.immediateFuture(RpcResultBuilder.success(builder.build()).build());
+ }
+}
--- /dev/null
+package test.mock.util;
+
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.CommonPort;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.Port;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.PortBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.PortKey;
+
+import java.util.Random;
+
+public class PortMockGenerator {
+ private static final Random rnd = new Random();
+ private static final PortBuilder portBuilder = new PortBuilder();
+
+ public static Port getRandomPort() {
+ portBuilder.setKey(new PortKey(TestUtils.nextLong(0, 4294967295L)));
+ portBuilder.setBarrier(rnd.nextBoolean());
+ portBuilder.setPortNumber(new CommonPort.PortNumber(TestUtils.nextLong(0, 4294967295L)));
+ portBuilder.setConfiguration(new PortConfig(rnd.nextBoolean(), rnd.nextBoolean(), rnd.nextBoolean(), rnd.nextBoolean()));
+ return portBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
+import org.opendaylight.controller.sal.binding.api.BindingAwareService;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.RpcService;
+
+public class ProviderContextMock implements BindingAwareBroker.ProviderContext {
+
+ RpcProviderRegistry rpcProviderMock;
+ NotificationProviderService notificationProviderService;
+ DataBroker dataBroker;
+
+ public ProviderContextMock(RpcProviderRegistry rpcProviderMock, DataBroker dataBroker,
+ NotificationProviderService notificationProviderServiceMock) {
+ this.rpcProviderMock = rpcProviderMock;
+ this.dataBroker = dataBroker;
+ this.notificationProviderService = notificationProviderServiceMock;
+ }
+
+ @Override
+ public void registerFunctionality(BindingAwareProvider.ProviderFunctionality functionality) {
+
+ }
+
+ @Override
+ public void unregisterFunctionality(BindingAwareProvider.ProviderFunctionality functionality) {
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <T extends BindingAwareService> T getSALService(Class<T> service) {
+ if (service.equals(DataBroker.class)) {
+ return (T) dataBroker;
+ }
+ else if (service.equals(NotificationProviderService.class)) {
+ return (T) notificationProviderService;
+ }
+ return null;
+ }
+
+ @Override
+ public <T extends RpcService> BindingAwareBroker.RpcRegistration<T> addRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+ return null;
+ }
+
+ @Override
+ public <T extends RpcService> BindingAwareBroker.RoutedRpcRegistration<T> addRoutedRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+ return null;
+ }
+
+ @Override
+ public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(L listener) {
+ return null;
+ }
+
+ @Override
+ public <T extends RpcService> T getRpcService(Class<T> serviceInterface) {
+ return rpcProviderMock.getRpcService(serviceInterface);
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.queue.rev130925.QueueId;
+
+import java.util.Random;
+
+public class QueueMockGenerator {
+ private static final Random rnd = new Random();
+ private static final QueueBuilder queueBuilder = new QueueBuilder();
+
+ public static Queue getRandomQueue() {
+ queueBuilder.setKey(new QueueKey(new QueueId(TestUtils.nextLong(0, 4294967295L))));
+ queueBuilder.setPort(TestUtils.nextLong(0, 4294967295L));
+ queueBuilder.setProperty(rnd.nextInt(65535));
+ return queueBuilder.build();
+ }
+
+ public static Queue getRandomQueueWithPortNum(long portNum) {
+ queueBuilder.setKey(new QueueKey(new QueueId(TestUtils.nextLong(0, 4294967295L))));
+ queueBuilder.setPort(portNum);
+ queueBuilder.setProperty(rnd.nextInt(65535));
+ return queueBuilder.build();
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.table.statistics.rev131215.OpendaylightFlowTableStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.port.statistics.rev131214.OpendaylightPortStatisticsService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.queue.statistics.rev131216.OpendaylightQueueStatisticsService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.RpcService;
+
+public class RpcProviderRegistryMock implements RpcProviderRegistry {
+
+ OpendaylightFlowStatisticsServiceMock flowStatisticsServiceMock;
+ OpendaylightFlowTableStatisticsServiceMock flowTableStatisticsServiceMock;
+ OpendaylightGroupStatisticsServiceMock groupStatisticsServiceMock;
+ OpendaylightMeterStatisticsServiceMock meterStatisticsServiceMock;
+ OpendaylightPortStatisticsServiceMock portStatisticsServiceMock;
+ OpendaylightQueueStatisticsServiceMock queueStatisticsServiceMock;
+
+ public RpcProviderRegistryMock(NotificationProviderServiceHelper notificationProviderService) {
+ this.flowStatisticsServiceMock = new OpendaylightFlowStatisticsServiceMock(notificationProviderService);
+ this.flowTableStatisticsServiceMock = new OpendaylightFlowTableStatisticsServiceMock(notificationProviderService);
+ this.groupStatisticsServiceMock = new OpendaylightGroupStatisticsServiceMock(notificationProviderService);
+ this.meterStatisticsServiceMock = new OpendaylightMeterStatisticsServiceMock(notificationProviderService);
+ this.portStatisticsServiceMock = new OpendaylightPortStatisticsServiceMock(notificationProviderService);
+ this.queueStatisticsServiceMock = new OpendaylightQueueStatisticsServiceMock(notificationProviderService);
+ }
+
+ @Override
+ public <T extends RpcService> BindingAwareBroker.RpcRegistration<T> addRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+ return null;
+ }
+
+ @Override
+ public <T extends RpcService> BindingAwareBroker.RoutedRpcRegistration<T> addRoutedRpcImplementation(Class<T> serviceInterface, T implementation) throws IllegalStateException {
+ return null;
+ }
+
+ @Override
+ public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L> registerRouteChangeListener(L listener) {
+ return null;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public <T extends RpcService> T getRpcService(Class<T> serviceInterface) {
+ if (serviceInterface.equals(OpendaylightFlowStatisticsService.class)) {
+ return (T)flowStatisticsServiceMock;
+ } else if (serviceInterface.equals(OpendaylightFlowTableStatisticsService.class)) {
+ return (T) flowTableStatisticsServiceMock;
+ } else if (serviceInterface.equals(OpendaylightGroupStatisticsService.class)) {
+ return (T) groupStatisticsServiceMock;
+ } else if (serviceInterface.equals(OpendaylightMeterStatisticsService.class)) {
+ return (T) meterStatisticsServiceMock;
+ } else if (serviceInterface.equals(OpendaylightPortStatisticsService.class)) {
+ return (T) portStatisticsServiceMock;
+ } else if (serviceInterface.equals(OpendaylightQueueStatisticsService.class)) {
+ return (T) queueStatisticsServiceMock;
+ } else {
+ return null;
+ }
+ }
+
+ public OpendaylightFlowStatisticsServiceMock getFlowStatisticsServiceMock() {
+ return flowStatisticsServiceMock;
+ }
+
+ public OpendaylightFlowTableStatisticsServiceMock getFlowTableStatisticsServiceMock() {
+ return flowTableStatisticsServiceMock;
+ }
+
+ public OpendaylightGroupStatisticsServiceMock getGroupStatisticsServiceMock() {
+ return groupStatisticsServiceMock;
+ }
+
+ public OpendaylightMeterStatisticsServiceMock getMeterStatisticsServiceMock() {
+ return meterStatisticsServiceMock;
+ }
+
+ public OpendaylightPortStatisticsServiceMock getPortStatisticsServiceMock() {
+ return portStatisticsServiceMock;
+ }
+
+ public OpendaylightQueueStatisticsServiceMock getQueueStatisticsServiceMock() {
+ return queueStatisticsServiceMock;
+ }
+}
--- /dev/null
+package test.mock.util;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.Counter32;
+import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.rev100924.Counter64;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FeatureCapability;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.flow.node.SwitchFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.meters.Meter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.Table;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.TableBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.tables.table.Flow;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.port.mod.port.Port;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.types.rev131018.groups.Group;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeaturesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.table.types.rev131026.TableId;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+public abstract class StatisticsManagerTest extends AbstractDataBrokerTest {
+
+ public static final Counter64 COUNTER_64_TEST_VALUE = new Counter64(BigInteger.valueOf(128));
+ public static final Counter32 COUNTER_32_TEST_VALUE = new Counter32(64L);
+ public static final Long MAX_GROUPS_TEST_VALUE = 2000L;
+ public static final BigInteger BIG_INTEGER_TEST_VALUE = BigInteger.valueOf(1000);
+
+ private static Flow flow;
+ private static Group group;
+ private static Meter meter;
+ private static Port port;
+ private static Queue queue;
+ private static TableId tableId;
+ private static NodeConnectorId nodeConnectorId;
+
+ private final NotificationProviderServiceHelper notificationMock = new NotificationProviderServiceHelper();
+ protected final NodeKey s1Key = new NodeKey(new NodeId("S1"));
+ protected RpcProviderRegistryMock rpcRegistry;
+ protected ProviderContextMock providerContext;
+
+ @BeforeClass
+ public static void setupTests() {
+ flow = FlowMockGenerator.getRandomFlow();
+ group = GroupMockGenerator.getRandomGroup();
+ meter = MeterMockGenerator.getRandomMeter();
+ port = PortMockGenerator.getRandomPort();
+ queue = QueueMockGenerator.getRandomQueueWithPortNum(port.getPortNumber().getUint32());
+ tableId = new TableId((short) 2);
+ nodeConnectorId = new NodeConnectorId("connector.1");
+ }
+
+ @Before
+ public void init() {
+ rpcRegistry = new RpcProviderRegistryMock(notificationMock);
+ providerContext = new ProviderContextMock(rpcRegistry, getDataBroker(), notificationMock.getNotifBroker());
+ }
+
+ // node with statistics capabilities will enable cyclic statistics collection
+ @SafeVarargs
+ protected final void addFlowCapableNodeWithFeatures(final NodeKey nodeKey, final Boolean hasMeterCapabilities,
+ final Class<? extends FeatureCapability>... capabilities)
+ throws ExecutionException, InterruptedException {
+ final Nodes nodes = new NodesBuilder().setNode(Collections.<Node>emptyList()).build();
+ final InstanceIdentifier<Node> flowNodeIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, nodeKey);
+
+ final FlowCapableNodeBuilder fcnBuilder = new FlowCapableNodeBuilder();
+ final SwitchFeaturesBuilder sfBuilder = new SwitchFeaturesBuilder();
+ final List<Class<? extends FeatureCapability>> capabilitiyList = new ArrayList<>();
+ for (final Class<? extends FeatureCapability> capability : capabilities) {
+ capabilitiyList.add(capability);
+ }
+ sfBuilder.setCapabilities(capabilitiyList);
+ sfBuilder.setMaxTables((short) 2);
+ final NodeBuilder nodeBuilder = new NodeBuilder();
+ nodeBuilder.setKey(nodeKey);
+ fcnBuilder.setSwitchFeatures(sfBuilder.build());
+ final List<Table> tables = new ArrayList<>();
+ final TableBuilder tBuilder = new TableBuilder();
+ tBuilder.setId(getFlow().getTableId());
+ tables.add(tBuilder.build());
+ fcnBuilder.setTable(tables);
+ final FlowCapableNode flowCapableNode = fcnBuilder.build();
+ nodeBuilder.addAugmentation(FlowCapableNode.class, flowCapableNode);
+ final Node node = nodeBuilder.build();
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowNodeIdentifier, nodeBuilder.build());
+ if (hasMeterCapabilities) {
+ final NodeMeterFeaturesBuilder nmfBuilder = new NodeMeterFeaturesBuilder();
+ final MeterFeaturesBuilder mfBuilder = new MeterFeaturesBuilder();
+ mfBuilder.setMaxBands((short) 4);
+ nmfBuilder.setMeterFeatures(mfBuilder.build());
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowNodeIdentifier.augmentation(NodeMeterFeatures.class),
+ nmfBuilder.build());
+ }
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowNodeIdentifier, node);
+ assertCommit(writeTx.submit());
+
+ final NodeUpdatedBuilder nuBuilder = new NodeUpdatedBuilder(node);
+ final FlowCapableNodeUpdatedBuilder fcnuBuilder = new FlowCapableNodeUpdatedBuilder(flowCapableNode);
+ nuBuilder.setNodeRef(new NodeRef(flowNodeIdentifier));
+ nuBuilder.addAugmentation(FlowCapableNodeUpdated.class, fcnuBuilder.build());
+ notificationMock.pushNotification(nuBuilder.build());
+ }
+
+ public void addFlowCapableNode(final NodeKey nodeKey) throws ExecutionException, InterruptedException {
+ final Nodes nodes = new NodesBuilder().setNode(Collections.<Node>emptyList()).build();
+ final InstanceIdentifier<Node> flowNodeIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, nodeKey);
+
+ final FlowCapableNodeBuilder fcnBuilder = new FlowCapableNodeBuilder();
+ final NodeBuilder nodeBuilder = new NodeBuilder();
+ nodeBuilder.setKey(nodeKey);
+ final FlowCapableNode flowCapableNode = fcnBuilder.build();
+ nodeBuilder.addAugmentation(FlowCapableNode.class, flowCapableNode);
+ final Node node = nodeBuilder.build();
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.OPERATIONAL, flowNodeIdentifier, node);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.create(Nodes.class), nodes);
+ writeTx.put(LogicalDatastoreType.CONFIGURATION, flowNodeIdentifier, node);
+ assertCommit(writeTx.submit());
+
+ final NodeUpdatedBuilder nuBuilder = new NodeUpdatedBuilder(node);
+ final FlowCapableNodeUpdatedBuilder fcnuBuilder = new FlowCapableNodeUpdatedBuilder(flowCapableNode);
+ nuBuilder.setNodeRef(new NodeRef(flowNodeIdentifier));
+ nuBuilder.addAugmentation(FlowCapableNodeUpdated.class, fcnuBuilder.build());
+ notificationMock.pushNotification(nuBuilder.build());
+ }
+
+ protected void removeNode(final NodeKey nodeKey) throws ExecutionException, InterruptedException {
+ final InstanceIdentifier<Node> nodeII = InstanceIdentifier.create(Nodes.class).child(Node.class, nodeKey);
+
+ final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
+ writeTx.delete(LogicalDatastoreType.OPERATIONAL, nodeII);
+ writeTx.submit().get();
+
+ final NodeRemovedBuilder nrBuilder = new NodeRemovedBuilder();
+ nrBuilder.setNodeRef(new NodeRef(nodeII));
+ notificationMock.pushNotification(nrBuilder.build());
+ }
+
+ public static Flow getFlow() {
+ return flow;
+ }
+
+ public static Group getGroup() {
+ return group;
+ }
+
+ public static Meter getMeter() {
+ return meter;
+ }
+
+ public static Port getPort() {
+ return port;
+ }
+
+ public static Queue getQueue() {
+ return queue;
+ }
+
+ public static TableId getTableId() {
+ return tableId;
+ }
+
+ public static NodeConnectorId getNodeConnectorId() {
+ return nodeConnectorId;
+ }
+}
+
--- /dev/null
+package test.mock.util;
+
+import java.util.Random;
+
+public class TestUtils {
+ private static Random rnd = new Random();
+
+ public static long nextLong(long RangeBottom, long rangeTop) {
+ return RangeBottom + ((long)(rnd.nextDouble()*(rangeTop - RangeBottom)));
+ }
+}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<groupId>org.opendaylight.controller.md</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>sal-parent</artifactId>
- <version>1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
</parent>
<groupId>org.opendaylight.controller.md</groupId>
<artifactId>topology-manager</artifactId>
*/
package org.opendaylight.md.controller.topology.manager;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeConnectorKey;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeKey;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPoint;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPointId;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyLink;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
-import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
+import java.util.Collections;
+import java.util.List;
+
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeConnectorKey;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.getNodeKey;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPoint;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTerminationPointId;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyLink;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
+import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, OpendaylightInventoryListener {
processor.enqueueOperation(new TopologyOperation() {
@Override
public void applyOperation(ReadWriteTransaction transaction) {
- removeAffectedLinks(nodeId, transaction);
- transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
+ removeAffectedLinks(nodeId, transaction);
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
}
@Override
final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(
notification.getNodeConnectorRef());
+ final InstanceIdentifier<Node> node = tpInstance.firstIdentifierOf(Node.class);
+
final TpId tpId = toTerminationPointId(getNodeConnectorKey(
notification.getNodeConnectorRef()).getId());
processor.enqueueOperation(new TopologyOperation() {
@Override
public void applyOperation(ReadWriteTransaction transaction) {
- removeAffectedLinks(tpId, transaction);
- transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
+ Optional<Node> nodeOptional = Optional.absent();
+ try {
+ nodeOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, node).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error occured when trying to read NodeConnector ", e);
+ }
+ if (nodeOptional.isPresent()) {
+ removeAffectedLinks(tpId, transaction);
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
+ }
}
@Override
public void applyOperation(final ReadWriteTransaction transaction) {
final Link link = toTopologyLink(notification);
final InstanceIdentifier<Link> path = linkPath(link);
- transaction.merge(LogicalDatastoreType.OPERATIONAL, path, link, true);
+ transaction.put(LogicalDatastoreType.OPERATIONAL, path, link, true);
}
@Override
processor.enqueueOperation(new TopologyOperation() {
@Override
public void applyOperation(final ReadWriteTransaction transaction) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
+ Optional<Link> linkOptional = Optional.absent();
+ try {
+ // read that checks if link exists (if we do not do this we might get an exception on delete)
+ linkOptional = transaction.read(LogicalDatastoreType.OPERATIONAL,
+ linkPath(toTopologyLink(notification))).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error occured when trying to read Link ", e);
+ }
+ if (linkOptional.isPresent()) {
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
+ }
}
@Override
}
private void removeAffectedLinks(final NodeId id, final ReadWriteTransaction transaction) {
- CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
- transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
- Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(Optional<Topology> topologyOptional) {
- removeAffectedLinks(id, topologyOptional);
- }
-
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Error reading topology data for topology {}", topology, throwable);
- }
- });
+ Optional<Topology> topologyOptional = Optional.absent();
+ try {
+ topologyOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, topology).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error reading topology data for topology {}", topology, e);
+ }
+ if (topologyOptional.isPresent()) {
+ removeAffectedLinks(id, topologyOptional, transaction);
+ }
}
- private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional) {
+ private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional, ReadWriteTransaction transaction) {
if (!topologyOptional.isPresent()) {
return;
}
List<Link> linkList = topologyOptional.get().getLink() != null ?
topologyOptional.get().getLink() : Collections.<Link> emptyList();
- final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
for (Link link : linkList) {
if (id.equals(link.getSource().getSourceNode()) ||
id.equals(link.getDestination().getDestNode())) {
- linkIDsToDelete.add(linkPath(link));
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
}
}
-
- enqueueLinkDeletes(linkIDsToDelete);
- }
-
- private void enqueueLinkDeletes(final Collection<InstanceIdentifier<Link>> linkIDsToDelete) {
- if(!linkIDsToDelete.isEmpty()) {
- processor.enqueueOperation(new TopologyOperation() {
- @Override
- public void applyOperation(ReadWriteTransaction transaction) {
- for(InstanceIdentifier<Link> linkID: linkIDsToDelete) {
- transaction.delete(LogicalDatastoreType.OPERATIONAL, linkID);
- }
- }
-
- @Override
- public String toString() {
- return "Delete Links " + linkIDsToDelete.size();
- }
- });
- }
}
private void removeAffectedLinks(final TpId id, final ReadWriteTransaction transaction) {
- CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
- transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
- Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
- @Override
- public void onSuccess(Optional<Topology> topologyOptional) {
- removeAffectedLinks(id, topologyOptional);
- }
-
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Error reading topology data for topology {}", topology, throwable);
- }
- });
+ Optional<Topology> topologyOptional = Optional.absent();
+ try {
+ topologyOptional = transaction.read(LogicalDatastoreType.OPERATIONAL, topology).checkedGet();
+ } catch (ReadFailedException e) {
+ LOG.error("Error reading topology data for topology {}", topology, e);
+ }
+ if (topologyOptional.isPresent()) {
+ removeAffectedLinks(id, topologyOptional, transaction);
+ }
}
- private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional) {
+ private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional, ReadWriteTransaction transaction) {
if (!topologyOptional.isPresent()) {
return;
}
List<Link> linkList = topologyOptional.get().getLink() != null
? topologyOptional.get().getLink() : Collections.<Link> emptyList();
- final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
for (Link link : linkList) {
if (id.equals(link.getSource().getSourceTp()) ||
id.equals(link.getDestination().getDestTp())) {
- linkIDsToDelete.add(linkPath(link));
+ transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
}
}
-
- enqueueLinkDeletes(linkIDsToDelete);
}
private InstanceIdentifier<Node> getNodePath(final NodeId nodeId) {
package org.opendaylight.md.controller.topology.manager;
import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-
import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
final class OperationProcessor implements AutoCloseable, Runnable, TransactionChainListener {
private static final Logger LOG = LoggerFactory.getLogger(OperationProcessor.class);
private static final int MAX_TRANSACTION_OPERATIONS = 100;
private final BlockingQueue<TopologyOperation> queue = new LinkedBlockingQueue<>(OPERATION_QUEUE_DEPTH);
private final DataBroker dataBroker;
- private final BindingTransactionChain transactionChain;
+ private BindingTransactionChain transactionChain;
+ private volatile boolean finishing = false;
OperationProcessor(final DataBroker dataBroker) {
this.dataBroker = Preconditions.checkNotNull(dataBroker);
@Override
public void run() {
- try {
- for (; ; ) {
- TopologyOperation op = queue.take();
+ while (!finishing) {
+ try {
+ TopologyOperation op = queue.take();
- LOG.debug("New {} operation available, starting transaction", op);
+ LOG.debug("New {} operation available, starting transaction", op);
- final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
+ final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
- int ops = 0;
- do {
- op.applyOperation(tx);
+ int ops = 0;
+ do {
+ op.applyOperation(tx);
- ops++;
- if (ops < MAX_TRANSACTION_OPERATIONS) {
- op = queue.poll();
- } else {
- op = null;
- }
+ ops++;
+ if (ops < MAX_TRANSACTION_OPERATIONS) {
+ op = queue.poll();
+ } else {
+ op = null;
+ }
- LOG.debug("Next operation {}", op);
- } while (op != null);
+ LOG.debug("Next operation {}", op);
+ } while (op != null);
- LOG.debug("Processed {} operations, submitting transaction", ops);
+ LOG.debug("Processed {} operations, submitting transaction", ops);
- CheckedFuture<Void, TransactionCommitFailedException> txResultFuture = tx.submit();
- Futures.addCallback(txResultFuture, new FutureCallback<Void>() {
- @Override
- public void onSuccess(Void notUsed) {
- LOG.debug("Topology export successful for tx :{}", tx.getIdentifier());
+ try {
+ tx.submit().checkedGet();
+ } catch (final TransactionCommitFailedException e) {
+ LOG.warn("Stat DataStoreOperation unexpected State!", e);
+ transactionChain.close();
+ transactionChain = dataBroker.createTransactionChain(this);
+ cleanDataStoreOperQueue();
}
- @Override
- public void onFailure(Throwable throwable) {
- LOG.error("Topology export transaction {} failed", tx.getIdentifier(), throwable.getCause());
- }
- });
+ } catch (final IllegalStateException e) {
+ LOG.warn("Stat DataStoreOperation unexpected State!", e);
+ transactionChain.close();
+ transactionChain = dataBroker.createTransactionChain(this);
+ cleanDataStoreOperQueue();
+ } catch (final InterruptedException e) {
+ LOG.warn("Stat Manager DS Operation thread interupted!", e);
+ finishing = true;
+ } catch (final Exception e) {
+ LOG.warn("Stat DataStore Operation executor fail!", e);
+ }
}
- } catch (InterruptedException e) {
- LOG.info("Interrupted processing, terminating", e);
- }
-
// Drain all events, making sure any blocked threads are unblocked
+ cleanDataStoreOperQueue();
+ }
+
+ private void cleanDataStoreOperQueue() {
while (!queue.isEmpty()) {
queue.poll();
}
@Override
public void onTransactionChainFailed(TransactionChain<?, ?> chain, AsyncTransaction<?, ?> transaction, Throwable cause) {
LOG.error("Failed to export Topology manager operations, Transaction {} failed.", transaction.getIdentifier(), cause);
+ transactionChain.close();
+ transactionChain = dataBroker.createTransactionChain(this);
+ cleanDataStoreOperQueue();
}
@Override
package org.opendaylight.md.controller.topology.manager;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkKey;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeBuilder;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
public class FlowCapableTopologyExporterTest {
@Test
public void testOnNodeRemoved() {
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
- nodeKey = newInvNodeKey("node1");
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
nodeKey);
};
SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ readFuture.set(Optional.of(topology));
ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
.read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
int expDeleteCalls = expDeletedIIDs.length;
ArgumentCaptor.forClass(InstanceIdentifier.class);
setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
- ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
- setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
- CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
-
- doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+ doReturn(mockTx1).when(mockTxChain).newReadWriteTransaction();
exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
waitForDeletes(expDeleteCalls, deleteLatch);
- waitForSubmit(submitLatch2);
-
assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
verifyMockTx(mockTx1);
- verifyMockTx(mockTx2);
}
@SuppressWarnings({ "rawtypes" })
@Test
public void testOnNodeRemovedWithNoTopology() {
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
- nodeKey = newInvNodeKey("node1");
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
nodeKey);
.read(LogicalDatastoreType.OPERATIONAL, topologyIID);
CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
CountDownLatch deleteLatch = new CountDownLatch(1);
ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
ArgumentCaptor.forClass(InstanceIdentifier.class);
@Test
public void testOnNodeConnectorRemoved() {
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ TerminationPointKey terminationPointKey = new TerminationPointKey(new TpId("tp1"));
+
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
- nodeKey = newInvNodeKey("node1");
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
- newInvNodeConnKey("tp1");
+ newInvNodeConnKey(terminationPointKey.getTpId().getValue());
InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
};
final SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+ readFuture.set(Optional.of(topology));
ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
.read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx1)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
int expDeleteCalls = expDeletedIIDs.length;
ArgumentCaptor.forClass(InstanceIdentifier.class);
setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
- ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
- setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
- CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
-
- doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+ doReturn(mockTx1).when(mockTxChain).newReadWriteTransaction();
exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
new NodeConnectorRef(invNodeConnID)).build());
waitForDeletes(expDeleteCalls, deleteLatch);
- waitForSubmit(submitLatch2);
-
assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
verifyMockTx(mockTx1);
- verifyMockTx(mockTx2);
}
@SuppressWarnings("rawtypes")
@Test
public void testOnNodeConnectorRemovedWithNoTopology() {
+ NodeKey topoNodeKey = new NodeKey(new NodeId("node1"));
+ TerminationPointKey terminationPointKey = new TerminationPointKey(new TpId("tp1"));
+
+ InstanceIdentifier<Node> topoNodeII = topologyIID.child(Node.class, topoNodeKey);
+ Node topoNode = new NodeBuilder().setKey(topoNodeKey).build();
+
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
- nodeKey = newInvNodeKey("node1");
+ nodeKey = newInvNodeKey(topoNodeKey.getNodeId().getValue());
org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
- newInvNodeConnKey("tp1");
+ newInvNodeConnKey(terminationPointKey.getTpId().getValue());
InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
.read(LogicalDatastoreType.OPERATIONAL, topologyIID);
CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ SettableFuture<Optional<Node>> readFutureNode = SettableFuture.create();
+ readFutureNode.set(Optional.of(topoNode));
+ doReturn(Futures.makeChecked(readFutureNode, ReadFailedException.MAPPER)).when(mockTx)
+ .read(LogicalDatastoreType.OPERATIONAL, topoNodeII);
+
CountDownLatch deleteLatch = new CountDownLatch(1);
ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
ArgumentCaptor.forClass(InstanceIdentifier.class);
waitForSubmit(submitLatch);
ArgumentCaptor<Link> mergedNode = ArgumentCaptor.forClass(Link.class);
- verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
- Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
+ verify(mockTx).put(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
mergedNode.capture(), eq(true));
assertEquals("Source node ID", "sourceNode",
mergedNode.getValue().getSource().getSourceNode().getValue());
destNodeConnKey = newInvNodeConnKey("destTP");
InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+ Link link = newLink(sourceNodeConnKey.getId().getValue(), newSourceTp(sourceNodeConnKey.getId().getValue()),
+ newDestTp(destNodeConnKey.getId().getValue()));
+
ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+ doReturn(Futures.immediateCheckedFuture(Optional.of(link))).when(mockTx).read(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
new NodeConnectorRef(sourceConnID)).setDestination(
- new NodeConnectorRef(destConnID)).build());
+ new NodeConnectorRef(destConnID)).build());
waitForSubmit(submitLatch);
Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
}
+ @Test
+ public void testOnLinkRemovedLinkDoesNotExist() {
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ sourceNodeKey = newInvNodeKey("sourceNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+ InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+ destNodeKey = newInvNodeKey("destNode");
+ org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+ destNodeConnKey = newInvNodeConnKey("destTP");
+ InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+ ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+ CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+ doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+ doReturn(Futures.immediateCheckedFuture(Optional.<Link>absent())).when(mockTx).read(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+
+ exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
+ new NodeConnectorRef(sourceConnID)).setDestination(
+ new NodeConnectorRef(destConnID)).build());
+
+ waitForSubmit(submitLatch);
+
+ verify(mockTx, never()).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+ Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+ }
+
private void verifyMockTx(ReadWriteTransaction mockTx) {
InOrder inOrder = inOrder(mockTx);
inOrder.verify(mockTx, atLeast(0)).submit();
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>config-netconf-connector</artifactId>
<packaging>bundle</packaging>
String elementName = jmxToYangChildRbeMapping.get(childMappingEntry.getKey());
- Element innerXml = XmlUtil.createElement(document, elementName, Optional.<String>absent());
+ Element innerXml = XmlUtil.createElement(document, elementName, Optional.of(namespace));
childMappingEntry.getValue().toXml(objectName, innerChildRbeOns, document,
runtimeInstanceIndex, innerXml, namespace);
xml.appendChild(innerXml);
assertEquals(8 * 4, getElementsSize(response, "inner-inner-running-data"));
assertEquals(8 * 4, getElementsSize(response, "deep3"));
assertEquals(8 * 4 * 2, getElementsSize(response, "list-of-strings"));
- assertEquals(8, getElementsSize(response, "inner-running-data-additional"));
+ assertEquals(8, getElementsSize(response, "inner-running-data-additional", "urn:opendaylight:params:xml:ns:yang:controller:test:impl"));
assertEquals(8, getElementsSize(response, "deep4"));
// TODO assert keys
return response.getElementsByTagName(elementName).getLength();
}
+ private int getElementsSize(Document response, String elementName, String namespace) {
+ return response.getElementsByTagNameNS(namespace, elementName).getLength();
+ }
+
private Document executeOp(final NetconfOperation op, final String filename) throws ParserConfigurationException,
SAXException, IOException, NetconfDocumentedException {
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>config-persister-impl</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>ietf-netconf-monitoring-extension</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>ietf-netconf-monitoring</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-api</artifactId>
<packaging>bundle</packaging>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>netconf-artifacts</artifactId>
+ <version>0.3.0-SNAPSHOT</version>
+ <packaging>pom</packaging>
+
+ <dependencyManagement>
+ <dependencies>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-config-dispatcher</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-netconf-connector</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>config-persister-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-auth</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-cli</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-client</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-connector-config</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-impl</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-mapping-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-monitoring</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-netty-util</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-ssh</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-tcp</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-testtool</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-usermanager</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>ietf-netconf-monitoring</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>ietf-netconf-monitoring-extension</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-client</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-impl</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-netty-util</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-ssh</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-util</artifactId>
+ <version>${project.version}</version>
+ <type>test-jar</type>
+ </dependency>
+
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>features-netconf</artifactId>
+ <version>${project.version}</version>
+ <classifier>features</classifier>
+ <type>xml</type>
+ <scope>runtime</scope>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
+</project>
+
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>netconf-auth</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-cli</artifactId>
<packaging>jar</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-client</artifactId>
<packaging>bundle</packaging>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-config</artifactId>
<description>Configuration files for netconf</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-connector-config</artifactId>
<description>Configuration files for netconf-connector</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-impl</artifactId>
<packaging>bundle</packaging>
import com.google.common.base.Optional;
import java.io.IOException;
+import java.util.Map;
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.util.mapping.AbstractNetconfOperation.OperationNameAndNamespace;
return result;
}
- private static void addSubtree(XmlElement filter, XmlElement src, XmlElement dst) {
+ private static void addSubtree(XmlElement filter, XmlElement src, XmlElement dst) throws NetconfDocumentedException {
for (XmlElement srcChild : src.getChildElements()) {
for (XmlElement filterChild : filter.getChildElements()) {
addSubtree2(filterChild, srcChild, dst);
}
}
- private static MatchingResult addSubtree2(XmlElement filter, XmlElement src, XmlElement dstParent) {
+ private static MatchingResult addSubtree2(XmlElement filter, XmlElement src, XmlElement dstParent) throws NetconfDocumentedException {
Document document = dstParent.getDomElement().getOwnerDocument();
MatchingResult matches = matches(src, filter);
if (matches != MatchingResult.NO_MATCH && matches != MatchingResult.CONTENT_MISMATCH) {
* Shallow compare src node to filter: tag name and namespace must match.
* If filter node has no children and has text content, it also must match.
*/
- private static MatchingResult matches(XmlElement src, XmlElement filter) {
+ private static MatchingResult matches(XmlElement src, XmlElement filter) throws NetconfDocumentedException {
boolean tagMatch = src.getName().equals(filter.getName()) &&
src.getNamespaceOptionally().equals(filter.getNamespaceOptionally());
MatchingResult result = null;
// match text content
Optional<String> maybeText = filter.getOnlyTextContentOptionally();
if (maybeText.isPresent()) {
- if (maybeText.equals(src.getOnlyTextContentOptionally())) {
+ if (maybeText.equals(src.getOnlyTextContentOptionally()) || prefixedContentMatches(filter, src)) {
result = MatchingResult.CONTENT_MATCH;
} else {
result = MatchingResult.CONTENT_MISMATCH;
if (result == null) {
result = MatchingResult.NO_MATCH;
}
- logger.debug("Matching {} to {} resulted in {}", src, filter, tagMatch);
+ logger.debug("Matching {} to {} resulted in {}", src, filter, result);
return result;
}
+ private static boolean prefixedContentMatches(final XmlElement filter, final XmlElement src) throws NetconfDocumentedException {
+ final Map.Entry<String, String> prefixToNamespaceOfFilter = filter.findNamespaceOfTextContent();
+ final Map.Entry<String, String> prefixToNamespaceOfSrc = src.findNamespaceOfTextContent();
+
+ final String prefix = prefixToNamespaceOfFilter.getKey();
+ // If this is not a prefixed content, we do not need to continue since content do not match
+ if(prefix.equals(XmlElement.DEFAULT_NAMESPACE_PREFIX)) {
+ return false;
+ }
+ // Namespace mismatch
+ if(!prefixToNamespaceOfFilter.getValue().equals(prefixToNamespaceOfSrc.getValue())) {
+ return false;
+ }
+
+ final String unprefixedFilterContent = filter.getTextContent().substring(prefix.length());
+ final String unprefixedSrcCOntnet = src.getTextContent().substring(prefix.length());
+ // Finally compare unprefixed content
+ return unprefixedFilterContent.equals(unprefixedSrcCOntnet);
+ }
+
enum MatchingResult {
NO_MATCH, TAG_MATCH, CONTENT_MATCH, CONTENT_MISMATCH
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl;
+
+import com.google.common.base.Optional;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import io.netty.channel.Channel;
+import java.util.List;
+import java.util.Set;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.impl.NetconfServerSessionListener;
+import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
+import org.opendaylight.controller.netconf.mapping.api.Capability;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class NetconfMonitoringServiceImplTest {
+
+ private NetconfMonitoringServiceImpl service;
+
+ @Mock
+ private NetconfOperationProvider operationProvider;
+ @Mock
+ private NetconfManagementSession managementSession;
+ @Mock
+ private NetconfOperationServiceSnapshot snapshot;
+ @Mock
+ private NetconfOperationService operationService;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ service = new NetconfMonitoringServiceImpl(operationProvider);
+ }
+
+ @Test
+ public void testSessions() throws Exception {
+ doReturn("sessToStr").when(managementSession).toString();
+ service.onSessionUp(managementSession);
+ List list = Lists.newArrayList(managementSession);
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void testGetSchemas() throws Exception {
+ doThrow(RuntimeException.class).when(operationProvider).openSnapshot(anyString());
+ service.getSchemas();
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testGetSchemas2() throws Exception {
+ doThrow(Exception.class).when(operationProvider).openSnapshot(anyString());
+ service.getSchemas();
+ }
+
+ @Test
+ public void testGetSchemas3() throws Exception {
+ doReturn("").when(managementSession).toString();
+ Capability cap = mock(Capability.class);
+ Set caps = Sets.newHashSet(cap);
+ Set services = Sets.newHashSet(operationService);
+ doReturn(snapshot).when(operationProvider).openSnapshot(anyString());
+ doReturn(services).when(snapshot).getServices();
+ doReturn(caps).when(operationService).getCapabilities();
+ Optional opt = mock(Optional.class);
+ doReturn(opt).when(cap).getCapabilitySchema();
+ doReturn(true).when(opt).isPresent();
+ doReturn(opt).when(cap).getModuleNamespace();
+ doReturn("namespace").when(opt).get();
+ Optional optRev = Optional.of("rev");
+ doReturn(optRev).when(cap).getRevision();
+ doReturn(Optional.of("modName")).when(cap).getModuleName();
+ doReturn(Optional.of(Lists.newArrayList("loc"))).when(cap).getLocation();
+ doNothing().when(snapshot).close();
+
+ assertNotNull(service.getSchemas());
+ verify(snapshot, times(1)).close();
+
+ NetconfServerSessionListener sessionListener = mock(NetconfServerSessionListener.class);
+ Channel channel = mock(Channel.class);
+ NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("name", "addr", "2", "tcp", "id");
+ NetconfServerSession sm = new NetconfServerSession(sessionListener, channel, 10, header);
+ doNothing().when(sessionListener).onSessionUp(any(NetconfServerSession.class));
+ sm.sessionUp();
+ service.onSessionUp(sm);
+ assertEquals(1, service.getSessions().getSession().size());
+
+ assertEquals(Long.valueOf(10), service.getSessions().getSession().get(0).getSessionId());
+
+ service.onSessionDown(sm);
+ assertEquals(0, service.getSessions().getSession().size());
+ }
+}
@Parameters
public static Collection<Object[]> data() {
List<Object[]> result = new ArrayList<>();
- for (int i = 0; i <= 8; i++) {
+ for (int i = 0; i <= 9; i++) {
result.add(new Object[]{i});
}
return result;
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl.mapping.operations;
+
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+
+public class DefaultCloseSessionTest {
+ @Test
+ public void testDefaultCloseSession() throws Exception {
+ AutoCloseable res = mock(AutoCloseable.class);
+ doNothing().when(res).close();
+ DefaultCloseSession session = new DefaultCloseSession("", res);
+ Document doc = XmlUtil.newDocument();
+ XmlElement elem = XmlElement.fromDomElement(XmlUtil.readXmlToElement("<elem/>"));
+ session.handleWithNoSubsequentOperations(doc, elem);
+ }
+
+ @Test(expected = NetconfDocumentedException.class)
+ public void testDefaultCloseSession2() throws Exception {
+ AutoCloseable res = mock(AutoCloseable.class);
+ doThrow(NetconfDocumentedException.class).when(res).close();
+ DefaultCloseSession session = new DefaultCloseSession("", res);
+ Document doc = XmlUtil.newDocument();
+ XmlElement elem = XmlElement.fromDomElement(XmlUtil.readXmlToElement("<elem/>"));
+ session.handleWithNoSubsequentOperations(doc, elem);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl.mapping.operations;
+
+import com.google.common.collect.Sets;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
+import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationRouter;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.mockito.Mockito.*;
+
+public class DefaultCommitTest {
+
+ private NetconfOperationChainedExecution operation;
+ private Document requestMessage;
+ private NetconfOperationRouter router;
+ private DefaultCommitNotificationProducer notifier;
+ private CapabilityProvider cap;
+ private DefaultCommit commit;
+
+ @Before
+ public void setUp() throws Exception {
+ operation = mock(NetconfOperationChainedExecution.class);
+ doReturn(XmlUtil.newDocument()).when(operation).execute(any(Document.class));
+ router = mock(NetconfOperationRouter.class);
+ doReturn(false).when(operation).isExecutionTermination();
+ notifier = mock(DefaultCommitNotificationProducer.class);
+ doNothing().when(notifier).sendCommitNotification(anyString(), any(Element.class), anySetOf(String.class));
+ cap = mock(CapabilityProvider.class);
+ doReturn(Sets.newHashSet()).when(cap).getCapabilities();
+ Document rpcData = XmlFileLoader.xmlFileToDocument("netconfMessages/editConfig_expectedResult.xml");
+ doReturn(rpcData).when(router).onNetconfMessage(any(Document.class), any(NetconfServerSession.class));
+ commit = new DefaultCommit(notifier, cap, "", router);
+ }
+
+ @Test
+ public void testHandleWithNotification() throws Exception {
+ requestMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/commit.xml");
+ commit.handle(requestMessage, operation);
+ verify(operation, times(1)).execute(requestMessage);
+ verify(notifier, times(1)).sendCommitNotification(anyString(), any(Element.class), anySetOf(String.class));
+ }
+
+ @Test
+ public void testHandleWithoutNotification() throws Exception {
+ requestMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/commit.xml");
+ Element elem = requestMessage.getDocumentElement();
+ elem.setAttribute("notify", "false");
+ commit.handle(requestMessage, operation);
+ verify(operation, times(1)).execute(requestMessage);
+ verify(notifier, never()).sendCommitNotification(anyString(), any(Element.class), anySetOf(String.class));
+ }
+
+ @Test(expected = NetconfDocumentedException.class)
+ public void testHandle() throws Exception {
+ Document rpcData = XmlFileLoader.xmlFileToDocument("netconfMessages/get.xml");
+ doReturn(rpcData).when(router).onNetconfMessage(any(Document.class), any(NetconfServerSession.class));
+ requestMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/commit.xml");
+ commit.handle(requestMessage, operation);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl.mapping.operations;
+
+import com.google.common.base.Optional;
+import junit.framework.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.impl.mapping.CapabilityProvider;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+
+public class DefaultGetSchemaTest {
+
+ private CapabilityProvider cap;
+ private Document doc;
+ private String getSchema;
+
+ @Before
+ public void setUp() throws Exception {
+ cap = mock(CapabilityProvider.class);
+ doc = XmlUtil.newDocument();
+ getSchema = "<get-schema xmlns=\"urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring\">\n" +
+ " <identifier>threadpool-api</identifier>\n" +
+ " <version>2010-09-24</version>\n" +
+ " <format\n" +
+ " xmlns:ncm=\"urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring\">ncm:yang\n" +
+ " </format>\n" +
+ " </get-schema>";
+ }
+
+ @Test(expected = NetconfDocumentedException.class)
+ public void testDefaultGetSchema() throws Exception {
+ DefaultGetSchema schema = new DefaultGetSchema(cap, "");
+ doThrow(IllegalStateException.class).when(cap).getSchemaForCapability(anyString(), any(Optional.class));
+ schema.handleWithNoSubsequentOperations(doc, XmlElement.fromDomElement(XmlUtil.readXmlToElement(getSchema)));
+ }
+
+ @Test
+ public void handleWithNoSubsequentOperations() throws Exception {
+ DefaultGetSchema schema = new DefaultGetSchema(cap, "");
+ doReturn("").when(cap).getSchemaForCapability(anyString(), any(Optional.class));
+ assertNotNull(schema.handleWithNoSubsequentOperations(doc, XmlElement.fromDomElement(XmlUtil.readXmlToElement(getSchema))));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl.mapping.operations;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.impl.NetconfServerSession;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.*;
+
+public class DefaultStopExiTest {
+ @Test
+ public void testHandleWithNoSubsequentOperations() throws Exception {
+ DefaultStopExi exi = new DefaultStopExi("");
+ Document doc = XmlUtil.newDocument();
+ Channel channel = mock(Channel.class);
+ ChannelPipeline pipeline = mock(ChannelPipeline.class);
+ doReturn(pipeline).when(channel).pipeline();
+ ChannelHandler channelHandler = mock(ChannelHandler.class);
+ doReturn(channelHandler).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class));
+
+ NetconfServerSession serverSession = new NetconfServerSession(null, channel, 2L, null);
+ exi.setNetconfSession(serverSession);
+
+ assertNotNull(exi.handleWithNoSubsequentOperations(doc, XmlElement.fromDomElement(XmlUtil.readXmlToElement("<elem/>"))));
+ verify(pipeline, times(1)).replace(anyString(), anyString(), any(ChannelHandler.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl.osgi;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Dictionary;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
+import org.osgi.framework.*;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.*;
+
+public class NetconfImplActivatorTest {
+
+ @Mock
+ private BundleContext bundle;
+ @Mock
+ private Filter filter;
+ @Mock
+ private ServiceReference reference;
+ @Mock
+ private ServiceRegistration registration;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ doReturn(filter).when(bundle).createFilter(anyString());
+ doNothing().when(bundle).addServiceListener(any(ServiceListener.class), anyString());
+
+ ServiceReference[] refs = new ServiceReference[0];
+ doReturn(refs).when(bundle).getServiceReferences(anyString(), anyString());
+ doReturn(Arrays.asList(refs)).when(bundle).getServiceReferences(any(Class.class), anyString());
+ doReturn("").when(bundle).getProperty(anyString());
+ doReturn(registration).when(bundle).registerService(any(Class.class), any(NetconfOperationServiceFactoryListenerImpl.class), any(Dictionary.class));
+ doNothing().when(registration).unregister();
+ doNothing().when(bundle).removeServiceListener(any(ServiceListener.class));
+ }
+
+ @Test
+ public void testStart() throws Exception {
+ NetconfImplActivator activator = new NetconfImplActivator();
+ activator.start(bundle);
+ verify(bundle, times(2)).registerService(any(Class.class), any(NetconfOperationServiceFactoryListenerImpl.class), any(Dictionary.class));
+ activator.stop(bundle);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl.osgi;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.Filter;
+import org.osgi.framework.ServiceReference;
+
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.*;
+
+public class NetconfOperationServiceFactoryTrackerTest {
+
+ @Mock
+ private Filter filter;
+ @Mock
+ private BundleContext context;
+ @Mock
+ private NetconfOperationServiceFactoryListener listener;
+ @Mock
+ private NetconfOperationServiceFactory factory;
+ @Mock
+ private ServiceReference reference;
+
+ private NetconfOperationServiceFactoryTracker tracker;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ doNothing().when(listener).onRemoveNetconfOperationServiceFactory(any(NetconfOperationServiceFactory.class));
+ doReturn(filter).when(context).createFilter(anyString());
+ doReturn("").when(reference).toString();
+ doReturn(factory).when(context).getService(any(ServiceReference.class));
+ doReturn("").when(factory).toString();
+ doNothing().when(listener).onAddNetconfOperationServiceFactory(any(NetconfOperationServiceFactory.class));
+ tracker = new NetconfOperationServiceFactoryTracker(context, listener);
+ }
+
+ @Test
+ public void testNetconfOperationServiceFactoryTracker() throws Exception {
+ tracker.removedService(null, factory);
+ verify(listener, times(1)).onRemoveNetconfOperationServiceFactory(any(NetconfOperationServiceFactory.class));
+ }
+
+ @Test
+ public void testAddingService() throws Exception {
+ assertNotNull(tracker.addingService(reference));
+ verify(listener, times(1)).onAddNetconfOperationServiceFactory(any(NetconfOperationServiceFactory.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.impl.util;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.mockito.Mockito.*;
+
+public class DeserializerExceptionHandlerTest {
+
+ private DeserializerExceptionHandler handler;
+ private ChannelFuture channelFuture;
+ private ChannelHandlerContext context;
+ private Channel channel;
+
+ @Before
+ public void setUp() throws Exception {
+ handler = new DeserializerExceptionHandler();
+ context = mock(ChannelHandlerContext.class);
+ channel = mock(Channel.class);
+ doReturn(channel).when(context).channel();
+ channelFuture = mock(ChannelFuture.class);
+ doReturn(channelFuture).when(channelFuture).addListener(any(GenericFutureListener.class));
+ doReturn(channelFuture).when(channel).writeAndFlush(anyObject());
+ }
+
+ @Test
+ public void testExceptionCaught() throws Exception {
+ handler.exceptionCaught(context, new Exception());
+ verify(context, times(1)).channel();
+ }
+}
--- /dev/null
+<rpc-reply message-id="5"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <data>
+ <top xmlns="http://example.com/schema/1.2/config">
+ <users>
+ <user>
+ <name>fred</name>
+ <type xmlns:x="http://java.sun.com/dtd/properties.dtd">x:admin</type>
+ <full-name>Fred Flintstone</full-name>
+ </user>
+ </users>
+ </top>
+ </data>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<rpc-reply message-id="5" xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <data>
+ <top xmlns="http://example.com/schema/1.2/config">
+ <users>
+ <user>
+ <name>root</name>
+ <type>superuser</type>
+ <full-name>Charlie Root</full-name>
+ <company-info>
+ <dept>1</dept>
+ <id>1</id>
+ </company-info>
+ </user>
+ <user>
+ <name>fred</name>
+ <type xmlns:x="http://java.sun.com/dtd/properties.dtd">x:admin</type>
+ <full-name>Fred Flintstone</full-name>
+ <company-info>
+ <dept>2</dept>
+ <id>2</id>
+ </company-info>
+ </user>
+ <user>
+ <name>barney</name>
+ <type>admin</type>
+ <full-name>Barney Rubble</full-name>
+ <company-info>
+ <dept>2</dept>
+ <id>3</id>
+ </company-info>
+ </user>
+ </users>
+ <groups>
+ <group>
+ <name>admin</name>
+ </group>
+ </groups>
+ </top>
+ </data>
+</rpc-reply>
\ No newline at end of file
--- /dev/null
+<rpc message-id="5"
+ xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <get-config>
+ <source>
+ <running/>
+ </source>
+ <filter type="subtree">
+ <top xmlns="http://example.com/schema/1.2/config">
+ <users>
+ <user>
+ <name>fred</name>
+ <type xmlns:a="http://java.sun.com/dtd/properties.dtd">a:admin</type>
+ <full-name/>
+ </user>
+ </users>
+ </top>
+ </filter>
+ </get-config>
+</rpc>
\ No newline at end of file
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-it</artifactId>
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import io.netty.channel.EventLoopGroup;
import io.netty.channel.local.LocalAddress;
+import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.GlobalEventExecutor;
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.nio.file.Files;
import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.TestingNetconfClient;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
-import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
public static final String USERNAME = "user";
public static final String PASSWORD = "pwd";
- private NetconfSSHServer sshServer;
+ private SshProxyServer sshProxyServer;
+
+ private ExecutorService nioExec;
+ private EventLoopGroup clientGroup;
+ private ScheduledExecutorService minaTimerEx;
@Before
public void setUp() throws Exception {
- final char[] pem = PEMGenerator.generate().toCharArray();
- sshServer = NetconfSSHServer.start(TLS_ADDRESS.getPort(), NetconfConfigUtil.getNetconfLocalAddress(), getNettyThreadgroup(), pem);
- sshServer.setAuthProvider(getAuthProvider());
+ nioExec = Executors.newFixedThreadPool(1);
+ clientGroup = new NioEventLoopGroup();
+ minaTimerEx = Executors.newScheduledThreadPool(1);
+ sshProxyServer = new SshProxyServer(minaTimerEx, clientGroup, nioExec);
+ sshProxyServer.bind(TLS_ADDRESS, NetconfConfigUtil.getNetconfLocalAddress(), new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ return true;
+ }
+ }, new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString()));
}
@After
public void tearDown() throws Exception {
- sshServer.close();
- sshServer.join();
+ sshProxyServer.close();
+ clientGroup.shutdownGracefully().await();
+ minaTimerEx.shutdownNow();
+ nioExec.shutdownNow();
}
@Test
/**
* Test all requests are handled properly and no mismatch occurs in listener
*/
- /* Disabled until fixed
- @Test(timeout = 5*60*1000)
+ @Test(timeout = 6*60*1000)
public void testSecureStress() throws Exception {
- final int requests = 10000;
+ final int requests = 4000;
final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
final NetconfDeviceCommunicator sessionListener = getSessionListener();
assertEquals(requests, responseCounter.get());
}
}
- */
+
private NetconfMessage changeMessageId(final NetconfMessage getConfig, final int i) throws IOException, SAXException {
String s = XmlUtil.toString(getConfig.getDocument(), false);
s = s.replace("101", Integer.toString(i));
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-mapping-api</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-monitoring</artifactId>
<packaging>bundle</packaging>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.monitoring.osgi;
+
+import java.util.Arrays;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.Filter;
+import org.osgi.framework.ServiceListener;
+import org.osgi.framework.ServiceReference;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class NetconfMonitoringActivatorTest {
+
+ @Mock
+ BundleContext context;
+ @Mock
+ Filter filter;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ doReturn(filter).when(context).createFilter(anyString());
+ doNothing().when(context).addServiceListener(any(ServiceListener.class), anyString());
+ ServiceReference[] refs = new ServiceReference[2];
+ doReturn(Arrays.asList(refs)).when(context).getServiceReferences(any(Class.class), anyString());
+ doReturn(refs).when(context).getServiceReferences(anyString(), anyString());
+ }
+
+ @Test
+ public void testNetconfMonitoringActivator() throws Exception {
+ NetconfMonitoringActivator activator = new NetconfMonitoringActivator();
+ activator.start(context);
+ verify(context, times(1)).addServiceListener(any(ServiceListener.class), anyString());
+
+ activator.stop(context);
+ verify(context, times(1)).removeServiceListener(any(ServiceListener.class));
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.monitoring.osgi;
+
+import com.google.common.base.Optional;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.netconf.monitoring.MonitoringConstants;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+
+public class NetconfMonitoringOperationServiceTest {
+ @Test
+ public void testGetters() throws Exception {
+ NetconfMonitoringService monitor = mock(NetconfMonitoringService.class);
+ NetconfMonitoringOperationService service = new NetconfMonitoringOperationService(monitor);
+
+ assertEquals(1, service.getNetconfOperations().size());
+
+ assertEquals(Optional.absent(), service.getCapabilities().iterator().next().getCapabilitySchema());
+ assertEquals(Optional.absent(), service.getCapabilities().iterator().next().getLocation());
+ assertEquals(Optional.of(MonitoringConstants.MODULE_REVISION), service.getCapabilities().iterator().next().getRevision());
+ assertEquals(Optional.of(MonitoringConstants.MODULE_NAME), service.getCapabilities().iterator().next().getModuleName());
+ assertEquals(Optional.of(MonitoringConstants.NAMESPACE), service.getCapabilities().iterator().next().getModuleNamespace());
+ assertEquals(MonitoringConstants.URI, service.getCapabilities().iterator().next().getCapabilityUri());
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.monitoring.osgi;
+
+import java.util.Hashtable;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.Filter;
+import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyCollection;
+import static org.mockito.Mockito.*;
+
+public class NetconfMonitoringServiceTrackerTest {
+
+ @Mock
+ private ServiceReference reference;
+ @Mock
+ private BundleContext context;
+ @Mock
+ private ServiceRegistration serviceRegistration;
+ @Mock
+ private Filter filter;
+ @Mock
+ private NetconfMonitoringService monitoringService;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ doReturn(serviceRegistration).when(context).registerService(any(Class.class), any(NetconfOperationServiceFactory.class), any(Hashtable.class));
+ doNothing().when(serviceRegistration).unregister();
+ doReturn(filter).when(context).createFilter(anyString());
+ doReturn("").when(reference).toString();
+ doReturn(monitoringService).when(context).getService(any(ServiceReference.class));
+ }
+
+ @Test
+ public void testAddingService() throws Exception {
+ NetconfMonitoringServiceTracker tracker = new NetconfMonitoringServiceTracker(context);
+ tracker.addingService(reference);
+ verify(context, times(1)).registerService(any(Class.class), any(NetconfOperationServiceFactory.class), any(Hashtable.class));
+ tracker.removedService(reference, null);
+ verify(serviceRegistration, times(1)).unregister();
+ }
+}
"<session-id>1</session-id>" +
"<in-bad-rpcs>0</in-bad-rpcs>" +
"<in-rpcs>0</in-rpcs>" +
- "<login-time>loginTime</login-time>" +
+ "<login-time>2010-10-10T12:32:32Z</login-time>" +
"<out-notifications>0</out-notifications>" +
"<out-rpc-errors>0</out-rpc-errors>" +
"<ncme:session-identifier>client</ncme:session-identifier>" +
- "<source-host>address/port</source-host>" +
+ "<source-host>192.168.1.1</source-host>" +
"<transport>ncme:netconf-tcp</transport>" +
"<username>username</username>" +
"</session>"));
final Session1 mockedSession1 = mock(Session1.class);
doReturn("client").when(mockedSession1).getSessionIdentifier();
doReturn(1L).when(mocked).getSessionId();
- doReturn(new DateAndTime("loginTime")).when(mocked).getLoginTime();
- doReturn(new Host(new DomainName("address/port"))).when(mocked).getSourceHost();
+ doReturn(new DateAndTime("2010-10-10T12:32:32Z")).when(mocked).getLoginTime();
+ doReturn(new Host(new DomainName("192.168.1.1"))).when(mocked).getSourceHost();
doReturn(new ZeroBasedCounter32(0L)).when(mocked).getInBadRpcs();
doReturn(new ZeroBasedCounter32(0L)).when(mocked).getInRpcs();
doReturn(new ZeroBasedCounter32(0L)).when(mocked).getOutNotifications();
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-netty-util</artifactId>
<packaging>bundle</packaging>
<groupId>org.opendaylight.controller</groupId>
<artifactId>protocol-framework</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
<artifactId>maven-bundle-plugin</artifactId>
<configuration>
<instructions>
- <Import-Package>org.apache.sshd.*, ch.ethz.ssh2, com.google.common.base, com.google.common.collect, io.netty.buffer,
+ <Import-Package>org.apache.sshd.*, com.google.common.base, com.google.common.collect, io.netty.buffer,
io.netty.channel, io.netty.channel.socket, io.netty.handler.codec, io.netty.handler.ssl, io.netty.util,
io.netty.util.concurrent, javax.xml.transform, javax.xml.transform.dom, javax.xml.transform.sax,
javax.xml.transform.stream, org.opendaylight.controller.netconf.api,
package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
-import com.google.common.base.Preconditions;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelOutboundHandler;
-import io.netty.channel.ChannelOutboundHandlerAdapter;
-import io.netty.channel.ChannelPromise;
import java.io.IOException;
import java.net.SocketAddress;
+
import org.apache.sshd.ClientChannel;
import org.apache.sshd.ClientSession;
import org.apache.sshd.SshClient;
import org.apache.sshd.client.future.OpenFuture;
import org.apache.sshd.common.future.CloseFuture;
import org.apache.sshd.common.future.SshFutureListener;
-import org.apache.sshd.common.io.IoInputStream;
-import org.apache.sshd.common.io.IoOutputStream;
-import org.apache.sshd.common.io.IoReadFuture;
-import org.apache.sshd.common.io.IoWriteFuture;
-import org.apache.sshd.common.io.WritePendingException;
-import org.apache.sshd.common.util.Buffer;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import com.google.common.base.Preconditions;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandlerAdapter;
+import io.netty.channel.ChannelPromise;
+
/**
* Netty SSH handler class. Acts as interface between Netty and SSH library.
*/
private final AuthenticationHandler authenticationHandler;
private final SshClient sshClient;
- private SshReadAsyncListener sshReadAsyncListener;
- private SshWriteAsyncHandler sshWriteAsyncHandler;
+ private AsyncSshHandlerReader sshReadAsyncListener;
+ private AsyncSshHandlerWriter sshWriteAsyncHandler;
private ClientChannel channel;
private ClientSession session;
connectPromise.setSuccess();
connectPromise = null;
- sshReadAsyncListener = new SshReadAsyncListener(this, ctx, channel.getAsyncOut());
+ // TODO we should also read from error stream and at least log from that
+
+ sshReadAsyncListener = new AsyncSshHandlerReader(new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ AsyncSshHandler.this.disconnect(ctx, ctx.newPromise());
+ }
+ }, new AsyncSshHandlerReader.ReadMsgHandler() {
+ @Override
+ public void onMessageRead(final ByteBuf msg) {
+ ctx.fireChannelRead(msg);
+ }
+ }, channel.toString(), channel.getAsyncOut());
+
// if readAsyncListener receives immediate close, it will close this handler and closing this handler sets channel variable to null
if(channel != null) {
- sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn());
+ sshWriteAsyncHandler = new AsyncSshHandlerWriter(channel.getAsyncIn());
ctx.fireChannelActive();
}
}
ctx.fireChannelInactive();
}
- /**
- * Listener over async input stream from SSH session.
- * This listeners schedules reads in a loop until the session is closed or read fails.
- */
- private static class SshReadAsyncListener implements SshFutureListener<IoReadFuture>, AutoCloseable {
- private static final int BUFFER_SIZE = 8192;
-
- private final ChannelOutboundHandler asyncSshHandler;
- private final ChannelHandlerContext ctx;
-
- private IoInputStream asyncOut;
- private Buffer buf;
- private IoReadFuture currentReadFuture;
-
- public SshReadAsyncListener(final ChannelOutboundHandler asyncSshHandler, final ChannelHandlerContext ctx, final IoInputStream asyncOut) {
- this.asyncSshHandler = asyncSshHandler;
- this.ctx = ctx;
- this.asyncOut = asyncOut;
- buf = new Buffer(BUFFER_SIZE);
- asyncOut.read(buf).addListener(this);
- }
-
- @Override
- public synchronized void operationComplete(final IoReadFuture future) {
- if(future.getException() != null) {
- if(asyncOut.isClosed() || asyncOut.isClosing()) {
- // Ssh dropped
- logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
- } else {
- logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
- }
- invokeDisconnect();
- return;
- }
-
- if (future.getRead() > 0) {
- ctx.fireChannelRead(Unpooled.wrappedBuffer(buf.array(), 0, future.getRead()));
-
- // Schedule next read
- buf = new Buffer(BUFFER_SIZE);
- currentReadFuture = asyncOut.read(buf);
- currentReadFuture.addListener(this);
- }
- }
-
- private void invokeDisconnect() {
- try {
- asyncSshHandler.disconnect(ctx, ctx.newPromise());
- } catch (final Exception e) {
- // This should not happen
- throw new IllegalStateException(e);
- }
- }
-
- @Override
- public synchronized void close() {
- // Remove self as listener on close to prevent reading from closed input
- if(currentReadFuture != null) {
- currentReadFuture.removeListener(this);
- }
-
- asyncOut = null;
- }
- }
-
- private static final class SshWriteAsyncHandler implements AutoCloseable {
- public static final int MAX_PENDING_WRITES = 100;
-
- private final ChannelOutboundHandler channelHandler;
- private IoOutputStream asyncIn;
-
- // Counter that holds the amount of pending write messages
- // Pending write can occur in case remote window is full
- // In such case, we need to wait for the pending write to finish
- private int pendingWriteCounter;
- // Last write future, that can be pending
- private IoWriteFuture lastWriteFuture;
-
- public SshWriteAsyncHandler(final ChannelOutboundHandler channelHandler, final IoOutputStream asyncIn) {
- this.channelHandler = channelHandler;
- this.asyncIn = asyncIn;
- }
-
- int c = 0;
-
- public synchronized void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
- try {
- if(asyncIn == null || asyncIn.isClosed() || asyncIn.isClosing()) {
- // If we are closed/closing, set immediate fail
- promise.setFailure(new IllegalStateException("Channel closed"));
- } else {
- lastWriteFuture = asyncIn.write(toBuffer(msg));
- lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
-
- @Override
- public void operationComplete(final IoWriteFuture future) {
- ((ByteBuf) msg).release();
-
- // Notify success or failure
- if (future.isWritten()) {
- promise.setSuccess();
- } else {
- promise.setFailure(future.getException());
- }
-
- // Reset last pending future
- synchronized (SshWriteAsyncHandler.this) {
- lastWriteFuture = null;
- }
- }
- });
- }
- } catch (final WritePendingException e) {
- // Check limit for pending writes
- pendingWriteCounter++;
- if(pendingWriteCounter > MAX_PENDING_WRITES) {
- promise.setFailure(e);
- handlePendingFailed(ctx, new IllegalStateException("Too much pending writes(" + MAX_PENDING_WRITES + ") on channel: " + ctx.channel() +
- ", remote window is not getting read or is too small"));
- }
-
- // We need to reset buffer read index, since we've already read it when we tried to write it the first time
- ((ByteBuf) msg).resetReaderIndex();
- logger.debug("Write pending to SSH remote on channel: {}, current pending count: {}", ctx.channel(), pendingWriteCounter);
-
- // In case of pending, re-invoke write after pending is finished
- Preconditions.checkNotNull(lastWriteFuture, "Write is pending, but there was no previous write attempt", e);
- lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
- @Override
- public void operationComplete(final IoWriteFuture future) {
- // FIXME possible minor race condition, we cannot guarantee that this callback when pending is finished will be executed first
- // External thread could trigger write on this instance while we are on this line
- // Verify
- if (future.isWritten()) {
- synchronized (SshWriteAsyncHandler.this) {
- // Pending done, decrease counter
- pendingWriteCounter--;
- write(ctx, msg, promise);
- }
- } else {
- // Cannot reschedule pending, fail
- handlePendingFailed(ctx, e);
- }
- }
-
- });
- }
- }
-
- private void handlePendingFailed(final ChannelHandlerContext ctx, final Exception e) {
- logger.warn("Exception while writing to SSH remote on channel {}", ctx.channel(), e);
- try {
- channelHandler.disconnect(ctx, ctx.newPromise());
- } catch (final Exception ex) {
- // This should not happen
- throw new IllegalStateException(ex);
- }
- }
-
- @Override
- public void close() {
- asyncIn = null;
- }
-
- private Buffer toBuffer(final Object msg) {
- // TODO Buffer vs ByteBuf translate, Can we handle that better ?
- Preconditions.checkState(msg instanceof ByteBuf);
- final ByteBuf byteBuf = (ByteBuf) msg;
- final byte[] temp = new byte[byteBuf.readableBytes()];
- byteBuf.readBytes(temp, 0, byteBuf.readableBytes());
- return new Buffer(temp);
- }
-
- }
}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoReadFuture;
+import org.apache.sshd.common.util.Buffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Listener on async input stream from SSH session.
+ * This listeners schedules reads in a loop until the session is closed or read fails.
+ */
+public final class AsyncSshHandlerReader implements SshFutureListener<IoReadFuture>, AutoCloseable {
+
+ private static final Logger logger = LoggerFactory.getLogger(AsyncSshHandler.class);
+
+ private static final int BUFFER_SIZE = 8192;
+
+ private final AutoCloseable connectionClosedCallback;
+ private final ReadMsgHandler readHandler;
+
+ private final String channelId;
+ private IoInputStream asyncOut;
+ private Buffer buf;
+ private IoReadFuture currentReadFuture;
+
+ public AsyncSshHandlerReader(final AutoCloseable connectionClosedCallback, final ReadMsgHandler readHandler, final String channelId, final IoInputStream asyncOut) {
+ this.connectionClosedCallback = connectionClosedCallback;
+ this.readHandler = readHandler;
+ this.channelId = channelId;
+ this.asyncOut = asyncOut;
+ buf = new Buffer(BUFFER_SIZE);
+ asyncOut.read(buf).addListener(this);
+ }
+
+ @Override
+ public synchronized void operationComplete(final IoReadFuture future) {
+ if(future.getException() != null) {
+ if(asyncOut.isClosed() || asyncOut.isClosing()) {
+ // Ssh dropped
+ logger.debug("Ssh session dropped on channel: {}", channelId, future.getException());
+ } else {
+ logger.warn("Exception while reading from SSH remote on channel {}", channelId, future.getException());
+ }
+ invokeDisconnect();
+ return;
+ }
+
+ if (future.getRead() > 0) {
+ final ByteBuf msg = Unpooled.wrappedBuffer(buf.array(), 0, future.getRead());
+ if(logger.isTraceEnabled()) {
+ logger.trace("Reading message on channel: {}, message: {}", channelId, AsyncSshHandlerWriter.byteBufToString(msg));
+ }
+ readHandler.onMessageRead(msg);
+
+ // Schedule next read
+ buf = new Buffer(BUFFER_SIZE);
+ currentReadFuture = asyncOut.read(buf);
+ currentReadFuture.addListener(this);
+ }
+ }
+
+ private void invokeDisconnect() {
+ try {
+ connectionClosedCallback.close();
+ } catch (final Exception e) {
+ // This should not happen
+ throw new IllegalStateException(e);
+ }
+ }
+
+ @Override
+ public synchronized void close() {
+ // Remove self as listener on close to prevent reading from closed input
+ if(currentReadFuture != null) {
+ currentReadFuture.removeListener(this);
+ currentReadFuture = null;
+ }
+
+ asyncOut = null;
+ }
+
+ public interface ReadMsgHandler {
+
+ void onMessageRead(ByteBuf msg);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.Queue;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.common.io.IoWriteFuture;
+import org.apache.sshd.common.io.WritePendingException;
+import org.apache.sshd.common.util.Buffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Async Ssh writer. Takes messages(byte arrays) and sends them encrypted to remote server.
+ * Also handles pending writes by caching requests until pending state is over.
+ */
+public final class AsyncSshHandlerWriter implements AutoCloseable {
+
+ private static final Logger logger = LoggerFactory
+ .getLogger(AsyncSshHandlerWriter.class);
+
+ // public static final int MAX_PENDING_WRITES = 1000;
+ // TODO implement Limiting mechanism for pending writes
+ // But there is a possible issue with limiting:
+ // 1. What to do when queue is full ? Immediate Fail for every request ?
+ // 2. At this level we might be dealing with Chunks of messages(not whole messages) and unexpected behavior might occur
+ // when we send/queue 1 chunk and fail the other chunks
+
+ private IoOutputStream asyncIn;
+
+ // Order has to be preserved for queued writes
+ private final Deque<PendingWriteRequest> pending = new LinkedList<>();
+
+ public AsyncSshHandlerWriter(final IoOutputStream asyncIn) {
+ this.asyncIn = asyncIn;
+ }
+
+ public synchronized void write(final ChannelHandlerContext ctx,
+ final Object msg, final ChannelPromise promise) {
+ // TODO check for isClosed, isClosing might be performed by mina SSH internally and is not required here
+ // If we are closed/closing, set immediate fail
+ if (asyncIn == null || asyncIn.isClosed() || asyncIn.isClosing()) {
+ promise.setFailure(new IllegalStateException("Channel closed"));
+ } else {
+ final ByteBuf byteBufMsg = (ByteBuf) msg;
+ if (pending.isEmpty() == false) {
+ queueRequest(ctx, byteBufMsg, promise);
+ return;
+ }
+
+ writeWithPendingDetection(ctx, promise, byteBufMsg);
+ }
+ }
+
+ private void writeWithPendingDetection(final ChannelHandlerContext ctx, final ChannelPromise promise, final ByteBuf byteBufMsg) {
+ try {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Writing request on channel: {}, message: {}", ctx.channel(), byteBufToString(byteBufMsg));
+ }
+ asyncIn.write(toBuffer(byteBufMsg)).addListener(new SshFutureListener<IoWriteFuture>() {
+
+ @Override
+ public void operationComplete(final IoWriteFuture future) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Ssh write request finished on channel: {} with result: {}: and ex:{}, message: {}",
+ ctx.channel(), future.isWritten(), future.getException(), byteBufToString(byteBufMsg));
+ }
+
+ // Notify success or failure
+ if (future.isWritten()) {
+ promise.setSuccess();
+ } else {
+ logger.warn("Ssh write request failed on channel: {} for message: {}", ctx.channel(), byteBufToString(byteBufMsg), future.getException());
+ promise.setFailure(future.getException());
+ }
+
+ // Not needed anymore, release
+ byteBufMsg.release();
+
+ // Check pending queue and schedule next
+ // At this time we are guaranteed that we are not in pending state anymore so the next request should succeed
+ writePendingIfAny();
+ }
+ });
+ } catch (final WritePendingException e) {
+ queueRequest(ctx, byteBufMsg, promise);
+ }
+ }
+
+ private synchronized void writePendingIfAny() {
+ if (pending.peek() == null) {
+ return;
+ }
+
+ // In case of pending, reschedule next message from queue
+ final PendingWriteRequest pendingWrite = pending.poll();
+ final ByteBuf msg = pendingWrite.msg;
+ if (logger.isTraceEnabled()) {
+ logger.trace("Writing pending request on channel: {}, message: {}", pendingWrite.ctx.channel(), byteBufToString(msg));
+ }
+
+ writeWithPendingDetection(pendingWrite.ctx, pendingWrite.promise, msg);
+ }
+
+ public static String byteBufToString(final ByteBuf msg) {
+ msg.resetReaderIndex();
+ final String s = msg.toString(Charsets.UTF_8);
+ msg.resetReaderIndex();
+ return s;
+ }
+
+ private void queueRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
+// try {
+ logger.debug("Write pending on channel: {}, queueing, current queue size: {}", ctx.channel(), pending.size());
+ if (logger.isTraceEnabled()) {
+ logger.trace("Queueing request due to pending: {}", byteBufToString(msg));
+ }
+ new PendingWriteRequest(ctx, msg, promise).pend(pending);
+// } catch (final Exception ex) {
+// logger.warn("Unable to queue write request on channel: {}. Setting fail for the request: {}", ctx.channel(), ex, byteBufToString(msg));
+// msg.release();
+// promise.setFailure(ex);
+// }
+ }
+
+ @Override
+ public synchronized void close() {
+ asyncIn = null;
+ }
+
+ private Buffer toBuffer(final ByteBuf msg) {
+ // TODO Buffer vs ByteBuf translate, Can we handle that better ?
+ final byte[] temp = new byte[msg.readableBytes()];
+ msg.readBytes(temp, 0, msg.readableBytes());
+ return new Buffer(temp);
+ }
+
+ private static final class PendingWriteRequest {
+ private final ChannelHandlerContext ctx;
+ private final ByteBuf msg;
+ private final ChannelPromise promise;
+
+ public PendingWriteRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
+ this.ctx = ctx;
+ // Reset reader index, last write (failed) attempt moved index to the end
+ msg.resetReaderIndex();
+ this.msg = msg;
+ this.promise = promise;
+ }
+
+ public void pend(final Queue<PendingWriteRequest> pending) {
+ // Preconditions.checkState(pending.size() < MAX_PENDING_WRITES,
+ // "Too much pending writes(%s) on channel: %s, remote window is not getting read or is too small",
+ // pending.size(), ctx.channel());
+ Preconditions.checkState(pending.offer(this), "Cannot pend another request write (pending count: %s) on channel: %s",
+ pending.size(), ctx.channel());
+ }
+ }
+}
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
import java.io.IOException;
import java.net.SocketAddress;
-import java.nio.channels.WritePendingException;
import org.apache.sshd.ClientChannel;
import org.apache.sshd.ClientSession;
import org.apache.sshd.SshClient;
import org.apache.sshd.common.util.Buffer;
import org.junit.After;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Matchers;
import org.mockito.Mock;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
// make first write stop pending
firstWriteListener.operationComplete(ioWriteFuture);
- // intercept third listener, this is regular listener for second write to determine success or failure
- final ListenableFuture<SshFutureListener<IoWriteFuture>> afterPendingListener = stubAddListener(ioWriteFuture);
// notify listener for second write that pending has ended
pendingListener.get().operationComplete(ioWriteFuture);
- // Notify third listener (regular listener for second write) that second write succeeded
- afterPendingListener.get().operationComplete(ioWriteFuture);
// verify both write promises successful
verify(firstWritePromise).setSuccess();
verify(secondWritePromise).setSuccess();
}
+ @Ignore("Pending queue is not limited")
@Test
public void testWritePendingMax() throws Exception {
asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
final ChannelPromise secondWritePromise = getMockedPromise();
// now make write throw pending exception
doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class));
- for (int i = 0; i < 1000; i++) {
+ for (int i = 0; i < 1001; i++) {
asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise);
}
- verify(ctx).fireChannelInactive();
+ verify(secondWritePromise, times(1)).setFailure(any(Throwable.class));
}
@Test
private ChannelSubsystem getMockedSubsystemChannel(final IoInputStream asyncOut, final IoOutputStream asyncIn) throws IOException {
final ChannelSubsystem subsystemChannel = mock(ChannelSubsystem.class);
+ doReturn("subsystemChannel").when(subsystemChannel).toString();
+
doNothing().when(subsystemChannel).setStreaming(any(ClientChannel.Streaming.class));
final OpenFuture openFuture = mock(OpenFuture.class);
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>netconf-ssh</artifactId>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>
</dependency>
- <dependency>
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- </dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-netty-util</artifactId>
- <scope>test</scope>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.netconf.ssh;
-
-import com.google.common.base.Preconditions;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.annotation.concurrent.ThreadSafe;
-
-import org.opendaylight.controller.netconf.auth.AuthProvider;
-import org.opendaylight.controller.netconf.ssh.threads.Handshaker;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.local.LocalAddress;
-
-/**
- * Thread that accepts client connections. Accepted socket is forwarded to {@link org.opendaylight.controller.netconf.ssh.threads.Handshaker},
- * which is executed in {@link #handshakeExecutor}.
- */
-@ThreadSafe
-public final class NetconfSSHServer extends Thread implements AutoCloseable {
-
- private static final Logger logger = LoggerFactory.getLogger(NetconfSSHServer.class);
- private static final AtomicLong sessionIdCounter = new AtomicLong();
-
- private final ServerSocket serverSocket;
- private final LocalAddress localAddress;
- private final EventLoopGroup bossGroup;
- private Optional<AuthProvider> authProvider = Optional.absent();
- private final ExecutorService handshakeExecutor;
- private final char[] pem;
- private volatile boolean up;
-
- private NetconfSSHServer(final int serverPort, final LocalAddress localAddress, final EventLoopGroup bossGroup, final char[] pem) throws IOException {
- super(NetconfSSHServer.class.getSimpleName());
- this.bossGroup = bossGroup;
- this.pem = pem;
- logger.trace("Creating SSH server socket on port {}", serverPort);
- this.serverSocket = new ServerSocket(serverPort);
- if (serverSocket.isBound() == false) {
- throw new IllegalStateException("Socket can't be bound to requested port :" + serverPort);
- }
- logger.trace("Server socket created.");
- this.localAddress = localAddress;
- this.up = true;
- handshakeExecutor = Executors.newFixedThreadPool(10);
- }
-
- public static NetconfSSHServer start(final int serverPort, final LocalAddress localAddress, final EventLoopGroup bossGroup, final char[] pemArray) throws IOException {
- final NetconfSSHServer netconfSSHServer = new NetconfSSHServer(serverPort, localAddress, bossGroup, pemArray);
- netconfSSHServer.start();
- return netconfSSHServer;
- }
-
- public synchronized AuthProvider getAuthProvider() {
- Preconditions.checkState(authProvider.isPresent(), "AuthenticationProvider is not set up, cannot authenticate user");
- return authProvider.get();
- }
-
- public synchronized void setAuthProvider(final AuthProvider authProvider) {
- if(this.authProvider != null) {
- logger.debug("Changing auth provider to {}", authProvider);
- }
- this.authProvider = Optional.fromNullable(authProvider);
- }
-
- @Override
- public void close() throws IOException {
- up = false;
- logger.trace("Closing SSH server socket.");
- serverSocket.close();
- bossGroup.shutdownGracefully();
- logger.trace("SSH server socket closed.");
- }
-
- @VisibleForTesting
- public InetSocketAddress getLocalSocketAddress() {
- return (InetSocketAddress) serverSocket.getLocalSocketAddress();
- }
-
- @Override
- public void run() {
- while (up) {
- Socket acceptedSocket = null;
- try {
- acceptedSocket = serverSocket.accept();
- } catch (final IOException e) {
- if (up == false) {
- logger.trace("Exiting server thread", e);
- } else {
- logger.warn("Exception occurred during socket.accept", e);
- }
- }
- if (acceptedSocket != null) {
- try {
- final Handshaker task = new Handshaker(acceptedSocket, localAddress, sessionIdCounter.incrementAndGet(), getAuthProvider(), bossGroup, pem);
- handshakeExecutor.submit(task);
- } catch (final IOException e) {
- logger.warn("Cannot set PEMHostKey, closing connection", e);
- closeSocket(acceptedSocket);
- } catch (final IllegalStateException e) {
- logger.warn("Cannot accept connection, closing", e);
- closeSocket(acceptedSocket);
- }
- }
- }
- logger.debug("Server thread is exiting");
- }
-
- private void closeSocket(final Socket acceptedSocket) {
- try {
- acceptedSocket.close();
- } catch (final IOException e) {
- logger.warn("Ignoring exception while closing socket", e);
- }
- }
-
-}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh;
+
+import com.google.common.base.Preconditions;
+import io.netty.bootstrap.Bootstrap;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.local.LocalAddress;
+import io.netty.channel.local.LocalChannel;
+import io.netty.util.concurrent.GenericFutureListener;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import org.apache.sshd.common.NamedFactory;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.server.AsyncCommand;
+import org.apache.sshd.server.Command;
+import org.apache.sshd.server.Environment;
+import org.apache.sshd.server.ExitCallback;
+import org.apache.sshd.server.SessionAware;
+import org.apache.sshd.server.session.ServerSession;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This command handles all netconf related rpc and forwards to delegate server.
+ * Uses netty to make a local connection to delegate server.
+ *
+ * Command is Apache Mina SSH terminology for objects handling ssh data.
+ */
+public class RemoteNetconfCommand implements AsyncCommand, SessionAware {
+
+ private static final Logger logger = LoggerFactory.getLogger(RemoteNetconfCommand.class);
+
+ private final EventLoopGroup clientEventGroup;
+ private final LocalAddress localAddress;
+
+ private IoInputStream in;
+ private IoOutputStream out;
+ private ExitCallback callback;
+ private NetconfHelloMessageAdditionalHeader netconfHelloMessageAdditionalHeader;
+
+ private Channel clientChannel;
+ private ChannelFuture clientChannelFuture;
+
+ public RemoteNetconfCommand(final EventLoopGroup clientEventGroup, final LocalAddress localAddress) {
+ this.clientEventGroup = clientEventGroup;
+ this.localAddress = localAddress;
+ }
+
+ @Override
+ public void setIoInputStream(final IoInputStream in) {
+ this.in = in;
+ }
+
+ @Override
+ public void setIoOutputStream(final IoOutputStream out) {
+ this.out = out;
+ }
+
+ @Override
+ public void setIoErrorStream(final IoOutputStream err) {
+ // TODO do we want to use error stream in some way ?
+ }
+
+ @Override
+ public void setInputStream(final InputStream in) {
+ throw new UnsupportedOperationException("Synchronous IO is unsupported");
+ }
+
+ @Override
+ public void setOutputStream(final OutputStream out) {
+ throw new UnsupportedOperationException("Synchronous IO is unsupported");
+
+ }
+
+ @Override
+ public void setErrorStream(final OutputStream err) {
+ throw new UnsupportedOperationException("Synchronous IO is unsupported");
+
+ }
+
+ @Override
+ public void setExitCallback(final ExitCallback callback) {
+ this.callback = callback;
+ }
+
+ @Override
+ public void start(final Environment env) throws IOException {
+ logger.trace("Establishing internal connection to netconf server for client: {}", getClientAddress());
+
+ final Bootstrap clientBootstrap = new Bootstrap();
+ clientBootstrap.group(clientEventGroup).channel(LocalChannel.class);
+
+ clientBootstrap
+ .handler(new ChannelInitializer<LocalChannel>() {
+ @Override
+ public void initChannel(final LocalChannel ch) throws Exception {
+ ch.pipeline().addLast(new SshProxyClientHandler(in, out, netconfHelloMessageAdditionalHeader, callback));
+ }
+ });
+ clientChannelFuture = clientBootstrap.connect(localAddress);
+ clientChannelFuture.addListener(new GenericFutureListener<ChannelFuture>() {
+
+ @Override
+ public void operationComplete(final ChannelFuture future) throws Exception {
+ if(future.isSuccess()) {
+ clientChannel = clientChannelFuture.channel();
+ } else {
+ logger.warn("Unable to establish internal connection to netconf server for client: {}", getClientAddress());
+ Preconditions.checkNotNull(callback, "Exit callback must be set");
+ callback.onExit(1, "Unable to establish internal connection to netconf server for client: "+ getClientAddress());
+ }
+ }
+ });
+ }
+
+ @Override
+ public void destroy() {
+ logger.trace("Releasing internal connection to netconf server for client: {} on channel: {}",
+ getClientAddress(), clientChannel);
+
+ clientChannelFuture.cancel(true);
+ if(clientChannel != null) {
+ clientChannel.close().addListener(new GenericFutureListener<ChannelFuture>() {
+
+ @Override
+ public void operationComplete(final ChannelFuture future) throws Exception {
+ if (future.isSuccess() == false) {
+ logger.warn("Unable to release internal connection to netconf server on channel: {}", clientChannel);
+ }
+ }
+ });
+ }
+ }
+
+ private String getClientAddress() {
+ return netconfHelloMessageAdditionalHeader.getAddress();
+ }
+
+ @Override
+ public void setSession(final ServerSession session) {
+ final SocketAddress remoteAddress = session.getIoSession().getRemoteAddress();
+ String hostName = "";
+ String port = "";
+ if(remoteAddress instanceof InetSocketAddress) {
+ hostName = ((InetSocketAddress) remoteAddress).getAddress().getHostAddress();
+ port = Integer.toString(((InetSocketAddress) remoteAddress).getPort());
+ }
+ netconfHelloMessageAdditionalHeader = new NetconfHelloMessageAdditionalHeader(
+ session.getUsername(), hostName, port, "ssh", "client");
+ }
+
+ public static class NetconfCommandFactory implements NamedFactory<Command> {
+
+ public static final String NETCONF = "netconf";
+
+ private final EventLoopGroup clientBootstrap;
+ private final LocalAddress localAddress;
+
+ public NetconfCommandFactory(final EventLoopGroup clientBootstrap, final LocalAddress localAddress) {
+
+ this.clientBootstrap = clientBootstrap;
+ this.localAddress = localAddress;
+ }
+
+ @Override
+ public String getName() {
+ return NETCONF;
+ }
+
+ @Override
+ public RemoteNetconfCommand create() {
+ return new RemoteNetconfCommand(clientBootstrap, localAddress);
+ }
+ }
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.server.ExitCallback;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.client.AsyncSshHandlerReader;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.client.AsyncSshHandlerWriter;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Netty handler that reads SSH from remote client and writes to delegate server and reads from delegate server and writes to remote client
+ */
+final class SshProxyClientHandler extends ChannelInboundHandlerAdapter {
+
+ private static final Logger logger = LoggerFactory.getLogger(SshProxyClientHandler.class);
+
+ private final IoInputStream in;
+ private final IoOutputStream out;
+
+ private AsyncSshHandlerReader asyncSshHandlerReader;
+ private AsyncSshHandlerWriter asyncSshHandlerWriter;
+
+ private final NetconfHelloMessageAdditionalHeader netconfHelloMessageAdditionalHeader;
+ private final ExitCallback callback;
+
+ public SshProxyClientHandler(final IoInputStream in, final IoOutputStream out,
+ final NetconfHelloMessageAdditionalHeader netconfHelloMessageAdditionalHeader,
+ final ExitCallback callback) {
+ this.in = in;
+ this.out = out;
+ this.netconfHelloMessageAdditionalHeader = netconfHelloMessageAdditionalHeader;
+ this.callback = callback;
+ }
+
+ @Override
+ public void channelActive(final ChannelHandlerContext ctx) throws Exception {
+ writeAdditionalHeader(ctx);
+
+ asyncSshHandlerWriter = new AsyncSshHandlerWriter(out);
+ asyncSshHandlerReader = new AsyncSshHandlerReader(new AutoCloseable() {
+ @Override
+ public void close() throws Exception {
+ // Close both sessions (delegate server and remote client)
+ ctx.fireChannelInactive();
+ ctx.disconnect();
+ ctx.close();
+ asyncSshHandlerReader.close();
+ asyncSshHandlerWriter.close();
+ }
+ }, new AsyncSshHandlerReader.ReadMsgHandler() {
+ @Override
+ public void onMessageRead(final ByteBuf msg) {
+ if(logger.isTraceEnabled()) {
+ logger.trace("Forwarding message for client: {} on channel: {}, message: {}",
+ netconfHelloMessageAdditionalHeader.getAddress(), ctx.channel(), AsyncSshHandlerWriter.byteBufToString(msg));
+ }
+ // Just forward to delegate
+ ctx.writeAndFlush(msg);
+ }
+ }, "ssh" + netconfHelloMessageAdditionalHeader.getAddress(), in);
+
+
+ super.channelActive(ctx);
+ }
+
+ private void writeAdditionalHeader(final ChannelHandlerContext ctx) {
+ ctx.writeAndFlush(Unpooled.copiedBuffer(netconfHelloMessageAdditionalHeader.toFormattedString().getBytes()));
+ }
+
+ @Override
+ public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception {
+ asyncSshHandlerWriter.write(ctx, msg, ctx.newPromise());
+ }
+
+ @Override
+ public void channelInactive(final ChannelHandlerContext ctx) throws Exception {
+ logger.debug("Internal connection to netconf server was dropped for client: {} on channel: ",
+ netconfHelloMessageAdditionalHeader.getAddress(), ctx.channel());
+ callback.onExit(1, "Internal connection to netconf server was dropped for client: " +
+ netconfHelloMessageAdditionalHeader.getAddress() + " on channel: " + ctx.channel());
+ super.channelInactive(ctx);
+ }
+
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh;
+
+import com.google.common.collect.Lists;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.local.LocalAddress;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.channels.AsynchronousChannelGroup;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.sshd.SshServer;
+import org.apache.sshd.common.FactoryManager;
+import org.apache.sshd.common.KeyPairProvider;
+import org.apache.sshd.common.NamedFactory;
+import org.apache.sshd.common.RuntimeSshException;
+import org.apache.sshd.common.io.IoAcceptor;
+import org.apache.sshd.common.io.IoConnector;
+import org.apache.sshd.common.io.IoHandler;
+import org.apache.sshd.common.io.IoServiceFactory;
+import org.apache.sshd.common.io.IoServiceFactoryFactory;
+import org.apache.sshd.common.io.nio2.Nio2Acceptor;
+import org.apache.sshd.common.io.nio2.Nio2Connector;
+import org.apache.sshd.common.io.nio2.Nio2ServiceFactoryFactory;
+import org.apache.sshd.common.util.CloseableUtils;
+import org.apache.sshd.server.Command;
+import org.apache.sshd.server.PasswordAuthenticator;
+
+/**
+ * Proxy SSH server that just delegates decrypted content to a delegate server within same VM.
+ * Implemented using Apache Mina SSH lib.
+ */
+public class SshProxyServer implements AutoCloseable {
+
+ private final SshServer sshServer;
+ private final ScheduledExecutorService minaTimerExecutor;
+ private final EventLoopGroup clientGroup;
+ private final IoServiceFactoryFactory nioServiceWithPoolFactoryFactory;
+
+ public SshProxyServer(final ScheduledExecutorService minaTimerExecutor, final EventLoopGroup clientGroup, final ExecutorService nioExecutor) {
+ this.minaTimerExecutor = minaTimerExecutor;
+ this.clientGroup = clientGroup;
+ this.nioServiceWithPoolFactoryFactory = new NioServiceWithPoolFactory.NioServiceWithPoolFactoryFactory(nioExecutor);
+ this.sshServer = SshServer.setUpDefaultServer();
+ }
+
+ public void bind(final InetSocketAddress bindingAddress, final LocalAddress localAddress, final PasswordAuthenticator authenticator, final KeyPairProvider keyPairProvider) throws IOException {
+ sshServer.setHost(bindingAddress.getHostString());
+ sshServer.setPort(bindingAddress.getPort());
+
+ sshServer.setPasswordAuthenticator(authenticator);
+ sshServer.setKeyPairProvider(keyPairProvider);
+
+ sshServer.setIoServiceFactoryFactory(nioServiceWithPoolFactoryFactory);
+ sshServer.setScheduledExecutorService(minaTimerExecutor);
+
+ final RemoteNetconfCommand.NetconfCommandFactory netconfCommandFactory =
+ new RemoteNetconfCommand.NetconfCommandFactory(clientGroup, localAddress);
+ sshServer.setSubsystemFactories(Lists.<NamedFactory<Command>>newArrayList(netconfCommandFactory));
+ sshServer.start();
+ }
+
+ @Override
+ public void close() {
+ try {
+ sshServer.stop(true);
+ } catch (final InterruptedException e) {
+ throw new RuntimeException("Interrupted while stopping sshServer", e);
+ } finally {
+ sshServer.close(true);
+ }
+ }
+
+ /**
+ * Based on Nio2ServiceFactory with one addition: injectable executor
+ */
+ private static final class NioServiceWithPoolFactory extends CloseableUtils.AbstractCloseable implements IoServiceFactory {
+
+ private final FactoryManager manager;
+ private final AsynchronousChannelGroup group;
+
+ public NioServiceWithPoolFactory(final FactoryManager manager, final ExecutorService executor) {
+ this.manager = manager;
+ try {
+ group = AsynchronousChannelGroup.withThreadPool(executor);
+ } catch (final IOException e) {
+ throw new RuntimeSshException(e);
+ }
+ }
+
+ public IoConnector createConnector(final IoHandler handler) {
+ return new Nio2Connector(manager, handler, group);
+ }
+
+ public IoAcceptor createAcceptor(final IoHandler handler) {
+ return new Nio2Acceptor(manager, handler, group);
+ }
+
+ @Override
+ protected void doCloseImmediately() {
+ try {
+ group.shutdownNow();
+ group.awaitTermination(5, TimeUnit.SECONDS);
+ } catch (final Exception e) {
+ log.debug("Exception caught while closing channel group", e);
+ } finally {
+ super.doCloseImmediately();
+ }
+ }
+
+ private static final class NioServiceWithPoolFactoryFactory extends Nio2ServiceFactoryFactory {
+
+ private final ExecutorService nioExecutor;
+
+ private NioServiceWithPoolFactoryFactory(final ExecutorService nioExecutor) {
+ this.nioExecutor = nioExecutor;
+ }
+
+ @Override
+ public IoServiceFactory create(final FactoryManager manager) {
+ return new NioServiceWithPoolFactory(manager, nioExecutor);
+ }
+ }
+ }
+
+}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.netconf.ssh.authentication;
-
-import com.google.common.annotations.VisibleForTesting;
-import java.io.FileInputStream;
-import java.security.NoSuchAlgorithmException;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.bouncycastle.openssl.PEMWriter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.StringWriter;
-import java.security.Key;
-import java.security.KeyPair;
-import java.security.KeyPairGenerator;
-import java.security.SecureRandom;
-
-public class PEMGenerator {
- private static final Logger logger = LoggerFactory.getLogger(PEMGenerator.class);
- private static final int KEY_SIZE = 4096;
-
-
- public static String readOrGeneratePK(File privateKeyFile) throws IOException {
- if (privateKeyFile.exists() == false) {
- // generate & save to file
- try {
- return generateTo(privateKeyFile);
- } catch (Exception e) {
- logger.error("Exception occurred while generating PEM string to {}", privateKeyFile, e);
- throw new IllegalStateException("Error generating RSA key from file " + privateKeyFile);
- }
- } else {
- // read from file
- try (FileInputStream fis = new FileInputStream(privateKeyFile)) {
- return IOUtils.toString(fis);
- } catch (final IOException e) {
- logger.error("Error reading RSA key from file {}", privateKeyFile, e);
- throw new IOException("Error reading RSA key from file " + privateKeyFile, e);
- }
- }
- }
-
- /**
- * Generate private key to a file and return its content as string.
- *
- * @param privateFile path where private key should be generated
- * @return String representation of private key
- * @throws IOException
- * @throws NoSuchAlgorithmException
- */
- @VisibleForTesting
- public static String generateTo(File privateFile) throws IOException, NoSuchAlgorithmException {
- logger.info("Generating private key to {}", privateFile.getAbsolutePath());
- String privatePEM = generate();
- FileUtils.write(privateFile, privatePEM);
- return privatePEM;
- }
-
- @VisibleForTesting
- public static String generate() throws NoSuchAlgorithmException, IOException {
- KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA");
- SecureRandom sr = new SecureRandom();
- keyGen.initialize(KEY_SIZE, sr);
- KeyPair keypair = keyGen.generateKeyPair();
- return toString(keypair.getPrivate());
- }
-
- /**
- * Get string representation of a key.
- */
- private static String toString(Key key) throws IOException {
- try (StringWriter writer = new StringWriter()) {
- try (PEMWriter pemWriter = new PEMWriter(writer)) {
- pemWriter.writeObject(key);
- }
- return writer.toString();
- }
- }
-
-}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.ssh.osgi;
+
+import com.google.common.base.Preconditions;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.session.ServerSession;
+import org.opendaylight.controller.netconf.auth.AuthConstants;
+import org.opendaylight.controller.netconf.auth.AuthProvider;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.util.tracker.ServiceTracker;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class AuthProviderTracker implements ServiceTrackerCustomizer<AuthProvider, AuthProvider>, PasswordAuthenticator {
+ private static final Logger logger = LoggerFactory.getLogger(AuthProviderTracker.class);
+
+ private final BundleContext bundleContext;
+
+ private Integer maxPreference;
+ private final ServiceTracker<AuthProvider, AuthProvider> listenerTracker;
+ private AuthProvider authProvider;
+
+ public AuthProviderTracker(final BundleContext bundleContext) {
+ this.bundleContext = bundleContext;
+ listenerTracker = new ServiceTracker<>(bundleContext, AuthProvider.class, this);
+ listenerTracker.open();
+ }
+
+ @Override
+ public AuthProvider addingService(final ServiceReference<AuthProvider> reference) {
+ logger.trace("Service {} added", reference);
+ final AuthProvider authService = bundleContext.getService(reference);
+ final Integer newServicePreference = getPreference(reference);
+ if(isBetter(newServicePreference)) {
+ maxPreference = newServicePreference;
+ this.authProvider = authService;
+ }
+ return authService;
+ }
+
+ private Integer getPreference(final ServiceReference<AuthProvider> reference) {
+ final Object preferenceProperty = reference.getProperty(AuthConstants.SERVICE_PREFERENCE_KEY);
+ return preferenceProperty == null ? Integer.MIN_VALUE : Integer.valueOf(preferenceProperty.toString());
+ }
+
+ private boolean isBetter(final Integer newServicePreference) {
+ Preconditions.checkNotNull(newServicePreference);
+ if(maxPreference == null) {
+ return true;
+ }
+
+ return newServicePreference > maxPreference;
+ }
+
+ @Override
+ public void modifiedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
+ final AuthProvider authService = bundleContext.getService(reference);
+ final Integer newServicePreference = getPreference(reference);
+ if(isBetter(newServicePreference)) {
+ logger.trace("Replacing modified service {} in netconf SSH.", reference);
+ this.authProvider = authService;
+ }
+ }
+
+ @Override
+ public void removedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
+ logger.trace("Removing service {} from netconf SSH. " +
+ "SSH won't authenticate users until AuthProvider service will be started.", reference);
+ maxPreference = null;
+ this.authProvider = null;
+ }
+
+ public void stop() {
+ listenerTracker.close();
+ // sshThread should finish normally since sshServer.close stops processing
+ }
+
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ return authProvider == null ? false : authProvider.authenticated(username, password);
+ }
+}
import static com.google.common.base.Preconditions.checkState;
-import com.google.common.base.Preconditions;
-import java.io.File;
+import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import io.netty.channel.local.LocalAddress;
+import io.netty.channel.nio.NioEventLoopGroup;
import java.io.IOException;
import java.net.InetSocketAddress;
-
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
import org.apache.commons.io.FilenameUtils;
-import org.opendaylight.controller.netconf.auth.AuthConstants;
-import org.opendaylight.controller.netconf.auth.AuthProvider;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
-import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
+import org.apache.sshd.common.util.ThreadUtils;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil.InfixProp;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
-import org.osgi.framework.ServiceReference;
-import org.osgi.util.tracker.ServiceTracker;
-import org.osgi.util.tracker.ServiceTrackerCustomizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Optional;
-import com.google.common.base.Strings;
-
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.local.LocalAddress;
-import io.netty.channel.nio.NioEventLoopGroup;
-
-/**
- * Activator for netconf SSH bundle which creates SSH bridge between netconf client and netconf server. Activator
- * starts SSH Server in its own thread. This thread is closed when activator calls stop() method. Server opens socket
- * and listens for client connections. Each client connection creation is handled in separate
- * {@link org.opendaylight.controller.netconf.ssh.threads.Handshaker} thread.
- * This thread creates two additional threads {@link org.opendaylight.controller.netconf.ssh.threads.IOThread}
- * forwarding data from/to client.IOThread closes servers session and server connection when it gets -1 on input stream.
- * {@link org.opendaylight.controller.netconf.ssh.threads.IOThread}'s run method waits for -1 on input stream to finish.
- * All threads are daemons.
- */
public class NetconfSSHActivator implements BundleActivator {
private static final Logger logger = LoggerFactory.getLogger(NetconfSSHActivator.class);
- private static AuthProviderTracker authProviderTracker;
- private NetconfSSHServer server;
+ private static final java.lang.String ALGORITHM = "RSA";
+ private static final int KEY_SIZE = 4096;
+ public static final int POOL_SIZE = 8;
+
+ private ScheduledExecutorService minaTimerExecutor;
+ private NioEventLoopGroup clientGroup;
+ private ExecutorService nioExecutor;
+ private AuthProviderTracker authProviderTracker;
+
+ private SshProxyServer server;
@Override
public void start(final BundleContext bundleContext) throws IOException {
+ minaTimerExecutor = Executors.newScheduledThreadPool(POOL_SIZE, new ThreadFactory() {
+ @Override
+ public Thread newThread(final Runnable r) {
+ return new Thread(r, "netconf-ssh-server-mina-timers");
+ }
+ });
+ clientGroup = new NioEventLoopGroup();
+ nioExecutor = ThreadUtils.newFixedThreadPool("netconf-ssh-server-nio-group", POOL_SIZE);
server = startSSHServer(bundleContext);
}
if(authProviderTracker != null) {
authProviderTracker.stop();
}
+
+ if(nioExecutor!=null) {
+ nioExecutor.shutdownNow();
+ }
+
+ if(clientGroup != null) {
+ clientGroup.shutdownGracefully();
+ }
+
+ if(minaTimerExecutor != null) {
+ minaTimerExecutor.shutdownNow();
+ }
}
- private static NetconfSSHServer startSSHServer(final BundleContext bundleContext) throws IOException {
- final Optional<InetSocketAddress> maybeSshSocketAddress = NetconfConfigUtil.extractNetconfServerAddress(bundleContext,
- InfixProp.ssh);
+ private SshProxyServer startSSHServer(final BundleContext bundleContext) throws IOException {
+ final Optional<InetSocketAddress> maybeSshSocketAddress = NetconfConfigUtil.extractNetconfServerAddress(bundleContext, InfixProp.ssh);
if (maybeSshSocketAddress.isPresent() == false) {
logger.trace("SSH bridge not configured");
final LocalAddress localAddress = NetconfConfigUtil.getNetconfLocalAddress();
- final String path = FilenameUtils.separatorsToSystem(NetconfConfigUtil.getPrivateKeyPath(bundleContext));
- checkState(!Strings.isNullOrEmpty(path), "Path to ssh private key is blank. Reconfigure %s", NetconfConfigUtil.getPrivateKeyKey());
- final String privateKeyPEMString = PEMGenerator.readOrGeneratePK(new File(path));
-
- final EventLoopGroup bossGroup = new NioEventLoopGroup();
- final NetconfSSHServer server = NetconfSSHServer.start(sshSocketAddress.getPort(), localAddress, bossGroup, privateKeyPEMString.toCharArray());
-
- authProviderTracker = new AuthProviderTracker(bundleContext, server);
+ authProviderTracker = new AuthProviderTracker(bundleContext);
- return server;
- }
+ final String path = FilenameUtils.separatorsToSystem(NetconfConfigUtil.getPrivateKeyPath(bundleContext));
+ checkState(!Strings.isNullOrEmpty(path), "Path to ssh private key is blank. Reconfigure %s",
+ NetconfConfigUtil.getPrivateKeyKey());
- private static Thread runNetconfSshThread(final NetconfSSHServer server) {
- final Thread serverThread = new Thread(server, "netconf SSH server thread");
- serverThread.setDaemon(true);
- serverThread.start();
- logger.trace("Netconf SSH bridge up and running.");
- return serverThread;
+ final SshProxyServer sshProxyServer = new SshProxyServer(minaTimerExecutor, clientGroup, nioExecutor);
+ sshProxyServer.bind(sshSocketAddress, localAddress, authProviderTracker, new PEMGeneratorHostKeyProvider(path, ALGORITHM, KEY_SIZE));
+ return sshProxyServer;
}
- private static class AuthProviderTracker implements ServiceTrackerCustomizer<AuthProvider, AuthProvider> {
- private final BundleContext bundleContext;
- private final NetconfSSHServer server;
-
- private Integer maxPreference;
- private Thread sshThread;
- private final ServiceTracker<AuthProvider, AuthProvider> listenerTracker;
-
- public AuthProviderTracker(final BundleContext bundleContext, final NetconfSSHServer server) {
- this.bundleContext = bundleContext;
- this.server = server;
- listenerTracker = new ServiceTracker<>(bundleContext, AuthProvider.class, this);
- listenerTracker.open();
- }
-
- @Override
- public AuthProvider addingService(final ServiceReference<AuthProvider> reference) {
- logger.trace("Service {} added", reference);
- final AuthProvider authService = bundleContext.getService(reference);
- final Integer newServicePreference = getPreference(reference);
- if(isBetter(newServicePreference)) {
- maxPreference = newServicePreference;
- server.setAuthProvider(authService);
- if(sshThread == null) {
- sshThread = runNetconfSshThread(server);
- }
- }
- return authService;
- }
-
- private Integer getPreference(final ServiceReference<AuthProvider> reference) {
- final Object preferenceProperty = reference.getProperty(AuthConstants.SERVICE_PREFERENCE_KEY);
- return preferenceProperty == null ? Integer.MIN_VALUE : Integer.valueOf(preferenceProperty.toString());
- }
-
- private boolean isBetter(final Integer newServicePreference) {
- Preconditions.checkNotNull(newServicePreference);
- if(maxPreference == null) {
- return true;
- }
-
- return newServicePreference > maxPreference;
- }
-
- @Override
- public void modifiedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
- final AuthProvider authService = bundleContext.getService(reference);
- final Integer newServicePreference = getPreference(reference);
- if(isBetter(newServicePreference)) {
- logger.trace("Replacing modified service {} in netconf SSH.", reference);
- server.setAuthProvider(authService);
- }
- }
-
- @Override
- public void removedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
- logger.trace("Removing service {} from netconf SSH. " +
- "SSH won't authenticate users until AuthProvider service will be started.", reference);
- maxPreference = null;
- server.setAuthProvider(null);
- }
-
- public void stop() {
- listenerTracker.close();
- // sshThread should finish normally since sshServer.close stops processing
- }
-
- }
}
+++ /dev/null
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.netconf.ssh.threads;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-import static com.google.common.base.Preconditions.checkState;
-
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.Socket;
-
-import javax.annotation.concurrent.NotThreadSafe;
-import javax.annotation.concurrent.ThreadSafe;
-
-import org.opendaylight.controller.netconf.auth.AuthProvider;
-import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import ch.ethz.ssh2.AuthenticationResult;
-import ch.ethz.ssh2.PtySettings;
-import ch.ethz.ssh2.ServerAuthenticationCallback;
-import ch.ethz.ssh2.ServerConnection;
-import ch.ethz.ssh2.ServerConnectionCallback;
-import ch.ethz.ssh2.ServerSession;
-import ch.ethz.ssh2.ServerSessionCallback;
-import ch.ethz.ssh2.SimpleServerSessionCallback;
-
-import com.google.common.base.Supplier;
-
-import io.netty.bootstrap.Bootstrap;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufProcessor;
-import io.netty.buffer.Unpooled;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelFuture;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelInboundHandlerAdapter;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.local.LocalAddress;
-import io.netty.channel.local.LocalChannel;
-import io.netty.handler.stream.ChunkedStream;
-
-/**
- * One instance represents per connection, responsible for ssh handshake.
- * Once auth succeeds and correct subsystem is chosen, backend connection with
- * netty netconf server is made. This task finishes right after negotiation is done.
- */
-@ThreadSafe
-public class Handshaker implements Runnable {
- private static final Logger logger = LoggerFactory.getLogger(Handshaker.class);
-
- private final ServerConnection ganymedConnection;
- private final String session;
-
-
- public Handshaker(Socket socket, LocalAddress localAddress, long sessionId, AuthProvider authProvider,
- EventLoopGroup bossGroup, final char[] pem) throws IOException {
-
- this.session = "Session " + sessionId;
-
- String remoteAddressWithPort = socket.getRemoteSocketAddress().toString().replace("/", "");
- logger.debug("{} started with {}", session, remoteAddressWithPort);
- String remoteAddress, remotePort;
- if (remoteAddressWithPort.contains(":")) {
- String[] split = remoteAddressWithPort.split(":");
- remoteAddress = split[0];
- remotePort = split[1];
- } else {
- remoteAddress = remoteAddressWithPort;
- remotePort = "";
- }
- ServerAuthenticationCallbackImpl serverAuthenticationCallback = new ServerAuthenticationCallbackImpl(
- authProvider, session);
-
- ganymedConnection = new ServerConnection(socket);
-
- ServerConnectionCallbackImpl serverConnectionCallback = new ServerConnectionCallbackImpl(
- serverAuthenticationCallback, remoteAddress, remotePort, session,
- getGanymedAutoCloseable(ganymedConnection), localAddress, bossGroup);
-
- // initialize ganymed
- ganymedConnection.setPEMHostKey(pem, null);
- ganymedConnection.setAuthenticationCallback(serverAuthenticationCallback);
- ganymedConnection.setServerConnectionCallback(serverConnectionCallback);
- }
-
-
- private static AutoCloseable getGanymedAutoCloseable(final ServerConnection ganymedConnection) {
- return new AutoCloseable() {
- @Override
- public void close() throws Exception {
- ganymedConnection.close();
- }
- };
- }
-
- @Override
- public void run() {
- // let ganymed process handshake
- logger.trace("{} is started", session);
- try {
- // TODO this should be guarded with a timer to prevent resource exhaustion
- ganymedConnection.connect();
- } catch (IOException e) {
- logger.debug("{} connection error", session, e);
- }
- logger.trace("{} is exiting", session);
- }
-}
-
-/**
- * Netty client handler that forwards bytes from backed server to supplied output stream.
- * When backend server closes the connection, remoteConnection.close() is called to tear
- * down ssh connection.
- */
-class SSHClientHandler extends ChannelInboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(SSHClientHandler.class);
- private final AutoCloseable remoteConnection;
- private final BufferedOutputStream remoteOutputStream;
- private final String session;
- private ChannelHandlerContext channelHandlerContext;
-
- public SSHClientHandler(AutoCloseable remoteConnection, OutputStream remoteOutputStream,
- String session) {
- this.remoteConnection = remoteConnection;
- this.remoteOutputStream = new BufferedOutputStream(remoteOutputStream);
- this.session = session;
- }
-
- @Override
- public void channelActive(ChannelHandlerContext ctx) {
- this.channelHandlerContext = ctx;
- logger.debug("{} Client active", session);
- }
-
- @Override
- public void channelRead(ChannelHandlerContext ctx, Object msg) throws IOException {
- ByteBuf bb = (ByteBuf) msg;
- // we can block the server here so that slow client does not cause memory pressure
- try {
- bb.forEachByte(new ByteBufProcessor() {
- @Override
- public boolean process(byte value) throws Exception {
- remoteOutputStream.write(value);
- return true;
- }
- });
- } finally {
- bb.release();
- }
- }
-
- @Override
- public void channelReadComplete(ChannelHandlerContext ctx) throws IOException {
- logger.trace("{} Flushing", session);
- remoteOutputStream.flush();
- }
-
- @Override
- public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
- // Close the connection when an exception is raised.
- logger.warn("{} Unexpected exception from downstream", session, cause);
- ctx.close();
- }
-
- @Override
- public void channelInactive(ChannelHandlerContext ctx) throws Exception {
- logger.trace("{} channelInactive() called, closing remote client ctx", session);
- remoteConnection.close();//this should close socket and all threads created for this client
- this.channelHandlerContext = null;
- }
-
- public ChannelHandlerContext getChannelHandlerContext() {
- return checkNotNull(channelHandlerContext, "Channel is not active");
- }
-}
-
-/**
- * Ganymed handler that gets unencrypted input and output streams, connects them to netty.
- * Checks that 'netconf' subsystem is chosen by user.
- * Launches new ClientInputStreamPoolingThread thread once session is established.
- * Writes custom header to netty server, to inform it about IP address and username.
- */
-class ServerConnectionCallbackImpl implements ServerConnectionCallback {
- private static final Logger logger = LoggerFactory.getLogger(ServerConnectionCallbackImpl.class);
- public static final String NETCONF_SUBSYSTEM = "netconf";
-
- private final Supplier<String> currentUserSupplier;
- private final String remoteAddress;
- private final String remotePort;
- private final String session;
- private final AutoCloseable ganymedConnection;
- private final LocalAddress localAddress;
- private final EventLoopGroup bossGroup;
-
- ServerConnectionCallbackImpl(Supplier<String> currentUserSupplier, String remoteAddress, String remotePort, String session,
- AutoCloseable ganymedConnection, LocalAddress localAddress, EventLoopGroup bossGroup) {
- this.currentUserSupplier = currentUserSupplier;
- this.remoteAddress = remoteAddress;
- this.remotePort = remotePort;
- this.session = session;
- this.ganymedConnection = ganymedConnection;
- // initialize netty local connection
- this.localAddress = localAddress;
- this.bossGroup = bossGroup;
- }
-
- private static ChannelFuture initializeNettyConnection(LocalAddress localAddress, EventLoopGroup bossGroup,
- final SSHClientHandler sshClientHandler) {
- Bootstrap clientBootstrap = new Bootstrap();
- clientBootstrap.group(bossGroup).channel(LocalChannel.class);
-
- clientBootstrap.handler(new ChannelInitializer<LocalChannel>() {
- @Override
- public void initChannel(LocalChannel ch) throws Exception {
- ch.pipeline().addLast(sshClientHandler);
- }
- });
- // asynchronously initialize local connection to netconf server
- return clientBootstrap.connect(localAddress);
- }
-
- @Override
- public ServerSessionCallback acceptSession(final ServerSession serverSession) {
- String currentUser = currentUserSupplier.get();
- final String additionalHeader = new NetconfHelloMessageAdditionalHeader(currentUser, remoteAddress,
- remotePort, "ssh", "client").toFormattedString();
-
-
- return new SimpleServerSessionCallback() {
- @Override
- public Runnable requestSubsystem(final ServerSession ss, final String subsystem) throws IOException {
- return new Runnable() {
- @Override
- public void run() {
- if (NETCONF_SUBSYSTEM.equals(subsystem)) {
- // connect
- final SSHClientHandler sshClientHandler = new SSHClientHandler(ganymedConnection, ss.getStdin(), session);
- ChannelFuture clientChannelFuture = initializeNettyConnection(localAddress, bossGroup, sshClientHandler);
- // get channel
- final Channel channel = clientChannelFuture.awaitUninterruptibly().channel();
-
- // write additional header before polling thread is started
- // polling thread could process and forward data before additional header is written
- // This will result into unexpected state: hello message without additional header and the next message with additional header
- channel.writeAndFlush(Unpooled.copiedBuffer(additionalHeader.getBytes()));
-
- new ClientInputStreamPoolingThread(session, ss.getStdout(), channel, new AutoCloseable() {
- @Override
- public void close() throws Exception {
- logger.trace("Closing both ganymed and local connection");
- try {
- ganymedConnection.close();
- } catch (Exception e) {
- logger.warn("Ignoring exception while closing ganymed", e);
- }
- try {
- channel.close();
- } catch (Exception e) {
- logger.warn("Ignoring exception while closing channel", e);
- }
- }
- }, sshClientHandler.getChannelHandlerContext()).start();
- } else {
- logger.debug("{} Wrong subsystem requested:'{}', closing ssh session", serverSession, subsystem);
- String reason = "Only netconf subsystem is supported, requested:" + subsystem;
- closeSession(ss, reason);
- }
- }
- };
- }
-
- public void closeSession(ServerSession ss, String reason) {
- logger.trace("{} Closing session - {}", serverSession, reason);
- try {
- ss.getStdin().write(reason.getBytes());
- } catch (IOException e) {
- logger.warn("{} Exception while closing session", serverSession, e);
- }
- ss.close();
- }
-
- @Override
- public Runnable requestPtyReq(final ServerSession ss, final PtySettings pty) throws IOException {
- return new Runnable() {
- @Override
- public void run() {
- closeSession(ss, "PTY request not supported");
- }
- };
- }
-
- @Override
- public Runnable requestShell(final ServerSession ss) throws IOException {
- return new Runnable() {
- @Override
- public void run() {
- closeSession(ss, "Shell not supported");
- }
- };
- }
- };
- }
-}
-
-/**
- * Only thread that is required during ssh session, forwards client's input to netty.
- * When user closes connection, onEndOfInput.close() is called to tear down the local channel.
- */
-class ClientInputStreamPoolingThread extends Thread {
- private static final Logger logger = LoggerFactory.getLogger(ClientInputStreamPoolingThread.class);
-
- private final InputStream fromClientIS;
- private final Channel serverChannel;
- private final AutoCloseable onEndOfInput;
- private final ChannelHandlerContext channelHandlerContext;
-
- ClientInputStreamPoolingThread(String session, InputStream fromClientIS, Channel serverChannel, AutoCloseable onEndOfInput,
- ChannelHandlerContext channelHandlerContext) {
- super(ClientInputStreamPoolingThread.class.getSimpleName() + " " + session);
- this.fromClientIS = fromClientIS;
- this.serverChannel = serverChannel;
- this.onEndOfInput = onEndOfInput;
- this.channelHandlerContext = channelHandlerContext;
- }
-
- @Override
- public void run() {
- ChunkedStream chunkedStream = new ChunkedStream(fromClientIS);
- try {
- ByteBuf byteBuf;
- while ((byteBuf = chunkedStream.readChunk(channelHandlerContext/*only needed for ByteBuf alloc */)) != null) {
- serverChannel.writeAndFlush(byteBuf);
- }
- } catch (Exception e) {
- logger.warn("Exception", e);
- } finally {
- logger.trace("End of input");
- // tear down connection
- try {
- onEndOfInput.close();
- } catch (Exception e) {
- logger.warn("Ignoring exception while closing socket", e);
- }
- }
- }
-}
-
-/**
- * Authentication handler for ganymed.
- * Provides current user name after authenticating using supplied AuthProvider.
- */
-@NotThreadSafe
-class ServerAuthenticationCallbackImpl implements ServerAuthenticationCallback, Supplier<String> {
- private static final Logger logger = LoggerFactory.getLogger(ServerAuthenticationCallbackImpl.class);
- private final AuthProvider authProvider;
- private final String session;
- private String currentUser;
-
- ServerAuthenticationCallbackImpl(AuthProvider authProvider, String session) {
- this.authProvider = authProvider;
- this.session = session;
- }
-
- @Override
- public String initAuthentication(ServerConnection sc) {
- logger.trace("{} Established connection", session);
- return "Established connection" + "\r\n";
- }
-
- @Override
- public String[] getRemainingAuthMethods(ServerConnection sc) {
- return new String[]{ServerAuthenticationCallback.METHOD_PASSWORD};
- }
-
- @Override
- public AuthenticationResult authenticateWithNone(ServerConnection sc, String username) {
- return AuthenticationResult.FAILURE;
- }
-
- @Override
- public AuthenticationResult authenticateWithPassword(ServerConnection sc, String username, String password) {
- checkState(currentUser == null);
- try {
- if (authProvider.authenticated(username, password)) {
- currentUser = username;
- logger.trace("{} user {} authenticated", session, currentUser);
- return AuthenticationResult.SUCCESS;
- }
- } catch (Exception e) {
- logger.warn("{} Authentication failed", session, e);
- }
- return AuthenticationResult.FAILURE;
- }
-
- @Override
- public AuthenticationResult authenticateWithPublicKey(ServerConnection sc, String username, String algorithm,
- byte[] publicKey, byte[] signature) {
- return AuthenticationResult.FAILURE;
- }
-
- @Override
- public String get() {
- return currentUser;
- }
-}
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
import com.google.common.base.Stopwatch;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.HashedWheelTimer;
import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
import org.junit.Test;
-import org.opendaylight.controller.netconf.auth.AuthProvider;
import org.opendaylight.controller.netconf.netty.EchoClientHandler.State;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.netconf.nettyutil.handler.ssh.client.AsyncSshHandler;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
-import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SSHTest {
public static final Logger logger = LoggerFactory.getLogger(SSHTest.class);
public static final String AHOJ = "ahoj\n";
- private EventLoopGroup nettyGroup;
- HashedWheelTimer hashedWheelTimer;
- @Before
- public void setUp() throws Exception {
+ private static EventLoopGroup nettyGroup;
+ private static HashedWheelTimer hashedWheelTimer;
+ private static ExecutorService nioExec;
+ private static ScheduledExecutorService minaTimerEx;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
hashedWheelTimer = new HashedWheelTimer();
nettyGroup = new NioEventLoopGroup();
+ nioExec = Executors.newFixedThreadPool(1);
+ minaTimerEx = Executors.newScheduledThreadPool(1);
}
- @After
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void tearDown() throws Exception {
hashedWheelTimer.stop();
- nettyGroup.shutdownGracefully();
+ nettyGroup.shutdownGracefully().await();
+ minaTimerEx.shutdownNow();
+ nioExec.shutdownNow();
}
@Test
public void test() throws Exception {
new Thread(new EchoServer(), "EchoServer").start();
- AuthProvider authProvider = mock(AuthProvider.class);
- doReturn(true).when(authProvider).authenticated(anyString(), anyString());
- doReturn("auth").when(authProvider).toString();
-
- NetconfSSHServer netconfSSHServer = NetconfSSHServer.start(10831, NetconfConfigUtil.getNetconfLocalAddress(),
- new NioEventLoopGroup(), PEMGenerator.generate().toCharArray());
- netconfSSHServer.setAuthProvider(authProvider);
- InetSocketAddress address = netconfSSHServer.getLocalSocketAddress();
+ final InetSocketAddress addr = new InetSocketAddress("127.0.0.1", 10831);
+ final SshProxyServer sshProxyServer = new SshProxyServer(minaTimerEx, nettyGroup, nioExec);
+ sshProxyServer.bind(addr, NetconfConfigUtil.getNetconfLocalAddress(),
+ new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ return true;
+ }
+ }, new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString()));
- final EchoClientHandler echoClientHandler = connectClient(new InetSocketAddress("localhost", address.getPort()));
+ final EchoClientHandler echoClientHandler = connectClient(addr);
Stopwatch stopwatch = new Stopwatch().start();
- while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
- Thread.sleep(100);
+ while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 30) {
+ Thread.sleep(500);
}
assertTrue(echoClientHandler.isConnected());
logger.info("connected, writing to client");
echoClientHandler.write(AHOJ);
+
// check that server sent back the same string
stopwatch = stopwatch.reset().start();
- while (echoClientHandler.read().endsWith(AHOJ) == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
- Thread.sleep(100);
+ while (echoClientHandler.read().endsWith(AHOJ) == false && stopwatch.elapsed(TimeUnit.SECONDS) < 30) {
+ Thread.sleep(500);
}
+
try {
- String read = echoClientHandler.read();
+ final String read = echoClientHandler.read();
assertTrue(read + " should end with " + AHOJ, read.endsWith(AHOJ));
} finally {
logger.info("Closing socket");
- netconfSSHServer.close();
- netconfSSHServer.join();
+ sshProxyServer.close();
}
}
- public EchoClientHandler connectClient(InetSocketAddress address) {
+ public EchoClientHandler connectClient(final InetSocketAddress address) {
final EchoClientHandler echoClientHandler = new EchoClientHandler();
- ChannelInitializer<NioSocketChannel> channelInitializer = new ChannelInitializer<NioSocketChannel>() {
+ final ChannelInitializer<NioSocketChannel> channelInitializer = new ChannelInitializer<NioSocketChannel>() {
@Override
- public void initChannel(NioSocketChannel ch) throws Exception {
+ public void initChannel(final NioSocketChannel ch) throws Exception {
ch.pipeline().addFirst(AsyncSshHandler.createForNetconfSubsystem(new LoginPassword("a", "a")));
ch.pipeline().addLast(echoClientHandler);
}
};
- Bootstrap b = new Bootstrap();
+ final Bootstrap b = new Bootstrap();
b.group(nettyGroup)
.channel(NioSocketChannel.class)
@Test
public void testClientWithoutServer() throws Exception {
- InetSocketAddress address = new InetSocketAddress(12345);
+ final InetSocketAddress address = new InetSocketAddress(12345);
final EchoClientHandler echoClientHandler = connectClient(address);
- Stopwatch stopwatch = new Stopwatch().start();
+ final Stopwatch stopwatch = new Stopwatch().start();
while(echoClientHandler.getState() == State.CONNECTING && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
Thread.sleep(100);
}
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
-import ch.ethz.ssh2.Connection;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
-import java.io.InputStream;
import java.net.InetSocketAddress;
-import junit.framework.Assert;
-import org.apache.commons.io.IOUtils;
+import java.nio.file.Files;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.sshd.ClientSession;
+import org.apache.sshd.SshClient;
+import org.apache.sshd.client.future.AuthFuture;
+import org.apache.sshd.client.future.ConnectFuture;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.netconf.auth.AuthProvider;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceListener;
private static final String PASSWORD = "netconf";
private static final String HOST = "127.0.0.1";
private static final int PORT = 1830;
- private static final InetSocketAddress tcpAddress = new InetSocketAddress("127.0.0.1", 8383);
private static final Logger logger = LoggerFactory.getLogger(SSHServerTest.class);
- private Thread sshServerThread;
+
+ private SshProxyServer server;
@Mock
private BundleContext mockedContext;
-
+ private final ExecutorService nioExec = Executors.newFixedThreadPool(1);
+ private final EventLoopGroup clientGroup = new NioEventLoopGroup();
+ private final ScheduledExecutorService minaTimerEx = Executors.newScheduledThreadPool(1);
@Before
public void setUp() throws Exception {
doReturn(new ServiceReference[0]).when(mockedContext).getServiceReferences(anyString(), anyString());
logger.info("Creating SSH server");
- String pem;
- try (InputStream is = getClass().getResourceAsStream("/RSA.pk")) {
- pem = IOUtils.toString(is);
- }
-
- EventLoopGroup bossGroup = new NioEventLoopGroup();
- NetconfSSHServer server = NetconfSSHServer.start(PORT, NetconfConfigUtil.getNetconfLocalAddress(),
- bossGroup, pem.toCharArray());
- server.setAuthProvider(new AuthProvider() {
- @Override
- public boolean authenticated(final String username, final String password) {
- return true;
- }
- });
-
- sshServerThread = new Thread(server);
- sshServerThread.setDaemon(true);
- sshServerThread.start();
- logger.info("SSH server on " + PORT);
+ final InetSocketAddress addr = InetSocketAddress.createUnresolved(HOST, PORT);
+ server = new SshProxyServer(minaTimerEx, clientGroup, nioExec);
+ server.bind(addr, NetconfConfigUtil.getNetconfLocalAddress(),
+ new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ return true;
+ }
+ }, new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString()));
+ logger.info("SSH server started on " + PORT);
}
@Test
- public void connect() {
+ public void connect() throws Exception {
+ final SshClient sshClient = SshClient.setUpDefaultClient();
+ sshClient.start();
try {
- Connection conn = new Connection(HOST, PORT);
- Assert.assertNotNull(conn);
- logger.info("connecting to SSH server");
- conn.connect();
- logger.info("authenticating ...");
- boolean isAuthenticated = conn.authenticateWithPassword(USER, PASSWORD);
- Assert.assertTrue(isAuthenticated);
- } catch (Exception e) {
- logger.error("Error while starting SSH server.", e);
+ final ConnectFuture connect = sshClient.connect(USER, HOST, PORT);
+ connect.await(30, TimeUnit.SECONDS);
+ org.junit.Assert.assertTrue(connect.isConnected());
+ final ClientSession session = connect.getSession();
+ session.addPasswordIdentity(PASSWORD);
+ final AuthFuture auth = session.auth();
+ auth.await(30, TimeUnit.SECONDS);
+ org.junit.Assert.assertTrue(auth.isSuccess());
+ } finally {
+ sshClient.close(true);
+ server.close();
+ clientGroup.shutdownGracefully().await();
+ minaTimerEx.shutdownNow();
+ nioExec.shutdownNow();
}
-
}
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>netconf-tcp</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-testtool</artifactId>
<artifactId>logback-classic</artifactId>
<scope>compile</scope>
</dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcpkix-jdk15on</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-netty-util</artifactId>
</dependency>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>netconf-auth</artifactId>
+ </dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.logback_settings</artifactId>
<groupId>xmlunit</groupId>
<artifactId>xmlunit</artifactId>
</dependency>
-
+ <dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>config-util</artifactId>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-api</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.controller</groupId>
+ <artifactId>ietf-netconf-monitoring</artifactId>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-client</artifactId>
</dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-yang-types-20130715</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.opendaylight.yangtools.model</groupId>
+ <artifactId>ietf-inet-types</artifactId>
+ </dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-impl</artifactId>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-monitoring</artifactId>
</dependency>
-
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netconf-ssh</artifactId>
</dependency>
-
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>netty-config-api</artifactId>
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool;
+
+import java.util.Collections;
+import java.util.List;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+
+public class DataList {
+
+ private List<XmlElement> configList = Collections.emptyList();
+
+ public List<XmlElement> getConfigList() {
+ return configList;
+ }
+
+ public void setConfigList(List<XmlElement> configList) {
+ this.configList = configList;
+ }
+
+ public void resetConfigList() {
+ configList = Collections.emptyList();
+ }
+
+}
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
+import ch.qos.logback.classic.Level;
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.io.ByteStreams;
+import com.google.common.io.CharStreams;
import com.google.common.io.Files;
import java.io.File;
+import java.io.FileFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
+import java.util.Collections;
+import java.util.Comparator;
import java.util.List;
-
import java.util.concurrent.TimeUnit;
import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.annotation.Arg;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
-
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-import com.google.common.io.CharStreams;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
public final class Main {
- // TODO add logback config
-
- // TODO make exi configurable
-
private static final Logger LOG = LoggerFactory.getLogger(Main.class);
static class Params {
@Arg(dest = "generate-config-address")
public String generateConfigsAddress;
- @Arg(dest = "generate-configs-dir")
- public File generateConfigsDir;
+ @Arg(dest = "distro-folder")
+ public File distroFolder;
@Arg(dest = "generate-configs-batch-size")
public int generateConfigBatchSize;
@Arg(dest = "exi")
public boolean exi;
+ @Arg(dest = "debug")
+ public boolean debug;
+
static ArgumentParser getParser() {
final ArgumentParser parser = ArgumentParsers.newArgumentParser("netconf testool");
- parser.addArgument("--devices-count")
+
+ parser.description("Netconf device simulator. Detailed info can be found at https://wiki.opendaylight.org/view/OpenDaylight_Controller:Netconf:Testtool#Building_testtool");
+
+ parser.addArgument("--device-count")
.type(Integer.class)
.setDefault(1)
.type(Integer.class)
parser.addArgument("--schemas-dir")
.type(File.class)
- .required(true)
- .help("Directory containing yang schemas to describe simulated devices")
+ .help("Directory containing yang schemas to describe simulated devices. Some schemas e.g. netconf monitoring and inet types are included by default")
.dest("schemas-dir");
parser.addArgument("--starting-port")
parser.addArgument("--generate-config-connection-timeout")
.type(Integer.class)
- .setDefault((int)TimeUnit.MINUTES.toMillis(5))
+ .setDefault((int)TimeUnit.MINUTES.toMillis(30))
.help("Timeout to be generated in initial config files")
.dest("generate-config-connection-timeout");
parser.addArgument("--generate-configs-batch-size")
.type(Integer.class)
- .setDefault(100)
+ .setDefault(4000)
.help("Number of connector configs per generated file")
.dest("generate-configs-batch-size");
- parser.addArgument("--generate-configs-dir")
+ parser.addArgument("--distribution-folder")
.type(File.class)
- .help("Directory where initial config files for ODL distribution should be generated")
- .dest("generate-configs-dir");
+ .help("Directory where the karaf distribution for controller is located")
+ .dest("distro-folder");
parser.addArgument("--ssh")
.type(Boolean.class)
parser.addArgument("--exi")
.type(Boolean.class)
- .setDefault(false)
+ .setDefault(true)
.help("Whether to use exi to transport xml content")
.dest("exi");
+ parser.addArgument("--debug")
+ .type(Boolean.class)
+ .setDefault(false)
+ .help("Whether to use debug log level instead of INFO")
+ .dest("debug");
+
return parser;
}
checkArgument(deviceCount > 0, "Device count has to be > 0");
checkArgument(startingPort > 1024, "Starting port has to be > 1024");
- checkArgument(schemasDir.exists(), "Schemas dir has to exist");
- checkArgument(schemasDir.isDirectory(), "Schemas dir has to be a directory");
- checkArgument(schemasDir.canRead(), "Schemas dir has to be readable");
+ if(schemasDir != null) {
+ checkArgument(schemasDir.exists(), "Schemas dir has to exist");
+ checkArgument(schemasDir.isDirectory(), "Schemas dir has to be a directory");
+ checkArgument(schemasDir.canRead(), "Schemas dir has to be readable");
+ }
}
}
public static void main(final String[] args) {
- ch.ethz.ssh2.log.Logger.enabled = true;
-
final Params params = parseArgs(args, Params.getParser());
params.validate();
+ final ch.qos.logback.classic.Logger root = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
+ root.setLevel(params.debug ? Level.DEBUG : Level.INFO);
+
final NetconfDeviceSimulator netconfDeviceSimulator = new NetconfDeviceSimulator();
try {
final List<Integer> openDevices = netconfDeviceSimulator.start(params);
- if(params.generateConfigsDir != null) {
- new ConfigGenerator(params.generateConfigsDir, openDevices).generate(params.ssh, params.generateConfigBatchSize, params.generateConfigsTimeout, params.generateConfigsAddress);
+ if(params.distroFolder != null) {
+ final ConfigGenerator configGenerator = new ConfigGenerator(params.distroFolder, openDevices);
+ final List<File> generated = configGenerator.generate(params.ssh, params.generateConfigBatchSize, params.generateConfigsTimeout, params.generateConfigsAddress);
+ configGenerator.updateFeatureFile(generated);
+ configGenerator.changeLoadOrder();
}
} catch (final Exception e) {
LOG.error("Unhandled exception", e);
}
}
-
private static Params parseArgs(final String[] args, final ArgumentParser parser) {
final Params opt = new Params();
try {
public static final String NETCONF_USE_SSH = "false";
public static final String SIM_DEVICE_SUFFIX = "-sim-device";
- private final File directory;
+ private static final String SIM_DEVICE_CFG_PREFIX = "simulated-devices_";
+ private static final String ETC_KARAF_PATH = "etc/";
+ private static final String ETC_OPENDAYLIGHT_KARAF_PATH = ETC_KARAF_PATH + "opendaylight/karaf/";
+
+ public static final String NETCONF_CONNECTOR_ALL_FEATURE = "odl-netconf-connector-all";
+ private static final String ORG_OPS4J_PAX_URL_MVN_CFG = "org.ops4j.pax.url.mvn.cfg";
+
+ private final File configDir;
private final List<Integer> openDevices;
+ private final File ncFeatureFile;
+ private final File etcDir;
+ private final File loadOrderCfgFile;
public ConfigGenerator(final File directory, final List<Integer> openDevices) {
- this.directory = directory;
+ this.configDir = new File(directory, ETC_OPENDAYLIGHT_KARAF_PATH);
+ this.etcDir = new File(directory, ETC_KARAF_PATH);
+ this.loadOrderCfgFile = new File(etcDir, ORG_OPS4J_PAX_URL_MVN_CFG);
+ this.ncFeatureFile = getFeatureFile(directory, "features-netconf-connector");
this.openDevices = openDevices;
}
- public void generate(final boolean useSsh, final int batchSize, final int generateConfigsTimeout, final String address) {
- if(directory.exists() == false) {
- checkState(directory.mkdirs(), "Unable to create folder %s" + directory);
+ public List<File> generate(final boolean useSsh, final int batchSize, final int generateConfigsTimeout, final String address) {
+ if(configDir.exists() == false) {
+ Preconditions.checkState(configDir.mkdirs(), "Unable to create directory " + configDir);
+ }
+
+ for (final File file : configDir.listFiles(new FileFilter() {
+ @Override
+ public boolean accept(final File pathname) {
+ return !pathname.isDirectory() && pathname.getName().startsWith(SIM_DEVICE_CFG_PREFIX);
+ }
+ })) {
+ Preconditions.checkState(file.delete(), "Unable to clean previous generated file %s", file);
}
try(InputStream stream = Main.class.getResourceAsStream(NETCONF_CONNECTOR_XML)) {
checkNotNull(stream, "Cannot load %s", NETCONF_CONNECTOR_XML);
String configBlueprint = CharStreams.toString(new InputStreamReader(stream, Charsets.UTF_8));
- // TODO make address configurable
checkState(configBlueprint.contains(NETCONF_CONNECTOR_NAME));
checkState(configBlueprint.contains(NETCONF_CONNECTOR_PORT));
checkState(configBlueprint.contains(NETCONF_USE_SSH));
StringBuilder b = new StringBuilder();
b.append(before);
+ final List<File> generatedConfigs = Lists.newArrayList();
+
for (final Integer openDevice : openDevices) {
if(batchStart == null) {
batchStart = openDevice;
connectorCount++;
if(connectorCount == batchSize) {
b.append(after);
- Files.write(b.toString(), new File(directory, String.format("simulated-devices_%d-%d.xml", batchStart, openDevice)), Charsets.UTF_8);
+ final File to = new File(configDir, String.format(SIM_DEVICE_CFG_PREFIX + "%d-%d.xml", batchStart, openDevice));
+ generatedConfigs.add(to);
+ Files.write(b.toString(), to, Charsets.UTF_8);
connectorCount = 0;
b = new StringBuilder();
b.append(before);
// Write remaining
if(connectorCount != 0) {
b.append(after);
- Files.write(b.toString(), new File(directory, String.format("simulated-devices_%d-%d.xml", batchStart, openDevices.get(openDevices.size() - 1))), Charsets.UTF_8);
+ final File to = new File(configDir, String.format(SIM_DEVICE_CFG_PREFIX + "%d-%d.xml", batchStart, openDevices.get(openDevices.size() - 1)));
+ generatedConfigs.add(to);
+ Files.write(b.toString(), to, Charsets.UTF_8);
}
- LOG.info("Config files generated in {}", directory);
+ LOG.info("Config files generated in {}", configDir);
+ return generatedConfigs;
} catch (final IOException e) {
throw new RuntimeException("Unable to generate config files", e);
}
}
+
+
+ public void updateFeatureFile(final List<File> generated) {
+ // TODO karaf core contains jaxb for feature files, use that for modification
+ try {
+ final Document document = XmlUtil.readXmlToDocument(Files.toString(ncFeatureFile, Charsets.UTF_8));
+ final NodeList childNodes = document.getDocumentElement().getChildNodes();
+
+ for (int i = 0; i < childNodes.getLength(); i++) {
+ final Node item = childNodes.item(i);
+ if(item instanceof Element == false) {
+ continue;
+ }
+ if(item.getLocalName().equals("feature") ==false) {
+ continue;
+ }
+
+ if(NETCONF_CONNECTOR_ALL_FEATURE.equals(((Element) item).getAttribute("name"))) {
+ final Element ncAllFeatureDefinition = (Element) item;
+ // Clean previous generated files
+ for (final XmlElement configfile : XmlElement.fromDomElement(ncAllFeatureDefinition).getChildElements("configfile")) {
+ ncAllFeatureDefinition.removeChild(configfile.getDomElement());
+ }
+ for (final File file : generated) {
+ final Element configfile = document.createElement("configfile");
+ configfile.setTextContent("file:" + ETC_OPENDAYLIGHT_KARAF_PATH + file.getName());
+ configfile.setAttribute("finalname", ETC_OPENDAYLIGHT_KARAF_PATH + file.getName());
+ ncAllFeatureDefinition.appendChild(configfile);
+ }
+ }
+ }
+
+ Files.write(XmlUtil.toString(document), ncFeatureFile, Charsets.UTF_8);
+ LOG.info("Feature file {} updated", ncFeatureFile);
+ } catch (final IOException e) {
+ throw new RuntimeException("Unable to load features file as a resource");
+ } catch (final SAXException e) {
+ throw new RuntimeException("Unable to parse features file");
+ }
+ }
+
+
+ private static File getFeatureFile(final File distroFolder, final String featureName) {
+ checkExistingDir(distroFolder, String.format("Folder %s does not exist", distroFolder));
+
+ final File systemDir = checkExistingDir(new File(distroFolder, "system"), String.format("Folder %s does not contain a karaf distro, folder system is missing", distroFolder));
+ final File netconfConnectorFeaturesParentDir = checkExistingDir(new File(systemDir, "org/opendaylight/controller/" + featureName), String.format("Karaf distro in %s does not contain netconf-connector features", distroFolder));
+
+ // Find newest version for features
+ final File newestVersionDir = Collections.max(
+ Lists.newArrayList(netconfConnectorFeaturesParentDir.listFiles(new FileFilter() {
+ @Override
+ public boolean accept(final File pathname) {
+ return pathname.isDirectory();
+ }
+ })), new Comparator<File>() {
+ @Override
+ public int compare(final File o1, final File o2) {
+ return o1.getName().compareTo(o2.getName());
+ }
+ });
+
+ return newestVersionDir.listFiles(new FileFilter() {
+ @Override
+ public boolean accept(final File pathname) {
+ return pathname.getName().contains(featureName);
+ }
+ })[0];
+ }
+
+ private static File checkExistingDir(final File folder, final String msg) {
+ Preconditions.checkArgument(folder.exists(), msg);
+ Preconditions.checkArgument(folder.isDirectory(), msg);
+ return folder;
+ }
+
+ public void changeLoadOrder() {
+ try {
+ Files.write(ByteStreams.toByteArray(getClass().getResourceAsStream("/" +ORG_OPS4J_PAX_URL_MVN_CFG)), loadOrderCfgFile);
+ LOG.info("Load order changed to prefer local bundles/features by rewriting file {}", loadOrderCfgFile);
+ } catch (IOException e) {
+ throw new RuntimeException("Unable to rewrite features file " + loadOrderCfgFile, e);
+ }
+ }
}
}
import com.google.common.base.Charsets;
import com.google.common.base.Function;
+import com.google.common.base.Objects;
import com.google.common.base.Optional;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.io.CharStreams;
import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.local.LocalAddress;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.HashedWheelTimer;
import java.io.Closeable;
-import java.io.File;
import java.io.IOException;
+import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.net.Inet4Address;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.UnknownHostException;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.AbstractMap;
import java.util.Date;
import java.util.HashMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
import org.antlr.v4.runtime.ParserRuleContext;
import org.antlr.v4.runtime.tree.ParseTreeWalker;
+import org.apache.sshd.common.util.ThreadUtils;
+import org.apache.sshd.server.PasswordAuthenticator;
+import org.apache.sshd.server.keyprovider.PEMGeneratorHostKeyProvider;
+import org.apache.sshd.server.session.ServerSession;
import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceSnapshot;
import org.opendaylight.controller.netconf.monitoring.osgi.NetconfMonitoringOperationService;
-import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
-import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
+import org.opendaylight.controller.netconf.ssh.SshProxyServer;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceRepresentation;
import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
import org.opendaylight.yangtools.yang.model.repo.spi.PotentialSchemaSource;
import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceListener;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider;
import org.opendaylight.yangtools.yang.model.repo.util.FilesystemSchemaSourceCache;
import org.opendaylight.yangtools.yang.parser.builder.impl.BuilderUtils;
import org.opendaylight.yangtools.yang.parser.builder.impl.ModuleBuilder;
private static final Logger LOG = LoggerFactory.getLogger(NetconfDeviceSimulator.class);
- public static final int CONNECTION_TIMEOUT_MILLIS = 20000;
-
private final NioEventLoopGroup nettyThreadgroup;
private final HashedWheelTimer hashedWheelTimer;
private final List<Channel> devicesChannels = Lists.newArrayList();
+ private final List<SshProxyServer> sshWrappers = Lists.newArrayList();
+ private final ScheduledExecutorService minaTimerExecutor;
+ private final ExecutorService nioExecutor;
public NetconfDeviceSimulator() {
- this(new NioEventLoopGroup(), new HashedWheelTimer());
+ // TODO make pool size configurable
+ this(new NioEventLoopGroup(), new HashedWheelTimer(),
+ Executors.newScheduledThreadPool(8, new ThreadFactoryBuilder().setNameFormat("netconf-ssh-server-mina-timers-%d").build()),
+ ThreadUtils.newFixedThreadPool("netconf-ssh-server-nio-group", 8));
}
- public NetconfDeviceSimulator(final NioEventLoopGroup eventExecutors, final HashedWheelTimer hashedWheelTimer) {
+ private NetconfDeviceSimulator(final NioEventLoopGroup eventExecutors, final HashedWheelTimer hashedWheelTimer, final ScheduledExecutorService minaTimerExecutor, final ExecutorService nioExecutor) {
this.nettyThreadgroup = eventExecutors;
this.hashedWheelTimer = hashedWheelTimer;
+ this.minaTimerExecutor = minaTimerExecutor;
+ this.nioExecutor = nioExecutor;
}
private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi, final int generateConfigsTimeout) {
}
private Map<ModuleBuilder, String> toModuleBuilders(final Map<SourceIdentifier, Map.Entry<ASTSchemaSource, YangTextSchemaSource>> sources) {
- final Map<SourceIdentifier, ParserRuleContext> asts = Maps.transformValues(sources, new Function<Map.Entry<ASTSchemaSource, YangTextSchemaSource>, ParserRuleContext>() {
- @Override
- public ParserRuleContext apply(final Map.Entry<ASTSchemaSource, YangTextSchemaSource> input) {
- return input.getKey().getAST();
- }
- });
- final Map<String, TreeMap<Date, URI>> namespaceContext = BuilderUtils.createYangNamespaceContext(
- asts.values(), Optional.<SchemaContext>absent());
+ final Map<SourceIdentifier, ParserRuleContext> asts = Maps.transformValues(sources, new Function<Map.Entry<ASTSchemaSource, YangTextSchemaSource>, ParserRuleContext>() {
+ @Override
+ public ParserRuleContext apply(final Map.Entry<ASTSchemaSource, YangTextSchemaSource> input) {
+ return input.getKey().getAST();
+ }
+ });
+ final Map<String, TreeMap<Date, URI>> namespaceContext = BuilderUtils.createYangNamespaceContext(
+ asts.values(), Optional.<SchemaContext>absent());
- final ParseTreeWalker walker = new ParseTreeWalker();
- final Map<ModuleBuilder, String> sourceToBuilder = new HashMap<>();
+ final ParseTreeWalker walker = new ParseTreeWalker();
+ final Map<ModuleBuilder, String> sourceToBuilder = new HashMap<>();
- for (final Map.Entry<SourceIdentifier, ParserRuleContext> entry : asts.entrySet()) {
- final ModuleBuilder moduleBuilder = YangParserListenerImpl.create(namespaceContext, entry.getKey().getName(),
- walker, entry.getValue()).getModuleBuilder();
+ for (final Map.Entry<SourceIdentifier, ParserRuleContext> entry : asts.entrySet()) {
+ final ModuleBuilder moduleBuilder = YangParserListenerImpl.create(namespaceContext, entry.getKey().getName(),
+ walker, entry.getValue()).getModuleBuilder();
- try(InputStreamReader stream = new InputStreamReader(sources.get(entry.getKey()).getValue().openStream(), Charsets.UTF_8)) {
- sourceToBuilder.put(moduleBuilder, CharStreams.toString(stream));
- } catch (final IOException e) {
- throw new RuntimeException(e);
- }
+ try(InputStreamReader stream = new InputStreamReader(sources.get(entry.getKey()).getValue().openStream(), Charsets.UTF_8)) {
+ sourceToBuilder.put(moduleBuilder, CharStreams.toString(stream));
+ } catch (final IOException e) {
+ throw new RuntimeException(e);
}
-
- return sourceToBuilder;
}
+ return sourceToBuilder;
+ }
+
public List<Integer> start(final Main.Params params) {
+ LOG.info("Starting {}, {} simulated devices starting on port {}", params.deviceCount, params.ssh ? "SSH" : "TCP", params.startingPort);
+
final Map<ModuleBuilder, String> moduleBuilders = parseSchemasToModuleBuilders(params);
final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi, params.generateConfigsTimeout);
int currentPort = params.startingPort;
final List<Integer> openDevices = Lists.newArrayList();
+
+ // Generate key to temp folder
+ final PEMGeneratorHostKeyProvider keyPairProvider = getPemGeneratorHostKeyProvider();
+
for (int i = 0; i < params.deviceCount; i++) {
final InetSocketAddress address = getAddress(currentPort);
final ChannelFuture server;
if(params.ssh) {
+ final InetSocketAddress bindingAddress = InetSocketAddress.createUnresolved("0.0.0.0", currentPort);
final LocalAddress tcpLocalAddress = new LocalAddress(address.toString());
server = dispatcher.createLocalServer(tcpLocalAddress);
try {
- final NetconfSSHServer sshServer = NetconfSSHServer.start(currentPort, tcpLocalAddress, nettyThreadgroup, getPemArray());
- sshServer.setAuthProvider(new AcceptingAuthProvider());
+ final SshProxyServer sshServer = new SshProxyServer(minaTimerExecutor, nettyThreadgroup, nioExecutor);
+ sshServer.bind(bindingAddress, tcpLocalAddress,
+ new PasswordAuthenticator() {
+ @Override
+ public boolean authenticate(final String username, final String password, final ServerSession session) {
+ // All connections are accepted
+ return true;
+ }
+ }, keyPairProvider);
+
+ sshWrappers.add(sshServer);
} catch (final Exception e) {
LOG.warn("Cannot start simulated device on {}, skipping", address, e);
// Close local server and continue
devicesChannels.add(server.channel());
openDevices.add(currentPort - 1);
-
}
if(openDevices.size() == params.deviceCount) {
- LOG.info("All simulated devices started successfully from port {} to {}", params.startingPort, currentPort);
+ LOG.info("All simulated devices started successfully from port {} to {}", params.startingPort, currentPort - 1);
} else {
LOG.warn("Not all simulated devices started successfully. Started devices ar on ports {}", openDevices);
}
return openDevices;
}
- private char[] getPemArray() {
+ private PEMGeneratorHostKeyProvider getPemGeneratorHostKeyProvider() {
try {
- return PEMGenerator.readOrGeneratePK(new File("PK")).toCharArray();
+ final Path tempFile = Files.createTempFile("tempKeyNetconfTest", "suffix");
+ return new PEMGeneratorHostKeyProvider(tempFile.toAbsolutePath().toString());
} catch (final IOException e) {
+ LOG.error("Unable to generate PEM key", e);
throw new RuntimeException(e);
}
}
public void schemaSourceUnregistered(final PotentialSchemaSource<?> potentialSchemaSource) {}
});
- final FilesystemSchemaSourceCache<YangTextSchemaSource> cache = new FilesystemSchemaSourceCache<>(consumer, YangTextSchemaSource.class, params.schemasDir);
- consumer.registerSchemaSourceListener(cache);
+ if(params.schemasDir != null) {
+ final FilesystemSchemaSourceCache<YangTextSchemaSource> cache = new FilesystemSchemaSourceCache<>(consumer, YangTextSchemaSource.class, params.schemasDir);
+ consumer.registerSchemaSourceListener(cache);
+ }
+
+ addDefaultSchemas(consumer);
final Map<SourceIdentifier, Map.Entry<ASTSchemaSource, YangTextSchemaSource>> asts = Maps.newHashMap();
for (final SourceIdentifier loadedSource : loadedSources) {
return toModuleBuilders(asts);
}
+ private void addDefaultSchemas(final SharedSchemaRepository consumer) {
+ SourceIdentifier sId = new SourceIdentifier("ietf-netconf-monitoring", "2010-10-04");
+ registerSource(consumer, "/META-INF/yang/ietf-netconf-monitoring.yang", sId);
+
+ sId = new SourceIdentifier("ietf-yang-types", "2013-07-15");
+ registerSource(consumer, "/META-INF/yang/ietf-yang-types@2013-07-15.yang", sId);
+
+ sId = new SourceIdentifier("ietf-inet-types", "2010-09-24");
+ registerSource(consumer, "/META-INF/yang/ietf-inet-types.yang", sId);
+ }
+
+ private void registerSource(final SharedSchemaRepository consumer, final String resource, final SourceIdentifier sourceId) {
+ consumer.registerSchemaSource(new SchemaSourceProvider<SchemaSourceRepresentation>() {
+ @Override
+ public CheckedFuture<? extends SchemaSourceRepresentation, SchemaSourceException> getSource(final SourceIdentifier sourceIdentifier) {
+ return Futures.immediateCheckedFuture(new YangTextSchemaSource(sourceId) {
+ @Override
+ protected Objects.ToStringHelper addToStringAttributes(final Objects.ToStringHelper toStringHelper) {
+ return toStringHelper;
+ }
+
+ @Override
+ public InputStream openStream() throws IOException {
+ return getClass().getResourceAsStream(resource);
+ }
+ });
+ }
+ }, PotentialSchemaSource.create(sourceId, YangTextSchemaSource.class, PotentialSchemaSource.Costs.IMMEDIATE.getValue()));
+ }
+
private static InetSocketAddress getAddress(final int port) {
try {
// TODO make address configurable
@Override
public void close() {
+ for (final SshProxyServer sshWrapper : sshWrappers) {
+ sshWrapper.close();
+ }
for (final Channel deviceCh : devicesChannels) {
deviceCh.close();
}
nettyThreadgroup.shutdownGracefully();
+ minaTimerExecutor.shutdownNow();
+ nioExecutor.shutdownNow();
// close Everything
}
static class SimulatedOperationService implements NetconfOperationService {
private final Set<Capability> capabilities;
- private static SimulatedGet sGet;
+ private final long currentSessionId;
public SimulatedOperationService(final Set<Capability> capabilities, final long currentSessionId) {
this.capabilities = capabilities;
- sGet = new SimulatedGet(String.valueOf(currentSessionId));
+ this.currentSessionId = currentSessionId;
}
@Override
@Override
public Set<NetconfOperation> getNetconfOperations() {
- return Sets.<NetconfOperation>newHashSet(sGet);
+ final DataList storage = new DataList();
+ final SimulatedGet sGet = new SimulatedGet(String.valueOf(currentSessionId), storage);
+ final SimulatedEditConfig sEditConfig = new SimulatedEditConfig(String.valueOf(currentSessionId), storage);
+ final SimulatedGetConfig sGetConfig = new SimulatedGetConfig(String.valueOf(currentSessionId), storage);
+ final SimulatedCommit sCommit = new SimulatedCommit(String.valueOf(currentSessionId));
+ return Sets.<NetconfOperation>newHashSet(sGet, sGetConfig, sEditConfig, sCommit);
}
@Override
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+class SimulatedCommit extends AbstractConfigNetconfOperation {
+
+ SimulatedCommit(final String netconfSessionIdForReporting) {
+ super(null, netconfSessionIdForReporting);
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ @Override
+ protected String getOperationName() {
+ return XmlNetconfConstants.COMMIT;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.editconfig.EditConfigXmlParser;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Attr;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+class SimulatedEditConfig extends AbstractConfigNetconfOperation {
+ private static final String DELETE_EDIT_CONFIG = "delete";
+ private static final String OPERATION = "operation";
+ private static final String REMOVE_EDIT_CONFIG = "remove";
+ private final DataList storage;
+
+ SimulatedEditConfig(final String netconfSessionIdForReporting, final DataList storage) {
+ super(null, netconfSessionIdForReporting);
+ this.storage = storage;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ final XmlElement configElementData = operationElement.getOnlyChildElement(XmlNetconfConstants.CONFIG_KEY);
+
+ containsDelete(configElementData);
+ if(containsDelete(configElementData)){
+ storage.resetConfigList();
+ } else {
+ storage.setConfigList(configElementData.getChildElements());
+ }
+
+ return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.<String>absent());
+ }
+
+ @Override
+ protected String getOperationName() {
+ return EditConfigXmlParser.EDIT_CONFIG;
+ }
+
+ private boolean containsDelete(final XmlElement element) {
+ for (final Attr o : element.getAttributes().values()) {
+ if (o.getLocalName().equals(OPERATION)
+ && (o.getValue().equals(DELETE_EDIT_CONFIG) || o.getValue()
+ .equals(REMOVE_EDIT_CONFIG))) {
+ return true;
+ }
+
+ }
+
+ for (final XmlElement xmlElement : element.getChildElements()) {
+ if (containsDelete(xmlElement)) {
+ return true;
+ }
+
+ }
+
+ return false;
+ }
+}
class SimulatedGet extends AbstractConfigNetconfOperation {
- SimulatedGet(final String netconfSessionIdForReporting) {
+ private final DataList storage;
+
+ SimulatedGet(final String netconfSessionIdForReporting, final DataList storage) {
super(null, netconfSessionIdForReporting);
+ this.storage = storage;
}
@Override
protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
- return XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+ final Element element = XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+
+ for(final XmlElement e : storage.getConfigList()) {
+ final Element domElement = e.getDomElement();
+ element.appendChild(element.getOwnerDocument().importNode(domElement, true));
+ }
+
+ return element;
}
@Override
--- /dev/null
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.test.tool;
+
+import com.google.common.base.Optional;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.confignetconfconnector.operations.AbstractConfigNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+class SimulatedGetConfig extends AbstractConfigNetconfOperation {
+
+ private final DataList storage;
+
+ SimulatedGetConfig(final String netconfSessionIdForReporting, final DataList storage) {
+ super(null, netconfSessionIdForReporting);
+ this.storage = storage;
+ }
+
+ @Override
+ protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+ final Element element = XmlUtil.createElement(document, XmlNetconfConstants.DATA_KEY, Optional.<String>absent());
+
+ for(final XmlElement e : storage.getConfigList()) {
+ final Element domElement = e.getDomElement();
+ element.appendChild(element.getOwnerDocument().importNode(domElement, true));
+ }
+
+ return element;
+ }
+
+ @Override
+ protected String getOperationName() {
+ return XmlNetconfConstants.GET_CONFIG;
+ }
+}
--- /dev/null
+################################################################################
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+
+#
+# If set to true, the following property will not allow any certificate to be used
+# when accessing Maven repositories through SSL
+#
+#org.ops4j.pax.url.mvn.certificateCheck=
+
+#
+# Path to the local Maven settings file.
+# The repositories defined in this file will be automatically added to the list
+# of default repositories if the 'org.ops4j.pax.url.mvn.repositories' property
+# below is not set.
+# The following locations are checked for the existence of the settings.xml file
+# * 1. looks for the specified url
+# * 2. if not found looks for ${user.home}/.m2/settings.xml
+# * 3. if not found looks for ${maven.home}/conf/settings.xml
+# * 4. if not found looks for ${M2_HOME}/conf/settings.xml
+#
+#org.ops4j.pax.url.mvn.settings=
+
+#
+# Path to the local Maven repository which is used to avoid downloading
+# artifacts when they already exist locally.
+# The value of this property will be extracted from the settings.xml file
+# above, or defaulted to:
+# System.getProperty( "user.home" ) + "/.m2/repository"
+#
+org.ops4j.pax.url.mvn.localRepository=${karaf.home}/${karaf.default.repository}
+
+#
+# Default this to false. It's just weird to use undocumented repos
+#
+org.ops4j.pax.url.mvn.useFallbackRepositories=false
+
+#
+# Uncomment if you don't wanna use the proxy settings
+# from the Maven conf/settings.xml file
+#
+# org.ops4j.pax.url.mvn.proxySupport=false
+
+#
+# Disable aether support by default. This ensure that the defaultRepositories
+# below will be used
+#
+#org.ops4j.pax.url.mvn.disableAether=true
+
+#
+# Comma separated list of repositories scanned when resolving an artifact.
+# Those repositories will be checked before iterating through the
+# below list of repositories and even before the local repository
+# A repository url can be appended with zero or more of the following flags:
+# @snapshots : the repository contains snaphots
+# @noreleases : the repository does not contain any released artifacts
+#
+# The following property value will add the system folder as a repo.
+#
+#org.ops4j.pax.url.mvn.defaultRepositories=
+
+# Use the default local repo (e.g.~/.m2/repository) as a "remote" repo
+org.ops4j.pax.url.mvn.defaultLocalRepoAsRemote=false
+
+#
+# Comma separated list of repositories scanned when resolving an artifact.
+# The default list includes the following repositories containing releases:
+# http://repo1.maven.org/maven2
+# http://repository.apache.org/content/groups/snapshots-group
+# http://svn.apache.org/repos/asf/servicemix/m2-repo
+# http://repository.springsource.com/maven/bundles/release
+# http://repository.springsource.com/maven/bundles/external
+# To add repositories to the default ones, prepend '+' to the list of repositories
+# to add.
+# A repository url can be appended with zero or more of the following flags:
+# @snapshots : the repository contains snaphots
+# @noreleases : the repository does not contain any released artifacts
+# @id=reponid : the id for the repository, just like in the settings.xml this is optional but recomendet
+#
+# The default list doesn't contain any repository containing snapshots as it can impact the artifacts resolution.
+# You may want to add the following repositories containing snapshots:
+# http://repository.apache.org/content/groups/snapshots-group@id=apache@snapshots@noreleases
+# http://oss.sonatype.org/content/repositories/snapshots@id=sonatype.snapshots.deploy@snapshots@norelease
+# http://oss.sonatype.org/content/repositories/ops4j-snapshots@id=ops4j.sonatype.snapshots.deploy@snapshots@noreleases
+#
+org.ops4j.pax.url.mvn.repositories= \
+ file:${karaf.home}/${karaf.default.repository}@id=system.repository, \
+ file:${karaf.data}/kar@id=kar.repository@multi, \
+ http://repo1.maven.org/maven2@id=central, \
+ http://repository.springsource.com/maven/bundles/release@id=spring.ebr.release, \
+ http://repository.springsource.com/maven/bundles/external@id=spring.ebr.external
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<relativePath>../</relativePath>
</parent>
<artifactId>netconf-usermanager</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
</parent>
<artifactId>netconf-util</artifactId>
<packaging>bundle</packaging>
public final class XmlElement {
+ public static final String DEFAULT_NAMESPACE_PREFIX = "";
+
private final Element element;
private static final Logger logger = LoggerFactory.getLogger(XmlElement.class);
return xmlElement;
}
- private static Map<String, String> extractNamespaces(Element typeElement) throws NetconfDocumentedException {
+ private Map<String, String> extractNamespaces() throws NetconfDocumentedException {
Map<String, String> namespaces = new HashMap<>();
- NamedNodeMap attributes = typeElement.getAttributes();
+ NamedNodeMap attributes = element.getAttributes();
for (int i = 0; i < attributes.getLength(); i++) {
Node attribute = attributes.item(i);
String attribKey = attribute.getNodeName();
if (attribKey.startsWith(XmlUtil.XMLNS_ATTRIBUTE_KEY)) {
String prefix;
if (attribKey.equals(XmlUtil.XMLNS_ATTRIBUTE_KEY)) {
- prefix = "";
+ prefix = DEFAULT_NAMESPACE_PREFIX;
} else {
if (!attribKey.startsWith(XmlUtil.XMLNS_ATTRIBUTE_KEY + ":")){
throw new NetconfDocumentedException("Attribute doesn't start with :",
namespaces.put(prefix, attribute.getNodeValue());
}
}
+
+ // namespace does not have to be defined on this element but inherited
+ if(!namespaces.containsKey(DEFAULT_NAMESPACE_PREFIX)) {
+ Optional<String> namespaceOptionally = getNamespaceOptionally();
+ if(namespaceOptionally.isPresent()) {
+ namespaces.put(DEFAULT_NAMESPACE_PREFIX, namespaceOptionally.get());
+ }
+ }
+
return namespaces;
}
}
public String getName() {
- if (element.getLocalName()!=null && !element.getLocalName().equals("")){
+ if (element.getLocalName()!=null && !element.getLocalName().equals(DEFAULT_NAMESPACE_PREFIX)){
return element.getLocalName();
}
return element.getTagName();
public String getTextContent() throws NetconfDocumentedException {
NodeList childNodes = element.getChildNodes();
if (childNodes.getLength() == 0) {
- return "";
+ return DEFAULT_NAMESPACE_PREFIX;
}
for(int i = 0; i < childNodes.getLength(); i++) {
Node textChild = childNodes.item(i);
public String getNamespaceAttribute() throws MissingNameSpaceException {
String attribute = element.getAttribute(XmlUtil.XMLNS_ATTRIBUTE_KEY);
- if (attribute == null || attribute.equals("")){
+ if (attribute == null || attribute.equals(DEFAULT_NAMESPACE_PREFIX)){
throw new MissingNameSpaceException(String.format("Element %s must specify namespace",
toString()),
NetconfDocumentedException.ErrorType.application,
* is found value will be null.
*/
public Map.Entry<String/* prefix */, String/* namespace */> findNamespaceOfTextContent() throws NetconfDocumentedException {
- Map<String, String> namespaces = extractNamespaces(element);
+ Map<String, String> namespaces = extractNamespaces();
String textContent = getTextContent();
int indexOfColon = textContent.indexOf(':');
String prefix;
if (indexOfColon > -1) {
prefix = textContent.substring(0, indexOfColon);
} else {
- prefix = "";
+ prefix = DEFAULT_NAMESPACE_PREFIX;
}
if (!namespaces.containsKey(prefix)) {
throw new IllegalArgumentException("Cannot find namespace for " + XmlUtil.toString(element) + ". Prefix from content is "
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>netconf-subsystem</artifactId>
- <version>0.2.5-SNAPSHOT</version>
+ <version>0.3.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<prerequisites>
<module>netconf-connector-config</module>
<module>netconf-auth</module>
<module>netconf-usermanager</module>
+ <module>netconf-testtool</module>
+
+ <module>netconf-artifacts</module>
</modules>
<dependencies>
<module>netconf-it</module>
</modules>
</profile>
-
- <profile>
- <id>testtool</id>
- <activation>
- <activeByDefault>false</activeByDefault>
- </activation>
- <modules>
- <module>netconf-testtool</module>
- </modules>
- </profile>
</profiles>
</project>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>networkconfig.neutron.implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<enunciate.version>1.26.2</enunciate.version>
portDB.putIfAbsent(input.getID(), input);
// if there are no fixed IPs, allocate one for each subnet in the network
INeutronSubnetCRUD systemCRUD = NeutronCRUDInterfaces.getINeutronSubnetCRUD(this);
+ if (input.getFixedIPs() == null){
+ input.setFixedIPs(new ArrayList<Neutron_IPs>());
+ }
if (input.getFixedIPs().size() == 0) {
List<Neutron_IPs> list = input.getFixedIPs();
Iterator<NeutronSubnet> subnetIterator = systemCRUD.getAllSubnets().iterator();
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>networkconfig.neutron</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<enunciate.version>1.26.2</enunciate.version>
<groupId>org.opendaylight.controller</groupId>
<artifactId>app-northbound</artifactId>
- <version>0.0.1-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>maven-archetype</packaging>
<name>app-northbound</name>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.1-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>bundlescanner</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>bundlescanner.implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>commons.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>connectionmanager.northbound</artifactId>
- <version>0.1.2-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>containermanager.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>controllermanager.northbound</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>flowprogrammer.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>hosttracker.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>httpservice-bridge</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>HttpService bridge web application</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>northbound.integrationtest</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>ch.qos.logback</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>northbound.client</artifactId>
- <version>0.0.1-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>jolokia-bridge</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>Jolokia bridge web application</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>networkconfig.bridgedomain.northbound</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>networkconfig.neutron.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
org.eclipse.persistence.jaxb.rs,
com.sun.jersey.spi.container.servlet,
javax.ws.rs,
+ javax.ws.rs.ext,
javax.ws.rs.core,
javax.xml.bind.annotation,
javax.xml.bind,
import org.codehaus.enunciate.jaxrs.StatusCodes;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware;
import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
-import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
/**
* For now, the LB pool member data is maintained with the INeutronLoadBalancerPoolCRUD,
- * although there may be an overlap with INeutronLoadBalancerPoolMemberCRUD's cache.
- * TODO: Consolidate and maintain a single copy
+ * and not duplicated within the INeutronLoadBalancerPoolMemberCRUD's cache.
*/
@Path("/pools")
service.neutronLoadBalancerPoolDeleted(singleton);
}
}
-
- /*
- * remove corresponding members from the member cache too
- */
- INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(this);
- if (loadBalancerPoolMemberInterface != null) {
- List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = new
- ArrayList<NeutronLoadBalancerPoolMember>(loadBalancerPoolMemberInterface.getAllNeutronLoadBalancerPoolMembers());
- Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
- while (i.hasNext()) {
- NeutronLoadBalancerPoolMember member = i.next();
- if (member.getPoolID() == loadBalancerPoolUUID)
- loadBalancerPoolMemberInterface.removeNeutronLoadBalancerPoolMember(member.getPoolMemberID());
- }
- }
return Response.status(204).build();
}
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>forwarding.staticrouting.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>statistics.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>subnets.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>swagger-ui</artifactId>
- <version>0.0.1-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>switchmanager.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>topology.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>usermanager.northbound</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.0-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../opendaylight/commons/opendaylight</relativePath>
</parent>
<scm>
<groupId>org.opendaylight.controller</groupId>
<artifactId>northboundtest</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>protocol_plugins.openflow</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>protocol_plugins.stub</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>routing.dijkstra_implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal</artifactId>
- <version>0.8.1-SNAPSHOT</version>
+ <version>0.9.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
package org.opendaylight.controller.sal.action;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.opendaylight.controller.sal.core.Property;
+
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
@XmlElement
protected ActionType type;
private transient boolean isValid = true;
+ private ConcurrentMap<String, Property> props;
/* Dummy constructor for JAXB */
public Action() {
}
}
+ /**
+ * Gets the list of metadata currently registered with this match
+ *
+ * @return List of metadata currently registered
+ */
+ public List <Property> getMetadatas() {
+ if (this.props != null) {
+ // Return all the values in the map
+ Collection res = this.props.values();
+ if (res == null) {
+ return Collections.emptyList();
+ }
+ return new ArrayList<Property>(res);
+ }
+ return Collections.emptyList();
+ }
+
+ /**
+ * Gets the metadata registered with a name if present
+ *
+ * @param name the name of the property to be extracted
+ *
+ * @return List of metadata currently registered
+ */
+ public Property getMetadata(String name) {
+ if (name == null) {
+ return null;
+ }
+ if (this.props != null) {
+ // Return the Property associated to the name
+ return this.props.get(name);
+ }
+ return null;
+ }
+
+ /**
+ * Sets the metadata associated to a name. If the name or prop is NULL,
+ * an exception NullPointerException will be raised.
+ *
+ * @param name the name of the property to be set
+ * @param prop, property to be set
+ */
+ public void setMetadata(String name, Property prop) {
+ if (this.props == null) {
+ props = new ConcurrentHashMap<String, Property>();
+ }
+
+ if (this.props != null) {
+ this.props.put(name, prop);
+ }
+ }
+
+ /**
+ * Remove the metadata associated to a name. If the name is NULL,
+ * nothing will be removed.
+ *
+ * @param name the name of the property to be set
+ * @param prop, property to be set
+ *
+ * @return List of metadata currently registered
+ */
+ public void removeMetadata(String name) {
+ if (this.props == null) {
+ return;
+ }
+
+ if (this.props != null) {
+ this.props.remove(name);
+ }
+ // It's intentional to keep the this.props still allocated
+ // till the parent data structure will be alive, so to avoid
+ // unnecessary allocation/deallocation, even if it's holding
+ // nothing
+ }
+
/**
* Returns the type of this action
*
-
/*
* Copyright (c) 2013 Cisco Systems, Inc. and others. All rights reserved.
*
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentHashMap;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
+import org.opendaylight.controller.sal.core.Property;
import org.opendaylight.controller.sal.utils.EtherTypes;
import org.opendaylight.controller.sal.utils.IPProtocols;
import org.opendaylight.controller.sal.utils.NetUtils;
reversableMatches = Collections.unmodifiableMap(map);
}
private Map<MatchType, MatchField> fields;
- private int matches; // concise way to tell which fields the match is set for (may remove if not needed)
+ private int matches; // concise way to tell which fields the match
+ // is set for (may remove if not needed)
+ private ConcurrentMap<String, Property> props;
public Match() {
fields = new HashMap<MatchType, MatchField>();
matches = match.matches;
}
+ /**
+ * Gets the list of metadata currently registered with this match
+ *
+ * @return List of metadata currently registered
+ */
+ public List <Property> getMetadatas() {
+ if (this.props != null) {
+ // Return all the values in the map
+ Collection res = this.props.values();
+ if (res == null) {
+ return Collections.emptyList();
+ }
+ return new ArrayList<Property>(res);
+ }
+ return Collections.emptyList();
+ }
+
+ /**
+ * Gets the metadata registered with a name if present
+ *
+ * @param name the name of the property to be extracted
+ *
+ * @return List of metadata currently registered
+ */
+ public Property getMetadata(String name) {
+ if (name == null) {
+ return null;
+ }
+ if (this.props != null) {
+ // Return the Property associated to the name
+ return this.props.get(name);
+ }
+ return null;
+ }
+
+ /**
+ * Sets the metadata associated to a name. If the name or prop is NULL,
+ * an exception NullPointerException will be raised.
+ *
+ * @param name the name of the property to be set
+ * @param prop, property to be set
+ */
+ public void setMetadata(String name, Property prop) {
+ if (this.props == null) {
+ props = new ConcurrentHashMap<String, Property>();
+ }
+
+ if (this.props != null) {
+ this.props.put(name, prop);
+ }
+ }
+
+ /**
+ * Remove the metadata associated to a name. If the name is NULL,
+ * nothing will be removed.
+ *
+ * @param name the name of the property to be set
+ * @param prop, property to be set
+ *
+ * @return List of metadata currently registered
+ */
+ public void removeMetadata(String name) {
+ if (this.props == null) {
+ return;
+ }
+
+ if (this.props != null) {
+ this.props.remove(name);
+ }
+ // It's intentional to keep the this.props still allocated
+ // till the parent data structure will be alive, so to avoid
+ // unnecessary allocation/deallocation, even if it's holding
+ // nothing
+ }
+
/**
* Generic setter for frame/packet/message's header fields against which to match
* Note: For MAC addresses, please pass the cloned value to this function
import org.junit.Assert;
import org.opendaylight.controller.sal.core.Node;
import org.opendaylight.controller.sal.core.NodeConnector;
+import org.opendaylight.controller.sal.core.Property;
+import org.opendaylight.controller.sal.core.Tables;
+import org.opendaylight.controller.sal.core.Tier;
import org.opendaylight.controller.sal.utils.EtherTypes;
import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
import org.slf4j.Logger;
.createNodeConnector((short) 5, node))));
Assert.assertFalse(actions.contains(new Controller()));
}
+
+ @Test
+ public void testMetadata() {
+ Property tier1 = new Tier(1);
+ Property tier2 = new Tier(2);
+ Property table1 = new Tables((byte)0x7f);
+ Action a1 = new PopVlan();
+ List<Property> resprops = null;
+ resprops = a1.getMetadatas();
+ // This should be an empty list
+ Assert.assertTrue(resprops.isEmpty());
+ a1.setMetadata("tier1", tier1);
+ a1.setMetadata("tier2", tier2);
+ a1.setMetadata("table1", table1);
+ resprops = a1.getMetadatas();
+ // Check for the number of elements in it
+ Assert.assertTrue(resprops.size() == 3);
+ // Check if the elements are in it
+ Assert.assertTrue(resprops.contains(tier1));
+ Assert.assertTrue(resprops.contains(tier2));
+ Assert.assertTrue(resprops.contains(table1));
+ // Check for single elements retrieve
+ Assert.assertTrue(a1.getMetadata("tier1").equals(tier1));
+ Assert.assertTrue(a1.getMetadata("tier2").equals(tier2));
+ Assert.assertTrue(a1.getMetadata("table1").equals(table1));
+ // Now remove an element and make sure the remaining are
+ // correct
+ a1.removeMetadata("tier1");
+
+ resprops = a1.getMetadatas();
+ // Check for the number of elements in it
+ Assert.assertTrue(resprops.size() == 2);
+ // Check if the elements are in it
+ Assert.assertFalse(resprops.contains(tier1));
+ Assert.assertTrue(resprops.contains(tier2));
+ Assert.assertTrue(resprops.contains(table1));
+ // Check for single elements retrieve
+ Assert.assertTrue(a1.getMetadata("table1").equals(table1));
+ Assert.assertTrue(a1.getMetadata("tier2").equals(tier2));
+ Assert.assertNull(a1.getMetadata("tier1"));
+
+ // Check for an element never existed
+ Assert.assertNull(a1.getMetadata("table100"));
+
+ // Remove them all
+ a1.removeMetadata("tier2");
+ a1.removeMetadata("table1");
+
+ // Remove also a non-existent one
+ a1.removeMetadata("table100");
+
+ resprops = a1.getMetadatas();
+ // Check there are no elements left
+ Assert.assertTrue(resprops.size() == 0);
+
+ // Now check for exception on setting null values
+ try {
+ a1.setMetadata("foo", null);
+ // The line below should never be reached
+ Assert.assertTrue(false);
+ } catch (NullPointerException nue) {
+ // NPE should be raised for null value
+ Assert.assertTrue(true);
+ }
+
+ // Now check on using null key
+ try {
+ a1.setMetadata(null, table1);
+ // The line below should never be reached
+ Assert.assertTrue(false);
+ } catch (NullPointerException nue) {
+ // NPE should be raised for null value
+ Assert.assertTrue(true);
+ }
+
+ // Now check on using null key and null value
+ try {
+ a1.setMetadata(null, null);
+ // The line below should never be reached
+ Assert.assertTrue(false);
+ } catch (NullPointerException nue) {
+ // NPE should be raised for null value
+ Assert.assertTrue(true);
+ }
+ }
}
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
+import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import org.opendaylight.controller.sal.core.Node;
import org.opendaylight.controller.sal.core.NodeConnector;
+import org.opendaylight.controller.sal.core.Property;
+import org.opendaylight.controller.sal.core.Tables;
+import org.opendaylight.controller.sal.core.Tier;
import org.opendaylight.controller.sal.utils.EtherTypes;
import org.opendaylight.controller.sal.utils.IPProtocols;
import org.opendaylight.controller.sal.utils.NodeConnectorCreator;
// No intersection with null match, empty set
Assert.assertNull(m6.getIntersection(null));
}
+
+ @Test
+ public void testMetadata() {
+ Property tier1 = new Tier(1);
+ Property tier2 = new Tier(2);
+ Property table1 = new Tables((byte)0x7f);
+ Match m1 = new Match();
+ List<Property> resprops = null;
+ resprops = m1.getMetadatas();
+ // This should be null
+ Assert.assertTrue(resprops.isEmpty());
+ m1.setMetadata("tier1", tier1);
+ m1.setMetadata("tier2", tier2);
+ m1.setMetadata("table1", table1);
+ resprops = m1.getMetadatas();
+ // Check for the number of elements in it
+ Assert.assertTrue(resprops.size() == 3);
+ // Check if the elements are in it
+ Assert.assertTrue(resprops.contains(tier1));
+ Assert.assertTrue(resprops.contains(tier2));
+ Assert.assertTrue(resprops.contains(table1));
+ // Check for single elements retrieve
+ Assert.assertTrue(m1.getMetadata("tier1").equals(tier1));
+ Assert.assertTrue(m1.getMetadata("tier2").equals(tier2));
+ Assert.assertTrue(m1.getMetadata("table1").equals(table1));
+ // Now remove an element and make sure the remaining are
+ // correct
+ m1.removeMetadata("tier1");
+
+ resprops = m1.getMetadatas();
+ // Check for the number of elements in it
+ Assert.assertTrue(resprops.size() == 2);
+ // Check if the elements are in it
+ Assert.assertFalse(resprops.contains(tier1));
+ Assert.assertTrue(resprops.contains(tier2));
+ Assert.assertTrue(resprops.contains(table1));
+ // Check for single elements retrieve
+ Assert.assertTrue(m1.getMetadata("table1").equals(table1));
+ Assert.assertTrue(m1.getMetadata("tier2").equals(tier2));
+ Assert.assertNull(m1.getMetadata("tier1"));
+
+ // Check for an element never existed
+ Assert.assertNull(m1.getMetadata("table100"));
+
+ // Remove them all
+ m1.removeMetadata("tier2");
+ m1.removeMetadata("table1");
+
+ // Remove also a non-existent one
+ m1.removeMetadata("table100");
+
+ resprops = m1.getMetadatas();
+ // Check there are no elements left
+ Assert.assertTrue(resprops.size() == 0);
+
+ // Now check for exception on setting null values
+ try {
+ m1.setMetadata("foo", null);
+ // The line below should never be reached
+ Assert.assertTrue(false);
+ } catch (NullPointerException nue) {
+ // NPE should be raised for null value
+ Assert.assertTrue(true);
+ }
+
+ // Now check on using null key
+ try {
+ m1.setMetadata(null, table1);
+ // The line below should never be reached
+ Assert.assertTrue(false);
+ } catch (NullPointerException nue) {
+ // NPE should be raised for null value
+ Assert.assertTrue(true);
+ }
+
+ // Now check on using null key and null value
+ try {
+ m1.setMetadata(null, null);
+ // The line below should never be reached
+ Assert.assertTrue(false);
+ } catch (NullPointerException nue) {
+ // NPE should be raised for null value
+ Assert.assertTrue(true);
+ }
+ }
}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.connection</artifactId>
- <version>0.1.2-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.connection.implementation</artifactId>
- <version>0.1.2-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.networkconfiguration</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>sal.networkconfiguration.implementation</artifactId>
- <version>0.0.3-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustersession</artifactId>
- <version>1.0.0-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
<artifactId>clustering.services</artifactId>
- <version>0.5.1-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>samples.loadbalancer</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../../commons/opendaylight</relativePath>
</parent>
<artifactId>samples.loadbalancer.northbound</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>samples.simpleforwarding</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../commons/opendaylight</relativePath>
</parent>
<artifactId>security</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>statisticsmanager</artifactId>
- <version>0.5.1-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>statisticsmanager.implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<!-- Sonar properties using jacoco to retrieve integration test results -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>statisticsmanager.integrationtest</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>switchmanager</artifactId>
- <version>0.7.1-SNAPSHOT</version>
+ <version>0.8.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>switchmanager.implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<properties>
c.add(createContainerServiceDependency(containerName).setService(
IInventoryService.class).setCallbacks(
"setInventoryService", "unsetInventoryService")
- .setRequired(false));
+ .setRequired(true));
c.add(createContainerServiceDependency(containerName).setService(
IStatisticsManager.class).setCallbacks(
"setStatisticsManager", "unsetStatisticsManager")
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>switchmanager.integrationtest</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../implementation/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>topologymanager</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.integrationtest</artifactId>
- <version>0.5.2-SNAPSHOT</version>
+ <version>0.6.0-SNAPSHOT</version>
<relativePath>../../commons/integrationtest</relativePath>
</parent>
<artifactId>topologymanager.integrationtest</artifactId>
- <version>0.4.1-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<properties>
<sonar.jacoco.itReportPath>../implementaiton/target/jacoco-it.exec</sonar.jacoco.itReportPath>
<!-- Sonar jacoco plugin to get integration test coverage info -->
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>topologymanager.shell</artifactId>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>usermanager</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>usermanager.implementation</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.0-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<scm>
<groupId>org.opendaylight.controller</groupId>
<artifactId>web.brandfragment</artifactId>
- <version>0.4.0-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>devices.web</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>flows.web</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>osgi-brandfragment.web</artifactId>
- <version>0.0.2-SNAPSHOT</version>
+ <version>0.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<description>OSGi management web application brand fragment</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>web</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>topology.web</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.opendaylight</artifactId>
- <version>1.4.2-SNAPSHOT</version>
+ <version>1.5.0-SNAPSHOT</version>
<relativePath>../../commons/opendaylight</relativePath>
</parent>
<artifactId>troubleshoot.web</artifactId>
- <version>0.4.2-SNAPSHOT</version>
+ <version>0.5.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<dependencies>
<dependency>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.parent</artifactId>
- <version>1.0.2-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<relativePath>opendaylight/commons/parent</relativePath>
</parent>
<artifactId>releasepom</artifactId>
- <version>0.1.2-SNAPSHOT</version>
+ <version>0.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
<prerequisites>
<maven>3.0</maven>
<!-- <module>third-party/net.sf.jung2</module> -->
<!-- <module>third-party/jersey-servlet</module> -->
<!-- <module>third-party/org.apache.catalina.filters.CorsFilter</module> -->
- <module>third-party/ganymed</module>
<module>third-party/commons/thirdparty</module>
</prerequisites>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.thirdparty</artifactId>
- <version>1.1.2-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<packaging>pom</packaging>
<scm>
<connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <parent>
- <groupId>org.opendaylight.controller</groupId>
- <artifactId>commons.thirdparty</artifactId>
- <version>1.1.2-SNAPSHOT</version>
- <relativePath>../commons/thirdparty</relativePath>
- </parent>
-
- <groupId>org.opendaylight.controller.thirdparty</groupId>
- <artifactId>ganymed</artifactId>
- <version>1.1-SNAPSHOT</version>
- <packaging>bundle</packaging>
-
- <dependencies>
- <dependency>
- <groupId>org.osgi</groupId>
- <artifactId>org.osgi.core</artifactId>
- <version>5.0.0</version>
- </dependency>
- <dependency>
- <groupId>ch.ethz.ganymed</groupId>
- <artifactId>ganymed-ssh2</artifactId>
- <version>261</version>
- </dependency>
- </dependencies>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.felix</groupId>
- <artifactId>maven-bundle-plugin</artifactId>
- <extensions>true</extensions>
- <configuration>
- <instructions>
- <Export-Package>ch.ethz.ssh2.*</Export-Package>
- <Embed-Dependency>ganymed-ssh2;scope=compile</Embed-Dependency>
- <Embed-Transitive>true</Embed-Transitive>
- </instructions>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-enforcer-plugin</artifactId>
- <version>${enforcer.version}</version>
- <executions>
- <execution>
- <id>enforce-no-snapshots</id>
- <goals>
- <goal>enforce</goal>
- </goals>
- <configuration>
- <rules>
- <bannedDependencies>
- <excludes>
- <exclude>ch.ethz.ganymed:ganymed-ssh2:*</exclude>
- </excludes>
- <includes>
- <include>ch.ethz.ganymed:ganymed-ssh2:[261]</include>
- </includes>
- </bannedDependencies>
- </rules>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-</project>
-
-
+++ /dev/null
-/*
- * Copyright (c) 2006-2011 Christian Plattner. All rights reserved.
- * Please refer to the LICENSE.txt for licensing details.
- */
-
-package ch.ethz.ssh2;
-
-import java.io.CharArrayWriter;
-import java.io.File;
-import java.net.Socket;
-import java.io.FileReader;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.SocketTimeoutException;
-import java.security.SecureRandom;
-import java.util.List;
-import java.util.Vector;
-
-import ch.ethz.ssh2.auth.AuthenticationManager;
-import ch.ethz.ssh2.channel.ChannelManager;
-import ch.ethz.ssh2.crypto.CryptoWishList;
-import ch.ethz.ssh2.crypto.cipher.BlockCipherFactory;
-import ch.ethz.ssh2.crypto.digest.MAC;
-import ch.ethz.ssh2.packets.PacketIgnore;
-import ch.ethz.ssh2.transport.KexManager;
-import ch.ethz.ssh2.transport.TransportManager;
-import ch.ethz.ssh2.util.TimeoutService;
-import ch.ethz.ssh2.util.TimeoutService.TimeoutToken;
-
-/**
- * A <code>Connection</code> is used to establish an encrypted TCP/IP
- * connection to a SSH-2 server.
- * <p>
- * Typically, one
- * <ol>
- * <li>creates a {@link #Connection(String) Connection} object.</li>
- * <li>calls the {@link #connect() connect()} method.</li>
- * <li>calls some of the authentication methods (e.g., {@link #authenticateWithPublicKey(String, File, String) authenticateWithPublicKey()}).</li>
- * <li>calls one or several times the {@link #openSession() openSession()} method.</li>
- * <li>finally, one must close the connection and release resources with the {@link #close() close()} method.</li>
- * </ol>
- *
- * @author Christian Plattner
- * @version $Id: Connection.java 69 2013-08-09 06:39:56Z dkocher@sudo.ch $
- */
-
-public class Connection
-{
- /**
- * The identifier presented to the SSH-2 server. This is the same
- * as the "softwareversion" defined in RFC 4253.
- * <p/>
- * <b>NOTE: As per the RFC, the "softwareversion" string MUST consist of printable
- * US-ASCII characters, with the exception of whitespace characters and the minus sign (-).</b>
- */
- private String softwareversion = String.format("Ganymed_%s", Version.getSpecification());
-
- /* Will be used to generate all random data needed for the current connection.
- * Note: SecureRandom.nextBytes() is thread safe.
- */
-
- private SecureRandom generator;
-
- private Socket precreatedSocket;
-
- public Connection(Socket socket) {
- this.precreatedSocket = socket;
- this.hostname = socket.getInetAddress().getHostName();
- this.port = socket.getPort();
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @return The list of supported cipher algorithms by this implementation.
- */
- public static synchronized String[] getAvailableCiphers()
- {
- return BlockCipherFactory.getDefaultCipherList();
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @return The list of supported MAC algorthims by this implementation.
- */
- public static synchronized String[] getAvailableMACs()
- {
- return MAC.getMacList();
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @return The list of supported server host key algorthims by this implementation.
- */
- public static synchronized String[] getAvailableServerHostKeyAlgorithms()
- {
- return KexManager.getDefaultServerHostkeyAlgorithmList();
- }
-
- private AuthenticationManager am;
-
- private boolean authenticated = false;
- private ChannelManager cm;
-
- private CryptoWishList cryptoWishList = new CryptoWishList();
-
- private DHGexParameters dhgexpara = new DHGexParameters();
-
- private final String hostname;
-
- private final int port;
-
- private TransportManager tm;
-
- private boolean tcpNoDelay = false;
-
- private ProxyData proxyData = null;
-
- private List<ConnectionMonitor> connectionMonitors = new Vector<ConnectionMonitor>();
-
- /**
- * Prepares a fresh <code>Connection</code> object which can then be used
- * to establish a connection to the specified SSH-2 server.
- * <p>
- * Same as {@link #Connection(String, int) Connection(hostname, 22)}.
- *
- * @param hostname the hostname of the SSH-2 server.
- */
- public Connection(String hostname)
- {
- this(hostname, 22);
- }
-
- /**
- * Prepares a fresh <code>Connection</code> object which can then be used
- * to establish a connection to the specified SSH-2 server.
- *
- * @param hostname
- * the host where we later want to connect to.
- * @param port
- * port on the server, normally 22.
- */
- public Connection(String hostname, int port)
- {
- this.hostname = hostname;
- this.port = port;
- }
-
- /**
- * Prepares a fresh <code>Connection</code> object which can then be used
- * to establish a connection to the specified SSH-2 server.
- *
- * @param hostname
- * the host where we later want to connect to.
- * @param port
- * port on the server, normally 22.
- * @param softwareversion
- * Allows you to set a custom "softwareversion" string as defined in RFC 4253.
- * <b>NOTE: As per the RFC, the "softwareversion" string MUST consist of printable
- * US-ASCII characters, with the exception of whitespace characters and the minus sign (-).</b>
- */
- public Connection(String hostname, int port, String softwareversion)
- {
- this.hostname = hostname;
- this.port = port;
- this.softwareversion = softwareversion;
- }
-
- /**
- * After a successful connect, one has to authenticate oneself. This method
- * is based on DSA (it uses DSA to sign a challenge sent by the server).
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If the server does not accept the request (or if further
- * authentication steps are needed), <code>false</code> is returned and
- * one can retry either by using this or any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param pem
- * A <code>String</code> containing the DSA private key of the
- * user in OpenSSH key format (PEM, you can't miss the
- * "-----BEGIN DSA PRIVATE KEY-----" tag). The string may contain
- * linefeeds.
- * @param password
- * If the PEM string is 3DES encrypted ("DES-EDE3-CBC"), then you
- * must specify the password. Otherwise, this argument will be
- * ignored and can be set to <code>null</code>.
- *
- * @return whether the connection is now authenticated.
- * @throws IOException
- *
- * @deprecated You should use one of the {@link #authenticateWithPublicKey(String, File, String) authenticateWithPublicKey()}
- * methods, this method is just a wrapper for it and will
- * disappear in future builds.
- *
- */
- public synchronized boolean authenticateWithDSA(String user, String pem, String password) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- if (pem == null)
- throw new IllegalArgumentException("pem argument is null");
-
- authenticated = am.authenticatePublicKey(user, pem.toCharArray(), password, getOrCreateSecureRND());
-
- return authenticated;
- }
-
- /**
- * A wrapper that calls {@link #authenticateWithKeyboardInteractive(String, String[], InteractiveCallback)
- * authenticateWithKeyboardInteractivewith} a <code>null</code> submethod list.
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param cb
- * An <code>InteractiveCallback</code> which will be used to
- * determine the responses to the questions asked by the server.
- * @return whether the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithKeyboardInteractive(String user, InteractiveCallback cb)
- throws IOException
- {
- return authenticateWithKeyboardInteractive(user, null, cb);
- }
-
- /**
- * After a successful connect, one has to authenticate oneself. This method
- * is based on "keyboard-interactive", specified in
- * draft-ietf-secsh-auth-kbdinteract-XX. Basically, you have to define a
- * callback object which will be feeded with challenges generated by the
- * server. Answers are then sent back to the server. It is possible that the
- * callback will be called several times during the invocation of this
- * method (e.g., if the server replies to the callback's answer(s) with
- * another challenge...)
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If the server does not accept the request (or if further
- * authentication steps are needed), <code>false</code> is returned and
- * one can retry either by using this or any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- * <p>
- * Note: some SSH servers advertise "keyboard-interactive", however, any
- * interactive request will be denied (without having sent any challenge to
- * the client).
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param submethods
- * An array of submethod names, see
- * draft-ietf-secsh-auth-kbdinteract-XX. May be <code>null</code>
- * to indicate an empty list.
- * @param cb
- * An <code>InteractiveCallback</code> which will be used to
- * determine the responses to the questions asked by the server.
- *
- * @return whether the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithKeyboardInteractive(String user, String[] submethods,
- InteractiveCallback cb) throws IOException
- {
- if (cb == null)
- throw new IllegalArgumentException("Callback may not ne NULL!");
-
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- authenticated = am.authenticateInteractive(user, submethods, cb);
-
- return authenticated;
- }
-
- /**
- * After a successful connect, one has to authenticate oneself. This method
- * sends username and password to the server.
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If the server does not accept the request (or if further
- * authentication steps are needed), <code>false</code> is returned and
- * one can retry either by using this or any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- * <p>
- * Note: if this method fails, then please double-check that it is actually
- * offered by the server (use {@link #getRemainingAuthMethods(String) getRemainingAuthMethods()}.
- * <p>
- * Often, password authentication is disabled, but users are not aware of it.
- * Many servers only offer "publickey" and "keyboard-interactive". However,
- * even though "keyboard-interactive" *feels* like password authentication
- * (e.g., when using the putty or openssh clients) it is *not* the same mechanism.
- *
- * @param user
- * @param password
- * @return if the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithPassword(String user, String password) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- if (password == null)
- throw new IllegalArgumentException("password argument is null");
-
- authenticated = am.authenticatePassword(user, password);
-
- return authenticated;
- }
-
- /**
- * After a successful connect, one has to authenticate oneself.
- * This method can be used to explicitly use the special "none"
- * authentication method (where only a username has to be specified).
- * <p>
- * Note 1: The "none" method may always be tried by clients, however as by
- * the specs, the server will not explicitly announce it. In other words,
- * the "none" token will never show up in the list returned by
- * {@link #getRemainingAuthMethods(String)}.
- * <p>
- * Note 2: no matter which one of the authenticateWithXXX() methods
- * you call, the library will always issue exactly one initial "none"
- * authentication request to retrieve the initially allowed list of
- * authentication methods by the server. Please read RFC 4252 for the
- * details.
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If further authentication steps are needed, <code>false</code>
- * is returned and one can retry by any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- *
- * @param user
- * @return if the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithNone(String user) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- /* Trigger the sending of the PacketUserauthRequestNone packet */
- /* (if not already done) */
-
- authenticated = am.authenticateNone(user);
-
- return authenticated;
- }
-
- /**
- * After a successful connect, one has to authenticate oneself.
- * The authentication method "publickey" works by signing a challenge
- * sent by the server. The signature is either DSA or RSA based - it
- * just depends on the type of private key you specify, either a DSA
- * or RSA private key in PEM format. And yes, this is may seem to be a
- * little confusing, the method is called "publickey" in the SSH-2 protocol
- * specification, however since we need to generate a signature, you
- * actually have to supply a private key =).
- * <p>
- * The private key contained in the PEM file may also be encrypted ("Proc-Type: 4,ENCRYPTED").
- * The library supports DES-CBC and DES-EDE3-CBC encryption, as well
- * as the more exotic PEM encrpytions AES-128-CBC, AES-192-CBC and AES-256-CBC.
- * <p>
- * If the authentication phase is complete, <code>true</code> will be
- * returned. If the server does not accept the request (or if further
- * authentication steps are needed), <code>false</code> is returned and
- * one can retry either by using this or any other authentication method
- * (use the <code>getRemainingAuthMethods</code> method to get a list of
- * the remaining possible methods).
- * <p>
- * NOTE PUTTY USERS: Event though your key file may start with "-----BEGIN..."
- * it is not in the expected format. You have to convert it to the OpenSSH
- * key format by using the "puttygen" tool (can be downloaded from the Putty
- * website). Simply load your key and then use the "Conversions/Export OpenSSH key"
- * functionality to get a proper PEM file.
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param pemPrivateKey
- * A <code>char[]</code> containing a DSA or RSA private key of the
- * user in OpenSSH key format (PEM, you can't miss the
- * "-----BEGIN DSA PRIVATE KEY-----" or "-----BEGIN RSA PRIVATE KEY-----"
- * tag). The char array may contain linebreaks/linefeeds.
- * @param password
- * If the PEM structure is encrypted ("Proc-Type: 4,ENCRYPTED") then
- * you must specify a password. Otherwise, this argument will be ignored
- * and can be set to <code>null</code>.
- *
- * @return whether the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithPublicKey(String user, char[] pemPrivateKey, String password)
- throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- if (user == null)
- throw new IllegalArgumentException("user argument is null");
-
- if (pemPrivateKey == null)
- throw new IllegalArgumentException("pemPrivateKey argument is null");
-
- authenticated = am.authenticatePublicKey(user, pemPrivateKey, password, getOrCreateSecureRND());
-
- return authenticated;
- }
-
- /**
- * A convenience wrapper function which reads in a private key (PEM format, either DSA or RSA)
- * and then calls <code>authenticateWithPublicKey(String, char[], String)</code>.
- * <p>
- * NOTE PUTTY USERS: Event though your key file may start with "-----BEGIN..."
- * it is not in the expected format. You have to convert it to the OpenSSH
- * key format by using the "puttygen" tool (can be downloaded from the Putty
- * website). Simply load your key and then use the "Conversions/Export OpenSSH key"
- * functionality to get a proper PEM file.
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param pemFile
- * A <code>File</code> object pointing to a file containing a DSA or RSA
- * private key of the user in OpenSSH key format (PEM, you can't miss the
- * "-----BEGIN DSA PRIVATE KEY-----" or "-----BEGIN RSA PRIVATE KEY-----"
- * tag).
- * @param password
- * If the PEM file is encrypted then you must specify the password.
- * Otherwise, this argument will be ignored and can be set to <code>null</code>.
- *
- * @return whether the connection is now authenticated.
- * @throws IOException
- */
- public synchronized boolean authenticateWithPublicKey(String user, File pemFile, String password)
- throws IOException
- {
- if (pemFile == null)
- throw new IllegalArgumentException("pemFile argument is null");
-
- char[] buff = new char[256];
-
- CharArrayWriter cw = new CharArrayWriter();
-
- FileReader fr = new FileReader(pemFile);
-
- while (true)
- {
- int len = fr.read(buff);
- if (len < 0)
- break;
- cw.write(buff, 0, len);
- }
-
- fr.close();
-
- return authenticateWithPublicKey(user, cw.toCharArray(), password);
- }
-
- /**
- * Add a {@link ConnectionMonitor} to this connection. Can be invoked at any time,
- * but it is best to add connection monitors before invoking
- * <code>connect()</code> to avoid glitches (e.g., you add a connection monitor after
- * a successful connect(), but the connection has died in the mean time. Then,
- * your connection monitor won't be notified.)
- * <p>
- * You can add as many monitors as you like. If a monitor has already been added, then
- * this method does nothing.
- *
- * @see ConnectionMonitor
- *
- * @param cmon An object implementing the {@link ConnectionMonitor} interface.
- */
- public synchronized void addConnectionMonitor(ConnectionMonitor cmon)
- {
- if (cmon == null)
- throw new IllegalArgumentException("cmon argument is null");
-
- if (!connectionMonitors.contains(cmon))
- {
- connectionMonitors.add(cmon);
-
- if (tm != null)
- tm.setConnectionMonitors(connectionMonitors);
- }
- }
-
- /**
- * Remove a {@link ConnectionMonitor} from this connection.
- *
- * @param cmon
- * @return whether the monitor could be removed
- */
- public synchronized boolean removeConnectionMonitor(ConnectionMonitor cmon)
- {
- if (cmon == null)
- throw new IllegalArgumentException("cmon argument is null");
-
- boolean existed = connectionMonitors.remove(cmon);
-
- if (tm != null)
- tm.setConnectionMonitors(connectionMonitors);
-
- return existed;
- }
-
- /**
- * Close the connection to the SSH-2 server. All assigned sessions will be
- * closed, too. Can be called at any time. Don't forget to call this once
- * you don't need a connection anymore - otherwise the receiver thread may
- * run forever.
- */
- public synchronized void close()
- {
- Throwable t = new Throwable("Closed due to user request.");
- close(t, false);
- }
-
- public synchronized void close(Throwable t, boolean hard)
- {
- if (cm != null)
- cm.closeAllChannels();
-
- if (tm != null)
- {
- tm.close(t, hard == false);
- tm = null;
- }
- am = null;
- cm = null;
- authenticated = false;
- }
-
- /**
- * Same as {@link #connect(ServerHostKeyVerifier, int, int) connect(null, 0, 0)}.
- *
- * @return see comments for the {@link #connect(ServerHostKeyVerifier, int, int) connect(ServerHostKeyVerifier, int, int)} method.
- * @throws IOException
- */
- public synchronized ConnectionInfo connect() throws IOException
- {
- return connect(null, 0, 0);
- }
-
- /**
- * Same as {@link #connect(ServerHostKeyVerifier, int, int) connect(verifier, 0, 0)}.
- *
- * @return see comments for the {@link #connect(ServerHostKeyVerifier, int, int) connect(ServerHostKeyVerifier, int, int)} method.
- * @throws IOException
- */
- public synchronized ConnectionInfo connect(ServerHostKeyVerifier verifier) throws IOException
- {
- return connect(verifier, 0, 0);
- }
-
- /**
- * Connect to the SSH-2 server and, as soon as the server has presented its
- * host key, use the {@link ServerHostKeyVerifier#verifyServerHostKey(String,
- * int, String, byte[]) ServerHostKeyVerifier.verifyServerHostKey()}
- * method of the <code>verifier</code> to ask for permission to proceed.
- * If <code>verifier</code> is <code>null</code>, then any host key will be
- * accepted - this is NOT recommended, since it makes man-in-the-middle attackes
- * VERY easy (somebody could put a proxy SSH server between you and the real server).
- * <p>
- * Note: The verifier will be called before doing any crypto calculations
- * (i.e., diffie-hellman). Therefore, if you don't like the presented host key then
- * no CPU cycles are wasted (and the evil server has less information about us).
- * <p>
- * However, it is still possible that the server presented a fake host key: the server
- * cheated (typically a sign for a man-in-the-middle attack) and is not able to generate
- * a signature that matches its host key. Don't worry, the library will detect such
- * a scenario later when checking the signature (the signature cannot be checked before
- * having completed the diffie-hellman exchange).
- * <p>
- * Note 2: The {@link ServerHostKeyVerifier#verifyServerHostKey(String,
- * int, String, byte[]) ServerHostKeyVerifier.verifyServerHostKey()} method
- * will *NOT* be called from the current thread, the call is being made from a
- * background thread (there is a background dispatcher thread for every
- * established connection).
- * <p>
- * Note 3: This method will block as long as the key exchange of the underlying connection
- * has not been completed (and you have not specified any timeouts).
- * <p>
- * Note 4: If you want to re-use a connection object that was successfully connected,
- * then you must call the {@link #close()} method before invoking <code>connect()</code> again.
- *
- * @param verifier
- * An object that implements the
- * {@link ServerHostKeyVerifier} interface. Pass <code>null</code>
- * to accept any server host key - NOT recommended.
- *
- * @param connectTimeout
- * Connect the underlying TCP socket to the server with the given timeout
- * value (non-negative, in milliseconds). Zero means no timeout. If a proxy is being
- * used (see {@link #setProxyData(ProxyData)}), then this timeout is used for the
- * connection establishment to the proxy.
- *
- * @param kexTimeout
- * Timeout for complete connection establishment (non-negative,
- * in milliseconds). Zero means no timeout. The timeout counts from the
- * moment you invoke the connect() method and is cancelled as soon as the
- * first key-exchange round has finished. It is possible that
- * the timeout event will be fired during the invocation of the
- * <code>verifier</code> callback, but it will only have an effect after
- * the <code>verifier</code> returns.
- *
- * @return A {@link ConnectionInfo} object containing the details of
- * the established connection.
- *
- * @throws IOException
- * If any problem occurs, e.g., the server's host key is not
- * accepted by the <code>verifier</code> or there is problem during
- * the initial crypto setup (e.g., the signature sent by the server is wrong).
- * <p>
- * In case of a timeout (either connectTimeout or kexTimeout)
- * a SocketTimeoutException is thrown.
- * <p>
- * An exception may also be thrown if the connection was already successfully
- * connected (no matter if the connection broke in the mean time) and you invoke
- * <code>connect()</code> again without having called {@link #close()} first.
- * <p>
- * If a HTTP proxy is being used and the proxy refuses the connection,
- * then a {@link HTTPProxyException} may be thrown, which
- * contains the details returned by the proxy. If the proxy is buggy and does
- * not return a proper HTTP response, then a normal IOException is thrown instead.
- */
- public synchronized ConnectionInfo connect(ServerHostKeyVerifier verifier, int connectTimeout, int kexTimeout)
- throws IOException
- {
- final class TimeoutState
- {
- boolean isCancelled = false;
- boolean timeoutSocketClosed = false;
- }
-
- if (tm != null)
- throw new IOException("Connection to " + hostname + " is already in connected state!");
-
- if (connectTimeout < 0)
- throw new IllegalArgumentException("connectTimeout must be non-negative!");
-
- if (kexTimeout < 0)
- throw new IllegalArgumentException("kexTimeout must be non-negative!");
-
- final TimeoutState state = new TimeoutState();
-
- tm = new TransportManager();
- tm.setSoTimeout(connectTimeout);
- tm.setConnectionMonitors(connectionMonitors);
-
- /* Make sure that the runnable below will observe the new value of "tm"
- * and "state" (the runnable will be executed in a different thread, which
- * may be already running, that is why we need a memory barrier here).
- * See also the comment in Channel.java if you
- * are interested in the details.
- *
- * OKOK, this is paranoid since adding the runnable to the todo list
- * of the TimeoutService will ensure that all writes have been flushed
- * before the Runnable reads anything
- * (there is a synchronized block in TimeoutService.addTimeoutHandler).
- */
-
- synchronized (tm)
- {
- /* We could actually synchronize on anything. */
- }
-
- try
- {
- TimeoutToken token = null;
-
- if (kexTimeout > 0)
- {
- final Runnable timeoutHandler = new Runnable()
- {
- public void run()
- {
- synchronized (state)
- {
- if (state.isCancelled)
- return;
- state.timeoutSocketClosed = true;
- tm.close(new SocketTimeoutException("The connect timeout expired"), false);
- }
- }
- };
-
- long timeoutHorizont = System.currentTimeMillis() + kexTimeout;
-
- token = TimeoutService.addTimeoutHandler(timeoutHorizont, timeoutHandler);
- }
-
- try
- {
-
- if (precreatedSocket != null) {
- tm.clientInit(precreatedSocket, softwareversion, cryptoWishList, verifier, dhgexpara,
- getOrCreateSecureRND());
- } else {
- tm.clientInit(hostname, port, softwareversion, cryptoWishList, verifier, dhgexpara, connectTimeout,
- getOrCreateSecureRND(), proxyData);
- }
- }
- catch (SocketTimeoutException se)
- {
- throw (SocketTimeoutException) new SocketTimeoutException(
- "The connect() operation on the socket timed out.").initCause(se);
- }
-
- tm.setTcpNoDelay(tcpNoDelay);
-
- /* Wait until first KEX has finished */
-
- ConnectionInfo ci = tm.getConnectionInfo(1);
-
- /* Now try to cancel the timeout, if needed */
-
- if (token != null)
- {
- TimeoutService.cancelTimeoutHandler(token);
-
- /* Were we too late? */
-
- synchronized (state)
- {
- if (state.timeoutSocketClosed)
- throw new IOException("This exception will be replaced by the one below =)");
- /* Just in case the "cancelTimeoutHandler" invocation came just a little bit
- * too late but the handler did not enter the semaphore yet - we can
- * still stop it.
- */
- state.isCancelled = true;
- }
- }
-
- return ci;
- }
- catch (SocketTimeoutException ste)
- {
- throw ste;
- }
- catch (IOException e1)
- {
- /* This will also invoke any registered connection monitors */
- close(new Throwable("There was a problem during connect."), false);
-
- synchronized (state)
- {
- /* Show a clean exception, not something like "the socket is closed!?!" */
- if (state.timeoutSocketClosed)
- throw new SocketTimeoutException("The kexTimeout (" + kexTimeout + " ms) expired.");
- }
-
- /* Do not wrap a HTTPProxyException */
- if (e1 instanceof HTTPProxyException)
- throw e1;
-
- throw (IOException) new IOException("There was a problem while connecting to " + hostname + ":" + port)
- .initCause(e1);
- }
- }
-
- /**
- * Creates a new {@link LocalPortForwarder}.
- * A <code>LocalPortForwarder</code> forwards TCP/IP connections that arrive at a local
- * port via the secure tunnel to another host (which may or may not be
- * identical to the remote SSH-2 server).
- * <p>
- * This method must only be called after one has passed successfully the authentication step.
- * There is no limit on the number of concurrent forwardings.
- *
- * @param local_port the local port the LocalPortForwarder shall bind to.
- * @param host_to_connect target address (IP or hostname)
- * @param port_to_connect target port
- * @return A {@link LocalPortForwarder} object.
- * @throws IOException
- */
- public synchronized LocalPortForwarder createLocalPortForwarder(int local_port, String host_to_connect,
- int port_to_connect) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot forward ports, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot forward ports, connection is not authenticated.");
-
- return new LocalPortForwarder(cm, local_port, host_to_connect, port_to_connect);
- }
-
- /**
- * Creates a new {@link LocalPortForwarder}.
- * A <code>LocalPortForwarder</code> forwards TCP/IP connections that arrive at a local
- * port via the secure tunnel to another host (which may or may not be
- * identical to the remote SSH-2 server).
- * <p>
- * This method must only be called after one has passed successfully the authentication step.
- * There is no limit on the number of concurrent forwardings.
- *
- * @param addr specifies the InetSocketAddress where the local socket shall be bound to.
- * @param host_to_connect target address (IP or hostname)
- * @param port_to_connect target port
- * @return A {@link LocalPortForwarder} object.
- * @throws IOException
- */
- public synchronized LocalPortForwarder createLocalPortForwarder(InetSocketAddress addr, String host_to_connect,
- int port_to_connect) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot forward ports, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot forward ports, connection is not authenticated.");
-
- return new LocalPortForwarder(cm, addr, host_to_connect, port_to_connect);
- }
-
- /**
- * Creates a new {@link LocalStreamForwarder}.
- * A <code>LocalStreamForwarder</code> manages an Input/Outputstream pair
- * that is being forwarded via the secure tunnel into a TCP/IP connection to another host
- * (which may or may not be identical to the remote SSH-2 server).
- *
- * @param host_to_connect
- * @param port_to_connect
- * @return A {@link LocalStreamForwarder} object.
- * @throws IOException
- */
- public synchronized LocalStreamForwarder createLocalStreamForwarder(String host_to_connect, int port_to_connect)
- throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot forward, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot forward, connection is not authenticated.");
-
- return new LocalStreamForwarder(cm, host_to_connect, port_to_connect);
- }
-
- /**
- * Create a very basic {@link SCPClient} that can be used to copy
- * files from/to the SSH-2 server.
- * <p>
- * Works only after one has passed successfully the authentication step.
- * There is no limit on the number of concurrent SCP clients.
- * <p>
- * Note: This factory method will probably disappear in the future.
- *
- * @return A {@link SCPClient} object.
- * @throws IOException
- */
- public synchronized SCPClient createSCPClient() throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot create SCP client, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot create SCP client, connection is not authenticated.");
-
- return new SCPClient(this);
- }
-
- /**
- * Force an asynchronous key re-exchange (the call does not block). The
- * latest values set for MAC, Cipher and DH group exchange parameters will
- * be used. If a key exchange is currently in progress, then this method has
- * the only effect that the so far specified parameters will be used for the
- * next (server driven) key exchange.
- * <p>
- * Note: This implementation will never start a key exchange (other than the initial one)
- * unless you or the SSH-2 server ask for it.
- *
- * @throws IOException
- * In case of any failure behind the scenes.
- */
- public synchronized void forceKeyExchange() throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("You need to establish a connection first.");
-
- tm.forceKeyExchange(cryptoWishList, dhgexpara, null, null);
- }
-
- /**
- * Returns the hostname that was passed to the constructor.
- *
- * @return the hostname
- */
- public synchronized String getHostname()
- {
- return hostname;
- }
-
- /**
- * Returns the port that was passed to the constructor.
- *
- * @return the TCP port
- */
- public synchronized int getPort()
- {
- return port;
- }
-
- /**
- * Returns a {@link ConnectionInfo} object containing the details of
- * the connection. Can be called as soon as the connection has been
- * established (successfully connected).
- *
- * @return A {@link ConnectionInfo} object.
- * @throws IOException
- * In case of any failure behind the scenes.
- */
- public synchronized ConnectionInfo getConnectionInfo() throws IOException
- {
- if (tm == null)
- throw new IllegalStateException(
- "Cannot get details of connection, you need to establish a connection first.");
- return tm.getConnectionInfo(1);
- }
-
- /**
- * After a successful connect, one has to authenticate oneself. This method
- * can be used to tell which authentication methods are supported by the
- * server at a certain stage of the authentication process (for the given
- * username).
- * <p>
- * Note 1: the username will only be used if no authentication step was done
- * so far (it will be used to ask the server for a list of possible
- * authentication methods by sending the initial "none" request). Otherwise,
- * this method ignores the user name and returns a cached method list
- * (which is based on the information contained in the last negative server response).
- * <p>
- * Note 2: the server may return method names that are not supported by this
- * implementation.
- * <p>
- * After a successful authentication, this method must not be called
- * anymore.
- *
- * @param user
- * A <code>String</code> holding the username.
- *
- * @return a (possibly emtpy) array holding authentication method names.
- * @throws IOException
- */
- public synchronized String[] getRemainingAuthMethods(String user) throws IOException
- {
- if (user == null)
- throw new IllegalArgumentException("user argument may not be NULL!");
-
- if (tm == null)
- throw new IllegalStateException("Connection is not established!");
-
- if (authenticated)
- throw new IllegalStateException("Connection is already authenticated!");
-
- if (am == null)
- am = new AuthenticationManager(tm);
-
- if (cm == null)
- cm = new ChannelManager(tm);
-
- return am.getRemainingMethods(user);
- }
-
- /**
- * Determines if the authentication phase is complete. Can be called at any
- * time.
- *
- * @return <code>true</code> if no further authentication steps are
- * needed.
- */
- public synchronized boolean isAuthenticationComplete()
- {
- return authenticated;
- }
-
- /**
- * Returns true if there was at least one failed authentication request and
- * the last failed authentication request was marked with "partial success"
- * by the server. This is only needed in the rare case of SSH-2 server setups
- * that cannot be satisfied with a single successful authentication request
- * (i.e., multiple authentication steps are needed.)
- * <p>
- * If you are interested in the details, then have a look at RFC4252.
- *
- * @return if the there was a failed authentication step and the last one
- * was marked as a "partial success".
- */
- public synchronized boolean isAuthenticationPartialSuccess()
- {
- if (am == null)
- return false;
-
- return am.getPartialSuccess();
- }
-
- /**
- * Checks if a specified authentication method is available. This method is
- * actually just a wrapper for {@link #getRemainingAuthMethods(String)
- * getRemainingAuthMethods()}.
- *
- * @param user
- * A <code>String</code> holding the username.
- * @param method
- * An authentication method name (e.g., "publickey", "password",
- * "keyboard-interactive") as specified by the SSH-2 standard.
- * @return if the specified authentication method is currently available.
- * @throws IOException
- */
- public synchronized boolean isAuthMethodAvailable(String user, String method) throws IOException
- {
- if (method == null)
- throw new IllegalArgumentException("method argument may not be NULL!");
-
- String methods[] = getRemainingAuthMethods(user);
-
- for (int i = 0; i < methods.length; i++)
- {
- if (methods[i].compareTo(method) == 0)
- return true;
- }
-
- return false;
- }
-
- private SecureRandom getOrCreateSecureRND()
- {
- if (generator == null)
- generator = new SecureRandom();
-
- return generator;
- }
-
- /**
- * Open a new {@link Session} on this connection. Works only after one has passed
- * successfully the authentication step. There is no limit on the number of
- * concurrent sessions.
- *
- * @return A {@link Session} object.
- * @throws IOException
- */
- public synchronized Session openSession() throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("Cannot open session, you need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("Cannot open session, connection is not authenticated.");
-
- return new Session(cm, getOrCreateSecureRND());
- }
-
- /**
- * Send an SSH_MSG_IGNORE packet. This method will generate a random data attribute
- * (length between 0 (invlusive) and 16 (exclusive) bytes, contents are random bytes).
- * <p>
- * This method must only be called once the connection is established.
- *
- * @throws IOException
- */
- public synchronized void sendIgnorePacket() throws IOException
- {
- SecureRandom rnd = getOrCreateSecureRND();
-
- byte[] data = new byte[rnd.nextInt(16)];
- rnd.nextBytes(data);
-
- sendIgnorePacket(data);
- }
-
- /**
- * Send an SSH_MSG_IGNORE packet with the given data attribute.
- * <p>
- * This method must only be called once the connection is established.
- *
- * @throws IOException
- */
- public synchronized void sendIgnorePacket(byte[] data) throws IOException
- {
- if (data == null)
- throw new IllegalArgumentException("data argument must not be null.");
-
- if (tm == null)
- throw new IllegalStateException(
- "Cannot send SSH_MSG_IGNORE packet, you need to establish a connection first.");
-
- PacketIgnore pi = new PacketIgnore();
- pi.setData(data);
-
- tm.sendMessage(pi.getPayload());
- }
-
- /**
- * Removes duplicates from a String array, keeps only first occurence
- * of each element. Does not destroy order of elements; can handle nulls.
- * Uses a very efficient O(N^2) algorithm =)
- *
- * @param list a String array.
- * @return a cleaned String array.
- */
- private String[] removeDuplicates(String[] list)
- {
- if ((list == null) || (list.length < 2))
- return list;
-
- String[] list2 = new String[list.length];
-
- int count = 0;
-
- for (int i = 0; i < list.length; i++)
- {
- boolean duplicate = false;
-
- String element = list[i];
-
- for (int j = 0; j < count; j++)
- {
- if (((element == null) && (list2[j] == null)) || ((element != null) && (element.equals(list2[j]))))
- {
- duplicate = true;
- break;
- }
- }
-
- if (duplicate)
- continue;
-
- list2[count++] = list[i];
- }
-
- if (count == list2.length)
- return list2;
-
- String[] tmp = new String[count];
- System.arraycopy(list2, 0, tmp, 0, count);
-
- return tmp;
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @param ciphers
- */
- public synchronized void setClient2ServerCiphers(String[] ciphers)
- {
- if ((ciphers == null) || (ciphers.length == 0))
- throw new IllegalArgumentException();
- ciphers = removeDuplicates(ciphers);
- BlockCipherFactory.checkCipherList(ciphers);
- cryptoWishList.c2s_enc_algos = ciphers;
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @param macs
- */
- public synchronized void setClient2ServerMACs(String[] macs)
- {
- if ((macs == null) || (macs.length == 0))
- throw new IllegalArgumentException();
- macs = removeDuplicates(macs);
- MAC.checkMacList(macs);
- cryptoWishList.c2s_mac_algos = macs;
- }
-
- /**
- * Sets the parameters for the diffie-hellman group exchange. Unless you
- * know what you are doing, you will never need this. Default values are
- * defined in the {@link DHGexParameters} class.
- *
- * @param dgp {@link DHGexParameters}, non null.
- *
- */
- public synchronized void setDHGexParameters(DHGexParameters dgp)
- {
- if (dgp == null)
- throw new IllegalArgumentException();
-
- dhgexpara = dgp;
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @param ciphers
- */
- public synchronized void setServer2ClientCiphers(String[] ciphers)
- {
- if ((ciphers == null) || (ciphers.length == 0))
- throw new IllegalArgumentException();
- ciphers = removeDuplicates(ciphers);
- BlockCipherFactory.checkCipherList(ciphers);
- cryptoWishList.s2c_enc_algos = ciphers;
- }
-
- /**
- * Unless you know what you are doing, you will never need this.
- *
- * @param macs
- */
- public synchronized void setServer2ClientMACs(String[] macs)
- {
- if ((macs == null) || (macs.length == 0))
- throw new IllegalArgumentException();
-
- macs = removeDuplicates(macs);
- MAC.checkMacList(macs);
- cryptoWishList.s2c_mac_algos = macs;
- }
-
- /**
- * Define the set of allowed server host key algorithms to be used for
- * the following key exchange operations.
- * <p>
- * Unless you know what you are doing, you will never need this.
- *
- * @param algos An array of allowed server host key algorithms.
- * SSH-2 defines <code>ssh-dss</code> and <code>ssh-rsa</code>.
- * The entries of the array must be ordered after preference, i.e.,
- * the entry at index 0 is the most preferred one. You must specify
- * at least one entry.
- */
- public synchronized void setServerHostKeyAlgorithms(String[] algos)
- {
- if ((algos == null) || (algos.length == 0))
- throw new IllegalArgumentException();
-
- algos = removeDuplicates(algos);
- KexManager.checkServerHostkeyAlgorithmsList(algos);
- cryptoWishList.serverHostKeyAlgorithms = algos;
- }
-
- /**
- * Enable/disable TCP_NODELAY (disable/enable Nagle's algorithm) on the underlying socket.
- * <p>
- * Can be called at any time. If the connection has not yet been established
- * then the passed value will be stored and set after the socket has been set up.
- * The default value that will be used is <code>false</code>.
- *
- * @param enable the argument passed to the <code>Socket.setTCPNoDelay()</code> method.
- * @throws IOException
- */
- public synchronized void setTCPNoDelay(boolean enable) throws IOException
- {
- tcpNoDelay = enable;
-
- if (tm != null)
- tm.setTcpNoDelay(enable);
- }
-
- /**
- * Used to tell the library that the connection shall be established through a proxy server.
- * It only makes sense to call this method before calling the {@link #connect() connect()}
- * method.
- * <p>
- * At the moment, only HTTP proxies are supported.
- * <p>
- * Note: This method can be called any number of times. The {@link #connect() connect()}
- * method will use the value set in the last preceding invocation of this method.
- *
- * @see HTTPProxyData
- *
- * @param proxyData Connection information about the proxy. If <code>null</code>, then
- * no proxy will be used (non surprisingly, this is also the default).
- */
- public synchronized void setProxyData(ProxyData proxyData)
- {
- this.proxyData = proxyData;
- }
-
- /**
- * Request a remote port forwarding.
- * If successful, then forwarded connections will be redirected to the given target address.
- * You can cancle a requested remote port forwarding by calling
- * {@link #cancelRemotePortForwarding(int) cancelRemotePortForwarding()}.
- * <p>
- * A call of this method will block until the peer either agreed or disagreed to your request-
- * <p>
- * Note 1: this method typically fails if you
- * <ul>
- * <li>pass a port number for which the used remote user has not enough permissions (i.e., port
- * < 1024)</li>
- * <li>or pass a port number that is already in use on the remote server</li>
- * <li>or if remote port forwarding is disabled on the server.</li>
- * </ul>
- * <p>
- * Note 2: (from the openssh man page): By default, the listening socket on the server will be
- * bound to the loopback interface only. This may be overriden by specifying a bind address.
- * Specifying a remote bind address will only succeed if the server's <b>GatewayPorts</b> option
- * is enabled (see sshd_config(5)).
- *
- * @param bindAddress address to bind to on the server:
- * <ul>
- * <li>"" means that connections are to be accepted on all protocol families
- * supported by the SSH implementation</li>
- * <li>"0.0.0.0" means to listen on all IPv4 addresses</li>
- * <li>"::" means to listen on all IPv6 addresses</li>
- * <li>"localhost" means to listen on all protocol families supported by the SSH
- * implementation on loopback addresses only, [RFC3330] and RFC3513]</li>
- * <li>"127.0.0.1" and "::1" indicate listening on the loopback interfaces for
- * IPv4 and IPv6 respectively</li>
- * </ul>
- * @param bindPort port number to bind on the server (must be > 0)
- * @param targetAddress the target address (IP or hostname)
- * @param targetPort the target port
- * @throws IOException
- */
- public synchronized void requestRemotePortForwarding(String bindAddress, int bindPort, String targetAddress,
- int targetPort) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("You need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("The connection is not authenticated.");
-
- if ((bindAddress == null) || (targetAddress == null) || (bindPort <= 0) || (targetPort <= 0))
- throw new IllegalArgumentException();
-
- cm.requestGlobalForward(bindAddress, bindPort, targetAddress, targetPort);
- }
-
- /**
- * Cancel an earlier requested remote port forwarding.
- * Currently active forwardings will not be affected (e.g., disrupted).
- * Note that further connection forwarding requests may be received until
- * this method has returned.
- *
- * @param bindPort the allocated port number on the server
- * @throws IOException if the remote side refuses the cancel request or another low
- * level error occurs (e.g., the underlying connection is closed)
- */
- public synchronized void cancelRemotePortForwarding(int bindPort) throws IOException
- {
- if (tm == null)
- throw new IllegalStateException("You need to establish a connection first.");
-
- if (!authenticated)
- throw new IllegalStateException("The connection is not authenticated.");
-
- cm.requestCancelGlobalForward(bindPort);
- }
-
- /**
- * Provide your own instance of SecureRandom. Can be used, e.g., if you
- * want to seed the used SecureRandom generator manually.
- * <p>
- * The SecureRandom instance is used during key exchanges, public key authentication,
- * x11 cookie generation and the like.
- *
- * @param rnd a SecureRandom instance
- */
- public synchronized void setSecureRandom(SecureRandom rnd)
- {
- if (rnd == null)
- throw new IllegalArgumentException();
-
- this.generator = rnd;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2006-2013 Christian Plattner. All rights reserved.
- * Please refer to the LICENSE.txt for licensing details.
- */
-
-package ch.ethz.ssh2.channel;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Vector;
-
-import ch.ethz.ssh2.ChannelCondition;
-import ch.ethz.ssh2.PtySettings;
-import ch.ethz.ssh2.ServerConnectionCallback;
-import ch.ethz.ssh2.ServerSessionCallback;
-import ch.ethz.ssh2.log.Logger;
-import ch.ethz.ssh2.packets.PacketChannelFailure;
-import ch.ethz.ssh2.packets.PacketChannelOpenConfirmation;
-import ch.ethz.ssh2.packets.PacketChannelOpenFailure;
-import ch.ethz.ssh2.packets.PacketChannelSuccess;
-import ch.ethz.ssh2.packets.PacketGlobalCancelForwardRequest;
-import ch.ethz.ssh2.packets.PacketGlobalForwardRequest;
-import ch.ethz.ssh2.packets.PacketOpenDirectTCPIPChannel;
-import ch.ethz.ssh2.packets.PacketOpenSessionChannel;
-import ch.ethz.ssh2.packets.PacketSessionExecCommand;
-import ch.ethz.ssh2.packets.PacketSessionPtyRequest;
-import ch.ethz.ssh2.packets.PacketSessionStartShell;
-import ch.ethz.ssh2.packets.PacketSessionSubsystemRequest;
-import ch.ethz.ssh2.packets.PacketSessionX11Request;
-import ch.ethz.ssh2.packets.Packets;
-import ch.ethz.ssh2.packets.TypesReader;
-import ch.ethz.ssh2.server.ServerConnectionState;
-import ch.ethz.ssh2.transport.MessageHandler;
-import ch.ethz.ssh2.transport.TransportManager;
-
-/**
- * ChannelManager. Please read the comments in Channel.java.
- * <p/>
- * Besides the crypto part, this is the core of the library.
- *
- * @author Christian Plattner
- * @version $Id: ChannelManager.java 48 2013-08-01 12:22:33Z cleondris@gmail.com $
- */
-public class ChannelManager implements MessageHandler
-{
- private static final Logger log = Logger.getLogger(ChannelManager.class);
-
- private final ServerConnectionState server_state;
- private final TransportManager tm;
-
- private final HashMap<String, X11ServerData> x11_magic_cookies = new HashMap<String, X11ServerData>();
-
- private final List<Channel> channels = new Vector<Channel>();
- private int nextLocalChannel = 100;
- private boolean shutdown = false;
- private int globalSuccessCounter = 0;
- private int globalFailedCounter = 0;
-
- private final HashMap<Integer, RemoteForwardingData> remoteForwardings = new HashMap<Integer, RemoteForwardingData>();
-
- private final List<IChannelWorkerThread> listenerThreads = new Vector<IChannelWorkerThread>();
-
- private boolean listenerThreadsAllowed = true;
-
- /**
- * Constructor for client-mode.
- * @param tm
- */
- public ChannelManager(TransportManager tm)
- {
- this.server_state = null;
- this.tm = tm;
- tm.registerMessageHandler(this, 80, 100);
- }
-
- /**
- * Constructor for server-mode.
- * @param state
- */
- public ChannelManager(ServerConnectionState state)
- {
- this.server_state = state;
- this.tm = state.tm;
- tm.registerMessageHandler(this, 80, 100);
- }
-
- private Channel getChannel(int id)
- {
- synchronized (channels)
- {
- for (Channel c : channels)
- {
- if (c.localID == id)
- return c;
- }
- }
- return null;
- }
-
- private void removeChannel(int id)
- {
- synchronized (channels)
- {
- for (Channel c : channels)
- {
- if (c.localID == id)
- {
- channels.remove(c);
- break;
- }
- }
- }
- }
-
- private int addChannel(Channel c)
- {
- synchronized (channels)
- {
- channels.add(c);
- return nextLocalChannel++;
- }
- }
-
- private void waitUntilChannelOpen(Channel c) throws IOException
- {
- boolean wasInterrupted = false;
-
- synchronized (c)
- {
- while (c.state == Channel.STATE_OPENING)
- {
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (c.state != Channel.STATE_OPEN)
- {
- removeChannel(c.localID);
-
- String detail = c.getReasonClosed();
-
- if (detail == null)
- detail = "state: " + c.state;
-
- throw new IOException("Could not open channel (" + detail + ")");
- }
- }
-
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
-
- private void waitForGlobalSuccessOrFailure() throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- synchronized (channels)
- {
- while ((globalSuccessCounter == 0) && (globalFailedCounter == 0))
- {
- if (shutdown)
- {
- throw new IOException("The connection is being shutdown");
- }
-
- try
- {
- channels.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (globalFailedCounter != 0)
- {
- throw new IOException("The server denied the request (did you enable port forwarding?)");
- }
-
- if (globalSuccessCounter == 0)
- {
- throw new IOException("Illegal state.");
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- private void waitForChannelSuccessOrFailure(Channel c) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- synchronized (c)
- {
- while ((c.successCounter == 0) && (c.failedCounter == 0))
- {
- if (c.state != Channel.STATE_OPEN)
- {
- String detail = c.getReasonClosed();
-
- if (detail == null)
- detail = "state: " + c.state;
-
- throw new IOException("This SSH2 channel is not open (" + detail + ")");
- }
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- if (c.failedCounter != 0)
- {
- throw new IOException("The server denied the request.");
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public void registerX11Cookie(String hexFakeCookie, X11ServerData data)
- {
- synchronized (x11_magic_cookies)
- {
- x11_magic_cookies.put(hexFakeCookie, data);
- }
- }
-
- public void unRegisterX11Cookie(String hexFakeCookie, boolean killChannels)
- {
- if (hexFakeCookie == null)
- throw new IllegalStateException("hexFakeCookie may not be null");
-
- synchronized (x11_magic_cookies)
- {
- x11_magic_cookies.remove(hexFakeCookie);
- }
-
- if (killChannels == false)
- return;
-
- log.debug("Closing all X11 channels for the given fake cookie");
-
- List<Channel> channel_copy = new Vector<Channel>();
-
- synchronized (channels)
- {
- channel_copy.addAll(channels);
- }
-
- for (Channel c : channel_copy)
- {
- synchronized (c)
- {
- if (hexFakeCookie.equals(c.hexX11FakeCookie) == false)
- continue;
- }
-
- try
- {
- closeChannel(c, "Closing X11 channel since the corresponding session is closing", true);
- }
- catch (IOException ignored)
- {
- }
- }
- }
-
- public X11ServerData checkX11Cookie(String hexFakeCookie)
- {
- synchronized (x11_magic_cookies)
- {
- if (hexFakeCookie != null)
- return x11_magic_cookies.get(hexFakeCookie);
- }
- return null;
- }
-
- public void closeAllChannels()
- {
- log.debug("Closing all channels");
-
- List<Channel> channel_copy = new Vector<Channel>();
-
- synchronized (channels)
- {
- channel_copy.addAll(channels);
- }
-
- for (Channel c : channel_copy)
- {
- try
- {
- closeChannel(c, "Closing all channels", true);
- }
- catch (IOException ignored)
- {
- }
- }
- }
-
- public void closeChannel(Channel c, String reason, boolean force) throws IOException
- {
- byte msg[] = new byte[5];
-
- synchronized (c)
- {
- if (force)
- {
- c.state = Channel.STATE_CLOSED;
- c.EOF = true;
- }
-
- c.setReasonClosed(reason);
-
- msg[0] = Packets.SSH_MSG_CHANNEL_CLOSE;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
-
- c.notifyAll();
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(msg);
- c.closeMessageSent = true;
- }
-
- log.debug("Sent SSH_MSG_CHANNEL_CLOSE (channel " + c.localID + ")");
- }
-
- public void sendEOF(Channel c) throws IOException
- {
- byte[] msg = new byte[5];
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- return;
-
- msg[0] = Packets.SSH_MSG_CHANNEL_EOF;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(msg);
- }
-
-
- log.debug("Sent EOF (Channel " + c.localID + "/" + c.remoteID + ")");
- }
-
- public void sendOpenConfirmation(Channel c) throws IOException
- {
- PacketChannelOpenConfirmation pcoc = null;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPENING)
- return;
-
- c.state = Channel.STATE_OPEN;
-
- pcoc = new PacketChannelOpenConfirmation(c.remoteID, c.localID, c.localWindow, c.localMaxPacketSize);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- return;
- tm.sendMessage(pcoc.getPayload());
- }
- }
-
- public void sendData(Channel c, byte[] buffer, int pos, int len) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- while (len > 0)
- {
- int thislen = 0;
- byte[] msg;
-
- synchronized (c)
- {
- while (true)
- {
- if (c.state == Channel.STATE_CLOSED)
- throw new ChannelClosedException("SSH channel is closed. (" + c.getReasonClosed() + ")");
-
- if (c.state != Channel.STATE_OPEN)
- throw new ChannelClosedException("SSH channel in strange state. (" + c.state + ")");
-
- if (c.remoteWindow != 0)
- break;
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- /* len > 0, no sign extension can happen when comparing */
-
- thislen = (c.remoteWindow >= len) ? len : (int) c.remoteWindow;
-
- int estimatedMaxDataLen = c.remoteMaxPacketSize - (tm.getPacketOverheadEstimate() + 9);
-
- /* The worst case scenario =) a true bottleneck */
-
- if (estimatedMaxDataLen <= 0)
- {
- estimatedMaxDataLen = 1;
- }
-
- if (thislen > estimatedMaxDataLen)
- thislen = estimatedMaxDataLen;
-
- c.remoteWindow -= thislen;
-
- msg = new byte[1 + 8 + thislen];
-
- msg[0] = Packets.SSH_MSG_CHANNEL_DATA;
- msg[1] = (byte) (c.remoteID >> 24);
- msg[2] = (byte) (c.remoteID >> 16);
- msg[3] = (byte) (c.remoteID >> 8);
- msg[4] = (byte) (c.remoteID);
- msg[5] = (byte) (thislen >> 24);
- msg[6] = (byte) (thislen >> 16);
- msg[7] = (byte) (thislen >> 8);
- msg[8] = (byte) (thislen);
-
- System.arraycopy(buffer, pos, msg, 9, thislen);
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent == true)
- throw new ChannelClosedException("SSH channel is closed. (" + c.getReasonClosed() + ")");
-
- tm.sendMessage(msg);
- }
-
- pos += thislen;
- len -= thislen;
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public int requestGlobalForward(String bindAddress, int bindPort, String targetAddress, int targetPort)
- throws IOException
- {
- RemoteForwardingData rfd = new RemoteForwardingData();
-
- rfd.bindAddress = bindAddress;
- rfd.bindPort = bindPort;
- rfd.targetAddress = targetAddress;
- rfd.targetPort = targetPort;
-
- synchronized (remoteForwardings)
- {
- Integer key = new Integer(bindPort);
-
- if (remoteForwardings.get(key) != null)
- {
- throw new IOException("There is already a forwarding for remote port " + bindPort);
- }
-
- remoteForwardings.put(key, rfd);
- }
-
- synchronized (channels)
- {
- globalSuccessCounter = globalFailedCounter = 0;
- }
-
- PacketGlobalForwardRequest pgf = new PacketGlobalForwardRequest(true, bindAddress, bindPort);
- tm.sendMessage(pgf.getPayload());
-
- log.debug("Requesting a remote forwarding ('" + bindAddress + "', " + bindPort + ")");
-
- try
- {
- waitForGlobalSuccessOrFailure();
- }
- catch (IOException e)
- {
- synchronized (remoteForwardings)
- {
- remoteForwardings.remove(rfd);
- }
- throw e;
- }
-
- return bindPort;
- }
-
- public void requestCancelGlobalForward(int bindPort) throws IOException
- {
- RemoteForwardingData rfd = null;
-
- synchronized (remoteForwardings)
- {
- rfd = remoteForwardings.get(new Integer(bindPort));
-
- if (rfd == null)
- throw new IOException("Sorry, there is no known remote forwarding for remote port " + bindPort);
- }
-
- synchronized (channels)
- {
- globalSuccessCounter = globalFailedCounter = 0;
- }
-
- PacketGlobalCancelForwardRequest pgcf = new PacketGlobalCancelForwardRequest(true, rfd.bindAddress,
- rfd.bindPort);
- tm.sendMessage(pgcf.getPayload());
-
- log.debug("Requesting cancelation of remote forward ('" + rfd.bindAddress + "', " + rfd.bindPort + ")");
-
- waitForGlobalSuccessOrFailure();
-
- /* Only now we are sure that no more forwarded connections will arrive */
-
- synchronized (remoteForwardings)
- {
- remoteForwardings.remove(rfd);
- }
- }
-
- public void registerThread(IChannelWorkerThread thr) throws IOException
- {
- synchronized (listenerThreads)
- {
- if (listenerThreadsAllowed == false)
- throw new IOException("Too late, this connection is closed.");
- listenerThreads.add(thr);
- }
- }
-
- public Channel openDirectTCPIPChannel(String host_to_connect, int port_to_connect, String originator_IP_address,
- int originator_port) throws IOException
- {
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.localID = addChannel(c);
- // end of synchronized block forces writing out to main memory
- }
-
- PacketOpenDirectTCPIPChannel dtc = new PacketOpenDirectTCPIPChannel(c.localID, c.localWindow,
- c.localMaxPacketSize, host_to_connect, port_to_connect, originator_IP_address, originator_port);
-
- tm.sendMessage(dtc.getPayload());
-
- waitUntilChannelOpen(c);
-
- return c;
- }
-
- public Channel openSessionChannel() throws IOException
- {
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.localID = addChannel(c);
- // end of synchronized block forces the writing out to main memory
- }
-
- log.debug("Sending SSH_MSG_CHANNEL_OPEN (Channel " + c.localID + ")");
-
- PacketOpenSessionChannel smo = new PacketOpenSessionChannel(c.localID, c.localWindow, c.localMaxPacketSize);
- tm.sendMessage(smo.getPayload());
-
- waitUntilChannelOpen(c);
-
- return c;
- }
-
- public void requestPTY(Channel c, String term, int term_width_characters, int term_height_characters,
- int term_width_pixels, int term_height_pixels, byte[] terminal_modes) throws IOException
- {
- PacketSessionPtyRequest spr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request PTY on this channel (" + c.getReasonClosed() + ")");
-
- spr = new PacketSessionPtyRequest(c.remoteID, true, term, term_width_characters, term_height_characters,
- term_width_pixels, term_height_pixels, terminal_modes);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request PTY on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(spr.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("PTY request failed").initCause(e);
- }
- }
-
- public void requestX11(Channel c, boolean singleConnection, String x11AuthenticationProtocol,
- String x11AuthenticationCookie, int x11ScreenNumber) throws IOException
- {
- PacketSessionX11Request psr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request X11 on this channel (" + c.getReasonClosed() + ")");
-
- psr = new PacketSessionX11Request(c.remoteID, true, singleConnection, x11AuthenticationProtocol,
- x11AuthenticationCookie, x11ScreenNumber);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request X11 on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(psr.getPayload());
- }
-
- log.debug("Requesting X11 forwarding (Channel " + c.localID + "/" + c.remoteID + ")");
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The X11 request failed.").initCause(e);
- }
- }
-
- public void requestSubSystem(Channel c, String subSystemName) throws IOException
- {
- PacketSessionSubsystemRequest ssr;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot request subsystem on this channel (" + c.getReasonClosed() + ")");
-
- ssr = new PacketSessionSubsystemRequest(c.remoteID, true, subSystemName);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot request subsystem on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(ssr.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The subsystem request failed.").initCause(e);
- }
- }
-
- public void requestExecCommand(Channel c, String cmd) throws IOException
- {
- this.requestExecCommand(c, cmd, null);
- }
-
- /**
- * @param charsetName The charset used to convert between Java Unicode Strings and byte encodings
- */
- public void requestExecCommand(Channel c, String cmd, String charsetName) throws IOException
- {
- PacketSessionExecCommand sm;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot execute command on this channel (" + c.getReasonClosed() + ")");
-
- sm = new PacketSessionExecCommand(c.remoteID, true, cmd);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot execute command on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(sm.getPayload(charsetName));
- }
-
- log.debug("Executing command (channel " + c.localID + ", '" + cmd + "')");
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The execute request failed.").initCause(e);
- }
- }
-
- public void requestShell(Channel c) throws IOException
- {
- PacketSessionStartShell sm;
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Cannot start shell on this channel (" + c.getReasonClosed() + ")");
-
- sm = new PacketSessionStartShell(c.remoteID, true);
-
- c.successCounter = c.failedCounter = 0;
- }
-
- synchronized (c.channelSendLock)
- {
- if (c.closeMessageSent)
- throw new IOException("Cannot start shell on this channel (" + c.getReasonClosed() + ")");
- tm.sendMessage(sm.getPayload());
- }
-
- try
- {
- waitForChannelSuccessOrFailure(c);
- }
- catch (IOException e)
- {
- throw (IOException) new IOException("The shell request failed.").initCause(e);
- }
- }
-
- public void msgChannelExtendedData(byte[] msg, int msglen) throws IOException
- {
- if (msglen <= 13)
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int dataType = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
- int len = ((msg[9] & 0xff) << 24) | ((msg[10] & 0xff) << 16) | ((msg[11] & 0xff) << 8) | (msg[12] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_EXTENDED_DATA message for non-existent channel " + id);
-
- if (dataType != Packets.SSH_EXTENDED_DATA_STDERR)
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has unknown type (" + dataType + ")");
-
- if (len != (msglen - 13))
- throw new IOException("SSH_MSG_CHANNEL_EXTENDED_DATA message has wrong len (calculated " + (msglen - 13)
- + ", got " + len + ")");
-
- log.debug("Got SSH_MSG_CHANNEL_EXTENDED_DATA (channel " + id + ", " + len + ")");
-
- synchronized (c)
- {
- if (c.state == Channel.STATE_CLOSED)
- return; // ignore
-
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Got SSH_MSG_CHANNEL_EXTENDED_DATA, but channel is not in correct state ("
- + c.state + ")");
-
- if (c.localWindow < len)
- throw new IOException("Remote sent too much data, does not fit into window.");
-
- c.localWindow -= len;
-
- System.arraycopy(msg, 13, c.stderrBuffer, c.stderrWritepos, len);
- c.stderrWritepos += len;
-
- c.notifyAll();
- }
- }
-
- /**
- * Wait until for a condition.
- *
- * @param c Channel
- * @param timeout in ms, 0 means no timeout.
- * @param condition_mask minimum event mask (at least one of the conditions must be fulfilled)
- * @return all current events
- */
- public int waitForCondition(Channel c, long timeout, int condition_mask)
- {
- boolean wasInterrupted = false;
-
- try
- {
- long end_time = 0;
- boolean end_time_set = false;
-
- synchronized (c)
- {
- while (true)
- {
- int current_cond = 0;
-
- int stdoutAvail = c.stdoutWritepos - c.stdoutReadpos;
- int stderrAvail = c.stderrWritepos - c.stderrReadpos;
-
- if (stdoutAvail > 0)
- current_cond = current_cond | ChannelCondition.STDOUT_DATA;
-
- if (stderrAvail > 0)
- current_cond = current_cond | ChannelCondition.STDERR_DATA;
-
- if (c.EOF)
- current_cond = current_cond | ChannelCondition.EOF;
-
- if (c.getExitStatus() != null)
- current_cond = current_cond | ChannelCondition.EXIT_STATUS;
-
- if (c.getExitSignal() != null)
- current_cond = current_cond | ChannelCondition.EXIT_SIGNAL;
-
- if (c.state == Channel.STATE_CLOSED)
- return current_cond | ChannelCondition.CLOSED | ChannelCondition.EOF;
-
- if ((current_cond & condition_mask) != 0)
- return current_cond;
-
- if (timeout > 0)
- {
- if (!end_time_set)
- {
- end_time = System.currentTimeMillis() + timeout;
- end_time_set = true;
- }
- else
- {
- timeout = end_time - System.currentTimeMillis();
-
- if (timeout <= 0)
- return current_cond | ChannelCondition.TIMEOUT;
- }
- }
-
- try
- {
- if (timeout > 0)
- c.wait(timeout);
- else
- c.wait();
- }
- catch (InterruptedException e)
- {
- wasInterrupted = true;
- }
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public int getAvailable(Channel c, boolean extended) throws IOException
- {
- synchronized (c)
- {
- int avail;
-
- if (extended)
- avail = c.stderrWritepos - c.stderrReadpos;
- else
- avail = c.stdoutWritepos - c.stdoutReadpos;
-
- return ((avail > 0) ? avail : (c.EOF ? -1 : 0));
- }
- }
-
- public int getChannelData(Channel c, boolean extended, byte[] target, int off, int len) throws IOException
- {
- boolean wasInterrupted = false;
-
- try
- {
- int copylen = 0;
- int increment = 0;
- int remoteID = 0;
- int localID = 0;
-
- synchronized (c)
- {
- int stdoutAvail = 0;
- int stderrAvail = 0;
-
- while (true)
- {
- /*
- * Data available? We have to return remaining data even if the
- * channel is already closed.
- */
-
- stdoutAvail = c.stdoutWritepos - c.stdoutReadpos;
- stderrAvail = c.stderrWritepos - c.stderrReadpos;
-
- if ((!extended) && (stdoutAvail != 0))
- break;
-
- if ((extended) && (stderrAvail != 0))
- break;
-
- /* Do not wait if more data will never arrive (EOF or CLOSED) */
-
- if ((c.EOF) || (c.state != Channel.STATE_OPEN))
- return -1;
-
- try
- {
- c.wait();
- }
- catch (InterruptedException ignore)
- {
- wasInterrupted = true;
- }
- }
-
- /* OK, there is some data. Return it. */
-
- if (!extended)
- {
- copylen = (stdoutAvail > len) ? len : stdoutAvail;
- System.arraycopy(c.stdoutBuffer, c.stdoutReadpos, target, off, copylen);
- c.stdoutReadpos += copylen;
-
- if (c.stdoutReadpos != c.stdoutWritepos)
-
- System.arraycopy(c.stdoutBuffer, c.stdoutReadpos, c.stdoutBuffer, 0, c.stdoutWritepos
- - c.stdoutReadpos);
-
- c.stdoutWritepos -= c.stdoutReadpos;
- c.stdoutReadpos = 0;
- }
- else
- {
- copylen = (stderrAvail > len) ? len : stderrAvail;
- System.arraycopy(c.stderrBuffer, c.stderrReadpos, target, off, copylen);
- c.stderrReadpos += copylen;
-
- if (c.stderrReadpos != c.stderrWritepos)
-
- System.arraycopy(c.stderrBuffer, c.stderrReadpos, c.stderrBuffer, 0, c.stderrWritepos
- - c.stderrReadpos);
-
- c.stderrWritepos -= c.stderrReadpos;
- c.stderrReadpos = 0;
- }
-
- if (c.state != Channel.STATE_OPEN)
- return copylen;
-
- if (c.localWindow < ((Channel.CHANNEL_BUFFER_SIZE + 1) / 2))
- {
- int minFreeSpace = Math.min(Channel.CHANNEL_BUFFER_SIZE - c.stdoutWritepos,
- Channel.CHANNEL_BUFFER_SIZE - c.stderrWritepos);
-
- increment = minFreeSpace - c.localWindow;
- c.localWindow = minFreeSpace;
- }
-
- remoteID = c.remoteID; /* read while holding the lock */
- localID = c.localID; /* read while holding the lock */
- }
-
- /*
- * If a consumer reads stdout and stdin in parallel, we may end up with
- * sending two msgWindowAdjust messages. Luckily, it
- * does not matter in which order they arrive at the server.
- */
-
- if (increment > 0)
- {
- log.debug("Sending SSH_MSG_CHANNEL_WINDOW_ADJUST (channel " + localID + ", " + increment + ")");
-
- synchronized (c.channelSendLock)
- {
- byte[] msg = c.msgWindowAdjust;
-
- msg[0] = Packets.SSH_MSG_CHANNEL_WINDOW_ADJUST;
- msg[1] = (byte) (remoteID >> 24);
- msg[2] = (byte) (remoteID >> 16);
- msg[3] = (byte) (remoteID >> 8);
- msg[4] = (byte) (remoteID);
- msg[5] = (byte) (increment >> 24);
- msg[6] = (byte) (increment >> 16);
- msg[7] = (byte) (increment >> 8);
- msg[8] = (byte) (increment);
-
- if (c.closeMessageSent == false)
- tm.sendMessage(msg);
- }
- }
-
- return copylen;
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
-
- }
-
- public void msgChannelData(byte[] msg, int msglen) throws IOException
- {
- if (msglen <= 9)
- throw new IOException("SSH_MSG_CHANNEL_DATA message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int len = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_DATA message for non-existent channel " + id);
-
- if (len != (msglen - 9))
- throw new IOException("SSH_MSG_CHANNEL_DATA message has wrong len (calculated " + (msglen - 9) + ", got "
- + len + ")");
-
- log.debug("Got SSH_MSG_CHANNEL_DATA (channel " + id + ", " + len + ")");
-
- synchronized (c)
- {
- if (c.state == Channel.STATE_CLOSED)
- return; // ignore
-
- if (c.state != Channel.STATE_OPEN)
- throw new IOException("Got SSH_MSG_CHANNEL_DATA, but channel is not in correct state (" + c.state + ")");
-
- if (c.localWindow < len)
- throw new IOException("Remote sent too much data, does not fit into window.");
-
- c.localWindow -= len;
-
- System.arraycopy(msg, 9, c.stdoutBuffer, c.stdoutWritepos, len);
- c.stdoutWritepos += len;
-
- c.notifyAll();
- }
- }
-
- public void msgChannelWindowAdjust(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 9)
- throw new IOException("SSH_MSG_CHANNEL_WINDOW_ADJUST message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
- int windowChange = ((msg[5] & 0xff) << 24) | ((msg[6] & 0xff) << 16) | ((msg[7] & 0xff) << 8) | (msg[8] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_WINDOW_ADJUST message for non-existent channel " + id);
-
- synchronized (c)
- {
- final long huge = 0xFFFFffffL; /* 2^32 - 1 */
-
- c.remoteWindow += (windowChange & huge); /* avoid sign extension */
-
- /* TODO - is this a good heuristic? */
-
- if ((c.remoteWindow > huge))
- c.remoteWindow = huge;
-
- c.notifyAll();
- }
-
-
- log.debug("Got SSH_MSG_CHANNEL_WINDOW_ADJUST (channel " + id + ", " + windowChange + ")");
- }
-
- public void msgChannelOpen(byte[] msg, int msglen) throws IOException
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- String channelType = tr.readString();
- int remoteID = tr.readUINT32(); /* sender channel */
- int remoteWindow = tr.readUINT32(); /* initial window size */
- int remoteMaxPacketSize = tr.readUINT32(); /* maximum packet size */
-
- if ("x11".equals(channelType))
- {
- synchronized (x11_magic_cookies)
- {
- /* If we did not request X11 forwarding, then simply ignore this bogus request. */
-
- if (x11_magic_cookies.size() == 0)
- {
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID,
- Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED, "X11 forwarding not activated", "");
-
- tm.sendAsynchronousMessage(pcof.getPayload());
-
- log.warning("Unexpected X11 request, denying it!");
-
- return;
- }
- }
-
- String remoteOriginatorAddress = tr.readString();
- int remoteOriginatorPort = tr.readUINT32();
-
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* properly convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- }
-
- /*
- * The open confirmation message will be sent from another thread
- */
-
- RemoteX11AcceptThread rxat = new RemoteX11AcceptThread(c, remoteOriginatorAddress, remoteOriginatorPort);
- rxat.setDaemon(true);
- rxat.start();
-
- return;
- }
-
- if ("forwarded-tcpip".equals(channelType))
- {
- String remoteConnectedAddress = tr.readString(); /* address that was connected */
- int remoteConnectedPort = tr.readUINT32(); /* port that was connected */
- String remoteOriginatorAddress = tr.readString(); /* originator IP address */
- int remoteOriginatorPort = tr.readUINT32(); /* originator port */
-
- RemoteForwardingData rfd = null;
-
- synchronized (remoteForwardings)
- {
- rfd = remoteForwardings.get(new Integer(remoteConnectedPort));
- }
-
- if (rfd == null)
- {
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID,
- Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
- "No thanks, unknown port in forwarded-tcpip request", "");
-
- /* Always try to be polite. */
-
- tm.sendAsynchronousMessage(pcof.getPayload());
-
- log.debug("Unexpected forwarded-tcpip request, denying it!");
-
- return;
- }
-
- Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- }
-
- /*
- * The open confirmation message will be sent from another thread.
- */
-
- RemoteAcceptThread rat = new RemoteAcceptThread(c, remoteConnectedAddress, remoteConnectedPort,
- remoteOriginatorAddress, remoteOriginatorPort, rfd.targetAddress, rfd.targetPort);
-
- rat.setDaemon(true);
- rat.start();
-
- return;
- }
-
- if ((server_state != null) && ("session".equals(channelType)))
- {
- ServerConnectionCallback cb = null;
-
- synchronized (server_state)
- {
- cb = server_state.cb_conn;
- }
-
- if (cb == null)
- {
- tm.sendAsynchronousMessage(new PacketChannelOpenFailure(remoteID, Packets.SSH_OPEN_ADMINISTRATIVELY_PROHIBITED,
- "Sessions are currently not enabled", "en").getPayload());
-
- return;
- }
-
- final Channel c = new Channel(this);
-
- synchronized (c)
- {
- c.remoteID = remoteID;
- c.remoteWindow = remoteWindow & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = remoteMaxPacketSize;
- c.localID = addChannel(c);
- c.state = Channel.STATE_OPEN;
- c.ss = new ServerSessionImpl(c);
- }
-
- PacketChannelOpenConfirmation pcoc = new PacketChannelOpenConfirmation(c.remoteID, c.localID,
- c.localWindow, c.localMaxPacketSize);
-
- tm.sendAsynchronousMessage(pcoc.getPayload());
-
- c.ss.sscb = cb.acceptSession(c.ss);
-
- return;
- }
-
- /* Tell the server that we have no idea what it is talking about */
-
- PacketChannelOpenFailure pcof = new PacketChannelOpenFailure(remoteID, Packets.SSH_OPEN_UNKNOWN_CHANNEL_TYPE,
- "Unknown channel type", "");
-
- tm.sendAsynchronousMessage(pcof.getPayload());
-
-
- log.warning("The peer tried to open an unsupported channel type (" + channelType + ")");
- }
-
- /* Starts the given runnable in a foreground (non-daemon) thread */
- private void runAsync(Runnable r)
- {
- Thread t = new Thread(r);
- t.start();
- }
-
- public void msgChannelRequest(byte[] msg, int msglen) throws IOException
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- int id = tr.readUINT32();
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_REQUEST message for non-existent channel " + id);
-
- ServerSessionImpl server_session = null;
-
- if (server_state != null)
- {
- synchronized (c)
- {
- server_session = c.ss;
- }
- }
-
- String type = tr.readString("US-ASCII");
- boolean wantReply = tr.readBoolean();
-
- log.debug("Got SSH_MSG_CHANNEL_REQUEST (channel " + id + ", '" + type + "')");
-
- if (type.equals("exit-status"))
- {
- if (wantReply != false)
- throw new IOException(
- "Badly formatted SSH_MSG_CHANNEL_REQUEST exit-status message, 'want reply' is true");
-
- int exit_status = tr.readUINT32();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- synchronized (c)
- {
- c.exit_status = new Integer(exit_status);
- c.notifyAll();
- }
-
- log.debug("Got EXIT STATUS (channel " + id + ", status " + exit_status + ")");
-
- return;
- }
-
- if ((server_state == null) && (type.equals("exit-signal")))
- {
- if (wantReply != false)
- throw new IOException(
- "Badly formatted SSH_MSG_CHANNEL_REQUEST exit-signal message, 'want reply' is true");
-
- String signame = tr.readString("US-ASCII");
- tr.readBoolean();
- tr.readString();
- tr.readString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- synchronized (c)
- {
- c.exit_signal = signame;
- c.notifyAll();
- }
-
- log.debug("Got EXIT SIGNAL (channel " + id + ", signal " + signame + ")");
-
- return;
- }
-
- if ((server_session != null) && (type.equals("pty-req")))
- {
- PtySettings pty = new PtySettings();
-
- pty.term = tr.readString();
- pty.term_width_characters = tr.readUINT32();
- pty.term_height_characters = tr.readUINT32();
- pty.term_width_pixels = tr.readUINT32();
- pty.term_height_pixels = tr.readUINT32();
- pty.terminal_modes = tr.readByteString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
-
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestPtyReq(server_session, pty);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- if ((server_session != null) && (type.equals("subsystem")))
- {
- String command = tr.readString();
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestSubsystem(server_session, command);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- if ((server_session != null) && (type.equals("shell")))
- {
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestShell(server_session);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- if ((server_session != null) && (type.equals("exec")))
- {
- String command = tr.readString();
-
- if (tr.remain() != 0)
- throw new IOException("Badly formatted SSH_MSG_CHANNEL_REQUEST message");
-
- Runnable run_after_sending_success = null;
- ServerSessionCallback sscb = server_session.getServerSessionCallback();
-
- if (sscb != null)
- run_after_sending_success = sscb.requestExec(server_session, command);
-
- if (wantReply)
- {
- if (run_after_sending_success != null)
- {
- tm.sendAsynchronousMessage(new PacketChannelSuccess(c.remoteID).getPayload());
- }
- else
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
- }
-
- if (run_after_sending_success != null)
- {
- runAsync(run_after_sending_success);
- }
-
- return;
- }
-
- /* We simply ignore unknown channel requests, however, if the server wants a reply,
- * then we signal that we have no idea what it is about.
- */
-
- if (wantReply)
- {
- tm.sendAsynchronousMessage(new PacketChannelFailure(c.remoteID).getPayload());
- }
-
- log.debug("Channel request '" + type + "' is not known, ignoring it");
- }
-
- public void msgChannelEOF(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_EOF message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_EOF message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.EOF = true;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_EOF (channel " + id + ")");
- }
-
- public void msgChannelClose(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_CLOSE message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_CLOSE message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("Close requested by remote");
- c.closeMessageRecv = true;
-
- removeChannel(c.localID);
-
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_CLOSE (channel " + id + ")");
- }
-
- public void msgChannelSuccess(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_SUCCESS message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_SUCCESS message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.successCounter++;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_SUCCESS (channel " + id + ")");
- }
-
- public void msgChannelFailure(byte[] msg, int msglen) throws IOException
- {
- if (msglen != 5)
- throw new IOException("SSH_MSG_CHANNEL_FAILURE message has wrong size (" + msglen + ")");
-
- int id = ((msg[1] & 0xff) << 24) | ((msg[2] & 0xff) << 16) | ((msg[3] & 0xff) << 8) | (msg[4] & 0xff);
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_FAILURE message for non-existent channel " + id);
-
- synchronized (c)
- {
- c.failedCounter++;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_FAILURE (channel " + id + ")");
- }
-
- public void msgChannelOpenConfirmation(byte[] msg, int msglen) throws IOException
- {
- PacketChannelOpenConfirmation sm = new PacketChannelOpenConfirmation(msg, 0, msglen);
-
- Channel c = getChannel(sm.recipientChannelID);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_CONFIRMATION message for non-existent channel "
- + sm.recipientChannelID);
-
- synchronized (c)
- {
- if (c.state != Channel.STATE_OPENING)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_CONFIRMATION message for channel "
- + sm.recipientChannelID);
-
- c.remoteID = sm.senderChannelID;
- c.remoteWindow = sm.initialWindowSize & 0xFFFFffffL; /* convert UINT32 to long */
- c.remoteMaxPacketSize = sm.maxPacketSize;
- c.state = Channel.STATE_OPEN;
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_OPEN_CONFIRMATION (channel " + sm.recipientChannelID + " / remote: "
- + sm.senderChannelID + ")");
- }
-
- public void msgChannelOpenFailure(byte[] msg, int msglen) throws IOException
- {
- if (msglen < 5)
- throw new IOException("SSH_MSG_CHANNEL_OPEN_FAILURE message has wrong size (" + msglen + ")");
-
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- int id = tr.readUINT32(); /* sender channel */
-
- Channel c = getChannel(id);
-
- if (c == null)
- throw new IOException("Unexpected SSH_MSG_CHANNEL_OPEN_FAILURE message for non-existent channel " + id);
-
- int reasonCode = tr.readUINT32();
- String description = tr.readString("UTF-8");
-
- String reasonCodeSymbolicName = null;
-
- switch (reasonCode)
- {
- case 1:
- reasonCodeSymbolicName = "SSH_OPEN_ADMINISTRATIVELY_PROHIBITED";
- break;
- case 2:
- reasonCodeSymbolicName = "SSH_OPEN_CONNECT_FAILED";
- break;
- case 3:
- reasonCodeSymbolicName = "SSH_OPEN_UNKNOWN_CHANNEL_TYPE";
- break;
- case 4:
- reasonCodeSymbolicName = "SSH_OPEN_RESOURCE_SHORTAGE";
- break;
- default:
- reasonCodeSymbolicName = "UNKNOWN REASON CODE (" + reasonCode + ")";
- }
-
- StringBuilder descriptionBuffer = new StringBuilder();
- descriptionBuffer.append(description);
-
- for (int i = 0; i < descriptionBuffer.length(); i++)
- {
- char cc = descriptionBuffer.charAt(i);
-
- if ((cc >= 32) && (cc <= 126))
- continue;
- descriptionBuffer.setCharAt(i, '\uFFFD');
- }
-
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("The server refused to open the channel (" + reasonCodeSymbolicName + ", '"
- + descriptionBuffer.toString() + "')");
- c.notifyAll();
- }
-
- log.debug("Got SSH_MSG_CHANNEL_OPEN_FAILURE (channel " + id + ")");
- }
-
- public void msgGlobalRequest(byte[] msg, int msglen) throws IOException
- {
- /* Currently we do not support any kind of global request */
-
- TypesReader tr = new TypesReader(msg, 0, msglen);
-
- tr.readByte(); // skip packet type
- String requestName = tr.readString();
- boolean wantReply = tr.readBoolean();
-
- if (wantReply)
- {
- byte[] reply_failure = new byte[1];
- reply_failure[0] = Packets.SSH_MSG_REQUEST_FAILURE;
-
- tm.sendAsynchronousMessage(reply_failure);
- }
-
- /* We do not clean up the requestName String - that is OK for debug */
-
- log.debug("Got SSH_MSG_GLOBAL_REQUEST (" + requestName + ")");
- }
-
- public void msgGlobalSuccess() throws IOException
- {
- synchronized (channels)
- {
- globalSuccessCounter++;
- channels.notifyAll();
- }
-
- log.debug("Got SSH_MSG_REQUEST_SUCCESS");
- }
-
- public void msgGlobalFailure() throws IOException
- {
- synchronized (channels)
- {
- globalFailedCounter++;
- channels.notifyAll();
- }
-
- log.debug("Got SSH_MSG_REQUEST_FAILURE");
- }
-
- public void handleMessage(byte[] msg, int msglen) throws IOException
- {
- if (msg == null)
- {
-
- log.debug("HandleMessage: got shutdown");
-
- synchronized (listenerThreads)
- {
- for (IChannelWorkerThread lat : listenerThreads)
- {
- lat.stopWorking();
- }
- listenerThreadsAllowed = false;
- }
-
- synchronized (channels)
- {
- shutdown = true;
-
- for (Channel c : channels)
- {
- synchronized (c)
- {
- c.EOF = true;
- c.state = Channel.STATE_CLOSED;
- c.setReasonClosed("The connection is being shutdown");
- c.closeMessageRecv = true; /*
- * You never know, perhaps
- * we are waiting for a
- * pending close message
- * from the server...
- */
- c.notifyAll();
- }
- }
-
- channels.clear();
- channels.notifyAll(); /* Notify global response waiters */
- return;
- }
- }
-
- switch (msg[0])
- {
- case Packets.SSH_MSG_CHANNEL_OPEN_CONFIRMATION:
- msgChannelOpenConfirmation(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_WINDOW_ADJUST:
- msgChannelWindowAdjust(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_DATA:
- msgChannelData(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_EXTENDED_DATA:
- msgChannelExtendedData(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_REQUEST:
- msgChannelRequest(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_EOF:
- msgChannelEOF(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_OPEN:
- msgChannelOpen(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_CLOSE:
- msgChannelClose(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_SUCCESS:
- msgChannelSuccess(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_FAILURE:
- msgChannelFailure(msg, msglen);
- break;
- case Packets.SSH_MSG_CHANNEL_OPEN_FAILURE:
- msgChannelOpenFailure(msg, msglen);
- break;
- case Packets.SSH_MSG_GLOBAL_REQUEST:
- msgGlobalRequest(msg, msglen);
- break;
- case Packets.SSH_MSG_REQUEST_SUCCESS:
- msgGlobalSuccess();
- break;
- case Packets.SSH_MSG_REQUEST_FAILURE:
- msgGlobalFailure();
- break;
- default:
- throw new IOException("Cannot handle unknown channel message " + (msg[0] & 0xff));
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2006-2013 Christian Plattner. All rights reserved.
- * Please refer to the LICENSE.txt for licensing details.
- */
-
-package ch.ethz.ssh2.transport;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.security.SecureRandom;
-import java.util.List;
-import java.util.Vector;
-
-import ch.ethz.ssh2.ConnectionInfo;
-import ch.ethz.ssh2.ConnectionMonitor;
-import ch.ethz.ssh2.DHGexParameters;
-import ch.ethz.ssh2.HTTPProxyData;
-import ch.ethz.ssh2.HTTPProxyException;
-import ch.ethz.ssh2.ProxyData;
-import ch.ethz.ssh2.ServerHostKeyVerifier;
-import ch.ethz.ssh2.crypto.Base64;
-import ch.ethz.ssh2.crypto.CryptoWishList;
-import ch.ethz.ssh2.crypto.cipher.BlockCipher;
-import ch.ethz.ssh2.crypto.digest.MAC;
-import ch.ethz.ssh2.log.Logger;
-import ch.ethz.ssh2.packets.PacketDisconnect;
-import ch.ethz.ssh2.packets.Packets;
-import ch.ethz.ssh2.packets.TypesReader;
-import ch.ethz.ssh2.server.ServerConnectionState;
-import ch.ethz.ssh2.signature.DSAPrivateKey;
-import ch.ethz.ssh2.signature.RSAPrivateKey;
-import ch.ethz.ssh2.util.StringEncoder;
-import ch.ethz.ssh2.util.Tokenizer;
-
-/*
- * Yes, the "standard" is a big mess. On one side, the say that arbitary channel
- * packets are allowed during kex exchange, on the other side we need to blindly
- * ignore the next _packet_ if the KEX guess was wrong. Where do we know from that
- * the next packet is not a channel data packet? Yes, we could check if it is in
- * the KEX range. But the standard says nothing about this. The OpenSSH guys
- * block local "normal" traffic during KEX. That's fine - however, they assume
- * that the other side is doing the same. During re-key, if they receive traffic
- * other than KEX, they become horribly irritated and kill the connection. Since
- * we are very likely going to communicate with OpenSSH servers, we have to play
- * the same game - even though we could do better.
- *
- * btw: having stdout and stderr on the same channel, with a shared window, is
- * also a VERY good idea... =(
- */
-
-/**
- * TransportManager.
- *
- * @author Christian Plattner
- * @version $Id: TransportManager.java 47 2013-07-31 23:59:52Z cleondris@gmail.com $
- */
-public class TransportManager
-{
- private static final Logger log = Logger.getLogger(TransportManager.class);
-
- private static class HandlerEntry
- {
- MessageHandler mh;
- int low;
- int high;
- }
-
- private final List<AsynchronousEntry> asynchronousQueue = new Vector<AsynchronousEntry>();
- private Thread asynchronousThread = null;
- private boolean asynchronousPending = false;
-
- class AsynchronousEntry
- {
- public byte[] msg;
- public Runnable run;
-
- public AsynchronousEntry(byte[] msg, Runnable run)
- {
- this.msg = msg;
- this.run = run;
- }
- }
-
- class AsynchronousWorker extends Thread
- {
- @Override
- public void run()
- {
- while (true)
- {
- AsynchronousEntry item = null;
-
- synchronized (asynchronousQueue)
- {
- if (asynchronousQueue.size() == 0)
- {
- /* Only now we may reset the flag, since we are sure that all queued items
- * have been sent (there is a slight delay between de-queuing and sending,
- * this is why we need this flag! See code below. Sending takes place outside
- * of this lock, this is why a test for size()==0 (from another thread) does not ensure
- * that all messages have been sent.
- */
-
- asynchronousPending = false;
-
- /* Notify any senders that they can proceed, all async messages have been delivered */
-
- asynchronousQueue.notifyAll();
-
- /* After the queue is empty for about 2 seconds, stop this thread */
-
- try
- {
- asynchronousQueue.wait(2000);
- }
- catch (InterruptedException ignore)
- {
- }
-
- if (asynchronousQueue.size() == 0)
- {
- asynchronousThread = null;
- return;
- }
- }
-
- item = asynchronousQueue.remove(0);
- }
-
- /* The following invocation may throw an IOException.
- * There is no point in handling it - it simply means
- * that the connection has a problem and we should stop
- * sending asynchronously messages. We do not need to signal that
- * we have exited (asynchronousThread = null): further
- * messages in the queue cannot be sent by this or any
- * other thread.
- * Other threads will sooner or later (when receiving or
- * sending the next message) get the same IOException and
- * get to the same conclusion.
- */
-
- try
- {
- sendMessageImmediate(item.msg);
- }
- catch (IOException e)
- {
- return;
- }
-
- if (item.run != null)
- {
- try
- {
- item.run.run();
- }
- catch (Exception ignore)
- {
- }
-
- }
- }
- }
- }
-
- private Socket sock = new Socket();
-
- private final Object connectionSemaphore = new Object();
-
- private boolean flagKexOngoing = false;
- private boolean connectionClosed = false;
-
- private Throwable reasonClosedCause = null;
-
- private TransportConnection tc;
- private KexManager km;
-
- private final List<HandlerEntry> messageHandlers = new Vector<HandlerEntry>();
-
- private Thread receiveThread;
-
- private List<ConnectionMonitor> connectionMonitors = new Vector<ConnectionMonitor>();
- private boolean monitorsWereInformed = false;
-
- /**
- * There were reports that there are JDKs which use
- * the resolver even though one supplies a dotted IP
- * address in the Socket constructor. That is why we
- * try to generate the InetAdress "by hand".
- *
- * @param host
- * @return the InetAddress
- * @throws UnknownHostException
- */
- private static InetAddress createInetAddress(String host) throws UnknownHostException
- {
- /* Check if it is a dotted IP4 address */
-
- InetAddress addr = parseIPv4Address(host);
-
- if (addr != null)
- {
- return addr;
- }
-
- return InetAddress.getByName(host);
- }
-
- private static InetAddress parseIPv4Address(String host) throws UnknownHostException
- {
- if (host == null)
- {
- return null;
- }
-
- String[] quad = Tokenizer.parseTokens(host, '.');
-
- if ((quad == null) || (quad.length != 4))
- {
- return null;
- }
-
- byte[] addr = new byte[4];
-
- for (int i = 0; i < 4; i++)
- {
- int part = 0;
-
- if ((quad[i].length() == 0) || (quad[i].length() > 3))
- {
- return null;
- }
-
- for (int k = 0; k < quad[i].length(); k++)
- {
- char c = quad[i].charAt(k);
-
- /* No, Character.isDigit is not the same */
- if ((c < '0') || (c > '9'))
- {
- return null;
- }
-
- part = part * 10 + (c - '0');
- }
-
- if (part > 255) /* 300.1.2.3 is invalid =) */
- {
- return null;
- }
-
- addr[i] = (byte) part;
- }
-
- return InetAddress.getByAddress(host, addr);
- }
-
- public int getPacketOverheadEstimate()
- {
- return tc.getPacketOverheadEstimate();
- }
-
- public void setTcpNoDelay(boolean state) throws IOException
- {
- sock.setTcpNoDelay(state);
- }
-
- public void setSoTimeout(int timeout) throws IOException
- {
- sock.setSoTimeout(timeout);
- }
-
- public ConnectionInfo getConnectionInfo(int kexNumber) throws IOException
- {
- return km.getOrWaitForConnectionInfo(kexNumber);
- }
-
- public Throwable getReasonClosedCause()
- {
- synchronized (connectionSemaphore)
- {
- return reasonClosedCause;
- }
- }
-
- public byte[] getSessionIdentifier()
- {
- return km.sessionId;
- }
-
- public void close(Throwable cause, boolean useDisconnectPacket)
- {
- if (useDisconnectPacket == false)
- {
- /* OK, hard shutdown - do not aquire the semaphore,
- * perhaps somebody is inside (and waits until the remote
- * side is ready to accept new data). */
-
- try
- {
- sock.close();
- }
- catch (IOException ignore)
- {
- }
-
- /* OK, whoever tried to send data, should now agree that
- * there is no point in further waiting =)
- * It is safe now to aquire the semaphore.
- */
- }
-
- synchronized (connectionSemaphore)
- {
- if (connectionClosed == false)
- {
- if (useDisconnectPacket == true)
- {
- try
- {
- byte[] msg = new PacketDisconnect(Packets.SSH_DISCONNECT_BY_APPLICATION, cause.getMessage(), "")
- .getPayload();
- if (tc != null)
- {
- tc.sendMessage(msg);
- }
- }
- catch (IOException ignore)
- {
- }
-
- try
- {
- sock.close();
- }
- catch (IOException ignore)
- {
- }
- }
-
- connectionClosed = true;
- reasonClosedCause = cause; /* may be null */
- }
- connectionSemaphore.notifyAll();
- }
-
- /* No check if we need to inform the monitors */
-
- List<ConnectionMonitor> monitors = new Vector<ConnectionMonitor>();
-
- synchronized (this)
- {
- /* Short term lock to protect "connectionMonitors"
- * and "monitorsWereInformed"
- * (they may be modified concurrently)
- */
-
- if (monitorsWereInformed == false)
- {
- monitorsWereInformed = true;
- monitors.addAll(connectionMonitors);
- }
- }
-
- for (ConnectionMonitor cmon : monitors)
- {
- try
- {
- cmon.connectionLost(reasonClosedCause);
- }
- catch (Exception ignore)
- {
- }
- }
- }
-
- private static Socket establishConnection(String hostname, int port, ProxyData proxyData, int connectTimeout)
- throws IOException
- {
- /* See the comment for createInetAddress() */
-
- if (proxyData == null)
- {
- InetAddress addr = createInetAddress(hostname);
- Socket s = new Socket();
- s.connect(new InetSocketAddress(addr, port), connectTimeout);
- return s;
- }
-
- if (proxyData instanceof HTTPProxyData)
- {
- HTTPProxyData pd = (HTTPProxyData) proxyData;
-
- /* At the moment, we only support HTTP proxies */
-
- InetAddress addr = createInetAddress(pd.proxyHost);
- Socket s = new Socket();
- s.connect(new InetSocketAddress(addr, pd.proxyPort), connectTimeout);
-
- /* OK, now tell the proxy where we actually want to connect to */
-
- StringBuilder sb = new StringBuilder();
-
- sb.append("CONNECT ");
- sb.append(hostname);
- sb.append(':');
- sb.append(port);
- sb.append(" HTTP/1.0\r\n");
-
- if ((pd.proxyUser != null) && (pd.proxyPass != null))
- {
- String credentials = pd.proxyUser + ":" + pd.proxyPass;
- char[] encoded = Base64.encode(StringEncoder.GetBytes(credentials));
- sb.append("Proxy-Authorization: Basic ");
- sb.append(encoded);
- sb.append("\r\n");
- }
-
- if (pd.requestHeaderLines != null)
- {
- for (int i = 0; i < pd.requestHeaderLines.length; i++)
- {
- if (pd.requestHeaderLines[i] != null)
- {
- sb.append(pd.requestHeaderLines[i]);
- sb.append("\r\n");
- }
- }
- }
-
- sb.append("\r\n");
-
- OutputStream out = s.getOutputStream();
-
- out.write(StringEncoder.GetBytes(sb.toString()));
- out.flush();
-
- /* Now parse the HTTP response */
-
- byte[] buffer = new byte[1024];
- InputStream in = s.getInputStream();
-
- int len = ClientServerHello.readLineRN(in, buffer);
-
- String httpReponse = StringEncoder.GetString(buffer, 0, len);
-
- if (httpReponse.startsWith("HTTP/") == false)
- {
- throw new IOException("The proxy did not send back a valid HTTP response.");
- }
-
- /* "HTTP/1.X XYZ X" => 14 characters minimum */
-
- if ((httpReponse.length() < 14) || (httpReponse.charAt(8) != ' ') || (httpReponse.charAt(12) != ' '))
- {
- throw new IOException("The proxy did not send back a valid HTTP response.");
- }
-
- int errorCode = 0;
-
- try
- {
- errorCode = Integer.parseInt(httpReponse.substring(9, 12));
- }
- catch (NumberFormatException ignore)
- {
- throw new IOException("The proxy did not send back a valid HTTP response.");
- }
-
- if ((errorCode < 0) || (errorCode > 999))
- {
- throw new IOException("The proxy did not send back a valid HTTP response.");
- }
-
- if (errorCode != 200)
- {
- throw new HTTPProxyException(httpReponse.substring(13), errorCode);
- }
-
- /* OK, read until empty line */
-
- while (true)
- {
- len = ClientServerHello.readLineRN(in, buffer);
- if (len == 0)
- {
- break;
- }
- }
- return s;
- }
-
- throw new IOException("Unsupported ProxyData");
- }
-
- private void startReceiver() throws IOException
- {
- receiveThread = new Thread(new Runnable()
- {
- public void run()
- {
- try
- {
- receiveLoop();
- }
- catch (Exception e)
- {
- close(e, false);
-
- log.warning("Receive thread: error in receiveLoop: " + e.getMessage());
- }
-
- if (log.isDebugEnabled())
- {
- log.debug("Receive thread: back from receiveLoop");
- }
-
- /* Tell all handlers that it is time to say goodbye */
-
- if (km != null)
- {
- try
- {
- km.handleMessage(null, 0);
- }
- catch (IOException ignored)
- {
- }
- }
-
- for (HandlerEntry he : messageHandlers)
- {
- try
- {
- he.mh.handleMessage(null, 0);
- }
- catch (Exception ignore)
- {
- }
- }
- }
- });
-
- receiveThread.setDaemon(true);
- receiveThread.start();
- }
-
- public void clientInit(Socket socket, String softwareversion, CryptoWishList cwl,
- ServerHostKeyVerifier verifier, DHGexParameters dhgex, SecureRandom rnd) throws IOException
- {
- /* First, establish the TCP connection to the SSH-2 server */
-
- sock = socket;
-
- /* Parse the server line and say hello - important: this information is later needed for the
- * key exchange (to stop man-in-the-middle attacks) - that is why we wrap it into an object
- * for later use.
- */
-
- ClientServerHello csh = ClientServerHello.clientHello(softwareversion, sock.getInputStream(),
- sock.getOutputStream());
-
- tc = new TransportConnection(sock.getInputStream(), sock.getOutputStream(), rnd);
- String hostname = sock.getInetAddress().getHostName();
- int port = sock.getPort();
-
- km = new ClientKexManager(this, csh, cwl, hostname, port, verifier, rnd);
- km.initiateKEX(cwl, dhgex, null, null);
-
- startReceiver();
- }
-
- public void clientInit(String hostname, int port, String softwareversion, CryptoWishList cwl,
- ServerHostKeyVerifier verifier, DHGexParameters dhgex, int connectTimeout, SecureRandom rnd,
- ProxyData proxyData) throws IOException
- {
- /* First, establish the TCP connection to the SSH-2 server */
-
- sock = establishConnection(hostname, port, proxyData, connectTimeout);
-
- /* Parse the server line and say hello - important: this information is later needed for the
- * key exchange (to stop man-in-the-middle attacks) - that is why we wrap it into an object
- * for later use.
- */
-
- ClientServerHello csh = ClientServerHello.clientHello(softwareversion, sock.getInputStream(),
- sock.getOutputStream());
-
- tc = new TransportConnection(sock.getInputStream(), sock.getOutputStream(), rnd);
-
- km = new ClientKexManager(this, csh, cwl, hostname, port, verifier, rnd);
- km.initiateKEX(cwl, dhgex, null, null);
-
- startReceiver();
- }
-
- public void serverInit(ServerConnectionState state) throws IOException
- {
- /* TCP connection is already established */
-
- this.sock = state.s;
-
- /* Parse the client line and say hello - important: this information is later needed for the
- * key exchange (to stop man-in-the-middle attacks) - that is why we wrap it into an object
- * for later use.
- */
-
- state.csh = ClientServerHello.serverHello(state.softwareversion, sock.getInputStream(), sock.getOutputStream());
-
- tc = new TransportConnection(sock.getInputStream(), sock.getOutputStream(), state.generator);
-
- km = new ServerKexManager(state);
- km.initiateKEX(state.next_cryptoWishList, null, state.next_dsa_key, state.next_rsa_key);
-
- startReceiver();
- }
-
- public void registerMessageHandler(MessageHandler mh, int low, int high)
- {
- HandlerEntry he = new HandlerEntry();
- he.mh = mh;
- he.low = low;
- he.high = high;
-
- synchronized (messageHandlers)
- {
- messageHandlers.add(he);
- }
- }
-
- public void removeMessageHandler(MessageHandler mh, int low, int high)
- {
- synchronized (messageHandlers)
- {
- for (int i = 0; i < messageHandlers.size(); i++)
- {
- HandlerEntry he = messageHandlers.get(i);
- if ((he.mh == mh) && (he.low == low) && (he.high == high))
- {
- messageHandlers.remove(i);
- break;
- }
- }
- }
- }
-
- public void sendKexMessage(byte[] msg) throws IOException
- {
- synchronized (connectionSemaphore)
- {
- if (connectionClosed)
- {
- throw (IOException) new IOException("Sorry, this connection is closed.").initCause(reasonClosedCause);
- }
-
- flagKexOngoing = true;
-
- try
- {
- tc.sendMessage(msg);
- }
- catch (IOException e)
- {
- close(e, false);
- throw e;
- }
- }
- }
-
- public void kexFinished() throws IOException
- {
- synchronized (connectionSemaphore)
- {
- flagKexOngoing = false;
- connectionSemaphore.notifyAll();
- }
- }
-
- /**
- *
- * @param cwl
- * @param dhgex
- * @param dsa may be null if this is a client connection
- * @param rsa may be null if this is a client connection
- * @throws IOException
- */
- public void forceKeyExchange(CryptoWishList cwl, DHGexParameters dhgex, DSAPrivateKey dsa, RSAPrivateKey rsa)
- throws IOException
- {
- synchronized (connectionSemaphore)
- {
- if (connectionClosed)
- /* Inform the caller that there is no point in triggering a new kex */
- throw (IOException) new IOException("Sorry, this connection is closed.").initCause(reasonClosedCause);
- }
-
- km.initiateKEX(cwl, dhgex, dsa, rsa);
- }
-
- public void changeRecvCipher(BlockCipher bc, MAC mac)
- {
- tc.changeRecvCipher(bc, mac);
- }
-
- public void changeSendCipher(BlockCipher bc, MAC mac)
- {
- tc.changeSendCipher(bc, mac);
- }
-
- public void sendAsynchronousMessage(byte[] msg) throws IOException
- {
- sendAsynchronousMessage(msg, null);
- }
-
- public void sendAsynchronousMessage(byte[] msg, Runnable run) throws IOException
- {
- synchronized (asynchronousQueue)
- {
- asynchronousQueue.add(new AsynchronousEntry(msg, run));
- asynchronousPending = true;
-
- /* This limit should be flexible enough. We need this, otherwise the peer
- * can flood us with global requests (and other stuff where we have to reply
- * with an asynchronous message) and (if the server just sends data and does not
- * read what we send) this will probably put us in a low memory situation
- * (our send queue would grow and grow and...) */
-
- if (asynchronousQueue.size() > 100)
- {
- throw new IOException("Error: the peer is not consuming our asynchronous replies.");
- }
-
- /* Check if we have an asynchronous sending thread */
-
- if (asynchronousThread == null)
- {
- asynchronousThread = new AsynchronousWorker();
- asynchronousThread.setDaemon(true);
- asynchronousThread.start();
-
- /* The thread will stop after 2 seconds of inactivity (i.e., empty queue) */
- }
-
- asynchronousQueue.notifyAll();
- }
- }
-
- public void setConnectionMonitors(List<ConnectionMonitor> monitors)
- {
- synchronized (this)
- {
- connectionMonitors = new Vector<ConnectionMonitor>();
- connectionMonitors.addAll(monitors);
- }
- }
-
- /**
- * True if no response message expected.
- */
- private boolean idle;
-
- /**
- * Send a message but ensure that all queued messages are being sent first.
- *
- * @param msg
- * @throws IOException
- */
- public void sendMessage(byte[] msg) throws IOException
- {
- synchronized (asynchronousQueue)
- {
- while (asynchronousPending)
- {
- try
- {
- asynchronousQueue.wait(1000);
- }
- catch (InterruptedException e)
- {
- }
- }
- }
-
- sendMessageImmediate(msg);
- }
-
- /**
- * Send message, ignore queued async messages that have not been delivered yet.
- * Will be called directly from the asynchronousThread thread.
- *
- * @param msg
- * @throws IOException
- */
- public void sendMessageImmediate(byte[] msg) throws IOException
- {
- if (Thread.currentThread() == receiveThread)
- {
- throw new IOException("Assertion error: sendMessage may never be invoked by the receiver thread!");
- }
-
- boolean wasInterrupted = false;
-
- try
- {
- synchronized (connectionSemaphore)
- {
- while (true)
- {
- if (connectionClosed)
- {
- throw (IOException) new IOException("Sorry, this connection is closed.")
- .initCause(reasonClosedCause);
- }
-
- if (flagKexOngoing == false)
- {
- break;
- }
-
- try
- {
- connectionSemaphore.wait();
- }
- catch (InterruptedException e)
- {
- wasInterrupted = true;
- }
- }
-
- try
- {
- tc.sendMessage(msg);
- idle = false;
- }
- catch (IOException e)
- {
- close(e, false);
- throw e;
- }
- }
- }
- finally
- {
- if (wasInterrupted)
- Thread.currentThread().interrupt();
- }
- }
-
- public void receiveLoop() throws IOException
- {
- byte[] msg = new byte[35000];
-
- while (true)
- {
- int msglen;
- try
- {
- msglen = tc.receiveMessage(msg, 0, msg.length);
- }
- catch (SocketTimeoutException e)
- {
- // Timeout in read
- if (idle)
- {
- log.debug("Ignoring socket timeout");
- continue;
- }
- throw e;
- }
- idle = true;
-
- int type = msg[0] & 0xff;
-
- if (type == Packets.SSH_MSG_IGNORE)
- {
- continue;
- }
-
- if (type == Packets.SSH_MSG_DEBUG)
- {
- if (log.isDebugEnabled())
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
- tr.readByte();
- tr.readBoolean();
- StringBuilder debugMessageBuffer = new StringBuilder();
- debugMessageBuffer.append(tr.readString("UTF-8"));
-
- for (int i = 0; i < debugMessageBuffer.length(); i++)
- {
- char c = debugMessageBuffer.charAt(i);
-
- if ((c >= 32) && (c <= 126))
- {
- continue;
- }
- debugMessageBuffer.setCharAt(i, '\uFFFD');
- }
-
- log.debug("DEBUG Message from remote: '" + debugMessageBuffer.toString() + "'");
- }
- continue;
- }
-
- if (type == Packets.SSH_MSG_UNIMPLEMENTED)
- {
- throw new IOException("Peer sent UNIMPLEMENTED message, that should not happen.");
- }
-
- if (type == Packets.SSH_MSG_DISCONNECT)
- {
- TypesReader tr = new TypesReader(msg, 0, msglen);
- tr.readByte();
- int reason_code = tr.readUINT32();
- StringBuilder reasonBuffer = new StringBuilder();
- reasonBuffer.append(tr.readString("UTF-8"));
-
- /*
- * Do not get fooled by servers that send abnormal long error
- * messages
- */
-
- if (reasonBuffer.length() > 255)
- {
- reasonBuffer.setLength(255);
- reasonBuffer.setCharAt(254, '.');
- reasonBuffer.setCharAt(253, '.');
- reasonBuffer.setCharAt(252, '.');
- }
-
- /*
- * Also, check that the server did not send characters that may
- * screw up the receiver -> restrict to reasonable US-ASCII
- * subset -> "printable characters" (ASCII 32 - 126). Replace
- * all others with 0xFFFD (UNICODE replacement character).
- */
-
- for (int i = 0; i < reasonBuffer.length(); i++)
- {
- char c = reasonBuffer.charAt(i);
-
- if ((c >= 32) && (c <= 126))
- {
- continue;
- }
- reasonBuffer.setCharAt(i, '\uFFFD');
- }
-
- throw new IOException("Peer sent DISCONNECT message (reason code " + reason_code + "): "
- + reasonBuffer.toString());
- }
-
- /*
- * Is it a KEX Packet?
- */
-
- if ((type == Packets.SSH_MSG_KEXINIT) || (type == Packets.SSH_MSG_NEWKEYS)
- || ((type >= 30) && (type <= 49)))
- {
- km.handleMessage(msg, msglen);
- continue;
- }
-
- MessageHandler mh = null;
-
- for (int i = 0; i < messageHandlers.size(); i++)
- {
- HandlerEntry he = messageHandlers.get(i);
- if ((he.low <= type) && (type <= he.high))
- {
- mh = he.mh;
- break;
- }
- }
-
- if (mh == null)
- {
- throw new IOException("Unexpected SSH message (type " + type + ")");
- }
-
- mh.handleMessage(msg, msglen);
- }
- }
-}
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.thirdparty</artifactId>
- <version>1.1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../commons/thirdparty</relativePath>
</parent>
<scm>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>com.sun.jersey.jersey-servlet</artifactId>
- <version>1.18-SNAPSHOT</version>
+ <version>1.19.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<plugins>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.thirdparty</artifactId>
- <version>1.1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../commons/thirdparty</relativePath>
</parent>
<scm>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>net.sf.jung2</artifactId>
- <version>2.0.2-SNAPSHOT</version>
+ <version>2.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<plugins>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>org.openflow.openflowj</artifactId>
- <version>1.0.3-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<name>OpenFlow Java</name>
<description>A Java implemention of the OpenFlow v1.0 protocol</description>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.thirdparty</artifactId>
- <version>1.1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../commons/thirdparty</relativePath>
</parent>
<parent>
<groupId>org.opendaylight.controller</groupId>
<artifactId>commons.thirdparty</artifactId>
- <version>1.1.1-SNAPSHOT</version>
+ <version>1.2.0-SNAPSHOT</version>
<relativePath>../commons/thirdparty</relativePath>
</parent>
<scm>
<modelVersion>4.0.0</modelVersion>
<groupId>org.opendaylight.controller.thirdparty</groupId>
<artifactId>org.apache.catalina.filters.CorsFilter</artifactId>
- <version>7.0.43-SNAPSHOT</version>
+ <version>7.1.0-SNAPSHOT</version>
<packaging>bundle</packaging>
<build>
<plugins>